/*
 * Copyright (c) 2012-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/types.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/kpi_mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/kern_control.h>
#include <sys/codesign.h>
#include <libkern/tree.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/task.h>
#include <mach/task_info.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/flowhash.h>
#include <net/ntstat.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/flow_divert.h>
#include <netinet/flow_divert_proto.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6protosw.h>
#include <dev/random/randomdev.h>
#include <libkern/crypto/sha1.h>
#include <libkern/crypto/crypto_internal.h>
#include <corecrypto/cc.h>
#if CONTENT_FILTER
#include <net/content_filter.h>
#endif /* CONTENT_FILTER */
#define FLOW_DIVERT_CONNECT_STARTED     0x00000001
#define FLOW_DIVERT_READ_CLOSED         0x00000002
#define FLOW_DIVERT_WRITE_CLOSED        0x00000004
#define FLOW_DIVERT_TUNNEL_RD_CLOSED    0x00000008
#define FLOW_DIVERT_TUNNEL_WR_CLOSED    0x00000010
#define FLOW_DIVERT_TRANSFERRED         0x00000020
#define FLOW_DIVERT_HAS_HMAC            0x00000040

#define FDLOG(level, pcb, format, ...) \
	os_log_with_type(OS_LOG_DEFAULT, flow_divert_syslog_type_to_oslog_type(level), "(%u): " format "\n", (pcb)->hash, __VA_ARGS__)

#define FDLOG0(level, pcb, msg) \
	os_log_with_type(OS_LOG_DEFAULT, flow_divert_syslog_type_to_oslog_type(level), "(%u): " msg "\n", (pcb)->hash)

#define FDRETAIN(pcb)   if ((pcb) != NULL) OSIncrementAtomic(&(pcb)->ref_count)
#define FDRELEASE(pcb)                                                          \
	do {                                                                    \
		if ((pcb) != NULL && 1 == OSDecrementAtomic(&(pcb)->ref_count)) { \
			flow_divert_pcb_destroy(pcb);                           \
		}                                                               \
	} while (0)

#define FDLOCK(pcb)     lck_mtx_lock(&(pcb)->mtx)
#define FDUNLOCK(pcb)   lck_mtx_unlock(&(pcb)->mtx)

#define FD_CTL_SENDBUFF_SIZE            (128 * 1024)
#define FD_CTL_RCVBUFF_SIZE             (128 * 1024)

#define GROUP_BIT_CTL_ENQUEUE_BLOCKED   0

#define GROUP_COUNT_MAX                 32
#define FLOW_DIVERT_MAX_NAME_SIZE       4096
#define FLOW_DIVERT_MAX_KEY_SIZE        1024
#define FLOW_DIVERT_MAX_TRIE_MEMORY     (1024 * 1024)
struct flow_divert_trie_node {
	uint16_t start;
	uint16_t length;
	uint16_t child_map;
};

#define CHILD_MAP_SIZE          256
#define NULL_TRIE_IDX           0xffff
#define TRIE_NODE(t, i)         ((t)->nodes[(i)])
#define TRIE_CHILD(t, i, b)     (((t)->child_maps + (CHILD_MAP_SIZE * TRIE_NODE(t, i).child_map))[(b)])
#define TRIE_BYTE(t, i)         ((t)->bytes[(i)])
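
/*
 * The signing identifier trie is stored as three flat, preallocated arrays
 * rather than individually allocated nodes: a `nodes` array (each node records
 * the start and length of its byte string plus an optional child map), a
 * `child_maps` array carved into CHILD_MAP_SIZE-entry slices (one slot per
 * possible next byte), and a `bytes` pool holding the string data itself.
 * Indices are 16-bit, with NULL_TRIE_IDX (0xffff) serving as the "no node /
 * no child map" sentinel, which is why the TRIE_NODE, TRIE_CHILD and
 * TRIE_BYTE accessors above take plain indices instead of pointers.
 */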
static struct flow_divert_pcb           nil_pcb;

decl_lck_rw_data(static, g_flow_divert_group_lck);
static struct flow_divert_group         **g_flow_divert_groups = NULL;
static uint32_t                         g_active_group_count = 0;

static lck_grp_attr_t                   *flow_divert_grp_attr = NULL;
static lck_attr_t                       *flow_divert_mtx_attr = NULL;
static lck_grp_t                        *flow_divert_mtx_grp = NULL;
static errno_t                          g_init_result = 0;

static kern_ctl_ref                     g_flow_divert_kctl_ref = NULL;

static struct protosw                   g_flow_divert_in_protosw;
static struct pr_usrreqs                g_flow_divert_in_usrreqs;
static struct protosw                   g_flow_divert_in_udp_protosw;
static struct pr_usrreqs                g_flow_divert_in_udp_usrreqs;

static struct ip6protosw                g_flow_divert_in6_protosw;
static struct pr_usrreqs                g_flow_divert_in6_usrreqs;
static struct ip6protosw                g_flow_divert_in6_udp_protosw;
static struct pr_usrreqs                g_flow_divert_in6_udp_usrreqs;

static struct protosw                   *g_tcp_protosw = NULL;
static struct ip6protosw                *g_tcp6_protosw = NULL;
static struct protosw                   *g_udp_protosw = NULL;
static struct ip6protosw                *g_udp6_protosw = NULL;
static errno_t
flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, struct sockaddr **dup);

static errno_t
flow_divert_inp_to_sockaddr(const struct inpcb *inp, struct sockaddr **local_socket);

static boolean_t
flow_divert_is_sockaddr_valid(struct sockaddr *addr);

static int
flow_divert_append_target_endpoint_tlv(mbuf_t connect_packet, struct sockaddr *toaddr);

static struct sockaddr *
flow_divert_get_buffered_target_address(mbuf_t buffer);

static boolean_t
flow_divert_has_pcb_local_address(const struct inpcb *inp);

static void
flow_divert_disconnect_socket(struct socket *so);
static inline uint8_t
flow_divert_syslog_type_to_oslog_type(int syslog_type)
{
	switch (syslog_type) {
	case LOG_ERR:
		return OS_LOG_TYPE_ERROR;
	case LOG_INFO:
		return OS_LOG_TYPE_INFO;
	case LOG_DEBUG:
		return OS_LOG_TYPE_DEBUG;
	default:
		return OS_LOG_TYPE_DEFAULT;
	}
}
static inline int
flow_divert_pcb_cmp(const struct flow_divert_pcb *pcb_a, const struct flow_divert_pcb *pcb_b)
{
	return memcmp(&pcb_a->hash, &pcb_b->hash, sizeof(pcb_a->hash));
}

RB_PROTOTYPE(fd_pcb_tree, flow_divert_pcb, rb_link, flow_divert_pcb_cmp);
RB_GENERATE(fd_pcb_tree, flow_divert_pcb, rb_link, flow_divert_pcb_cmp);
static const char *
flow_divert_packet_type2str(uint8_t packet_type)
{
	switch (packet_type) {
	case FLOW_DIVERT_PKT_CONNECT:
		return "connect";
	case FLOW_DIVERT_PKT_CONNECT_RESULT:
		return "connect result";
	case FLOW_DIVERT_PKT_DATA:
		return "data";
	case FLOW_DIVERT_PKT_CLOSE:
		return "close";
	case FLOW_DIVERT_PKT_READ_NOTIFY:
		return "read notification";
	case FLOW_DIVERT_PKT_PROPERTIES_UPDATE:
		return "properties update";
	case FLOW_DIVERT_PKT_APP_MAP_CREATE:
		return "app map create";
	default:
		return "unknown";
	}
}
static struct flow_divert_pcb *
flow_divert_pcb_lookup(uint32_t hash, struct flow_divert_group *group)
{
	struct flow_divert_pcb  key_item;
	struct flow_divert_pcb  *fd_cb = NULL;

	key_item.hash = hash;

	lck_rw_lock_shared(&group->lck);
	fd_cb = RB_FIND(fd_pcb_tree, &group->pcb_tree, &key_item);
	FDRETAIN(fd_cb);
	lck_rw_done(&group->lck);

	return fd_cb;
}
static errno_t
flow_divert_pcb_insert(struct flow_divert_pcb *fd_cb, uint32_t ctl_unit)
{
	errno_t                         error = 0;
	struct flow_divert_pcb          *exist = NULL;
	struct flow_divert_group        *group;
	static uint32_t                 g_nextkey = 1;
	static uint32_t                 g_hash_seed = 0;
	int                             try_count = 0;

	if (ctl_unit == 0 || ctl_unit >= GROUP_COUNT_MAX) {
		return EINVAL;
	}

	socket_unlock(fd_cb->so, 0);
	lck_rw_lock_shared(&g_flow_divert_group_lck);

	if (g_flow_divert_groups == NULL || g_active_group_count == 0) {
		FDLOG0(LOG_ERR, &nil_pcb, "No active groups, flow divert cannot be used for this socket");
		error = ENETUNREACH;
		goto done;
	}

	group = g_flow_divert_groups[ctl_unit];
	if (group == NULL) {
		FDLOG(LOG_ERR, &nil_pcb, "Group for control unit %u is NULL, flow divert cannot be used for this socket", ctl_unit);
		error = ENETUNREACH;
		goto done;
	}

	socket_lock(fd_cb->so, 0);

	do {
		uint32_t        key[2];
		uint32_t        idx;

		key[0] = g_nextkey++;
		key[1] = RandomULong();

		if (g_hash_seed == 0) {
			g_hash_seed = RandomULong();
		}

		fd_cb->hash = net_flowhash(key, sizeof(key), g_hash_seed);

		for (idx = 1; idx < GROUP_COUNT_MAX; idx++) {
			struct flow_divert_group *curr_group = g_flow_divert_groups[idx];
			if (curr_group != NULL && curr_group != group) {
				lck_rw_lock_shared(&curr_group->lck);
				exist = RB_FIND(fd_pcb_tree, &curr_group->pcb_tree, fd_cb);
				lck_rw_done(&curr_group->lck);
				if (exist != NULL) {
					break;
				}
			}
		}

		if (exist == NULL) {
			lck_rw_lock_exclusive(&group->lck);
			exist = RB_INSERT(fd_pcb_tree, &group->pcb_tree, fd_cb);
			lck_rw_done(&group->lck);
		}
	} while (exist != NULL && try_count++ < 3);

	if (exist == NULL) {
		fd_cb->group = group;
		FDRETAIN(fd_cb);                /* The group now has a reference */
	} else {
		error = EEXIST;
	}

	socket_unlock(fd_cb->so, 0);

done:
	lck_rw_done(&g_flow_divert_group_lck);
	socket_lock(fd_cb->so, 0);

	return error;
}
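
/*
 * PCB lifetime is reference counted via FDRETAIN/FDRELEASE: the owning socket
 * takes one reference when the PCB is created (flow_divert_pcb_create) and the
 * control group takes another when the PCB is inserted into its RB tree
 * (flow_divert_pcb_insert). The PCB is destroyed only when the last reference
 * is dropped.
 */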
static struct flow_divert_pcb *
flow_divert_pcb_create(socket_t so)
{
	struct flow_divert_pcb  *new_pcb = NULL;

	MALLOC_ZONE(new_pcb, struct flow_divert_pcb *, sizeof(*new_pcb), M_FLOW_DIVERT_PCB, M_WAITOK);
	if (new_pcb == NULL) {
		FDLOG0(LOG_ERR, &nil_pcb, "failed to allocate a pcb");
		return NULL;
	}

	memset(new_pcb, 0, sizeof(*new_pcb));

	lck_mtx_init(&new_pcb->mtx, flow_divert_mtx_grp, flow_divert_mtx_attr);
	new_pcb->so = so;
	new_pcb->log_level = nil_pcb.log_level;

	FDRETAIN(new_pcb);      /* Represents the socket's reference */

	return new_pcb;
}
static void
flow_divert_pcb_destroy(struct flow_divert_pcb *fd_cb)
{
	FDLOG(LOG_INFO, fd_cb, "Destroying, app tx %u, app rx %u, tunnel tx %u, tunnel rx %u",
	    fd_cb->bytes_written_by_app, fd_cb->bytes_read_by_app, fd_cb->bytes_sent, fd_cb->bytes_received);

	if (fd_cb->local_address != NULL) {
		FREE(fd_cb->local_address, M_SONAME);
	}
	if (fd_cb->remote_address != NULL) {
		FREE(fd_cb->remote_address, M_SONAME);
	}
	if (fd_cb->connect_token != NULL) {
		mbuf_freem(fd_cb->connect_token);
	}
	if (fd_cb->connect_packet != NULL) {
		mbuf_freem(fd_cb->connect_packet);
	}
	if (fd_cb->app_data != NULL) {
		FREE(fd_cb->app_data, M_TEMP);
	}
	FREE_ZONE(fd_cb, sizeof(*fd_cb), M_FLOW_DIVERT_PCB);
}
static void
flow_divert_pcb_remove(struct flow_divert_pcb *fd_cb)
{
	if (fd_cb->group != NULL) {
		struct flow_divert_group *group = fd_cb->group;
		lck_rw_lock_exclusive(&group->lck);
		FDLOG(LOG_INFO, fd_cb, "Removing from group %d, ref count = %d", group->ctl_unit, fd_cb->ref_count);
		RB_REMOVE(fd_pcb_tree, &group->pcb_tree, fd_cb);
		fd_cb->group = NULL;
		FDRELEASE(fd_cb);       /* Release the group's reference */
		lck_rw_done(&group->lck);
	}
}
static int
flow_divert_packet_init(struct flow_divert_pcb *fd_cb, uint8_t packet_type, mbuf_t *packet)
{
	struct flow_divert_packet_header        hdr;
	int                                     error = 0;

	error = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_HEADER, packet);
	if (error) {
		FDLOG(LOG_ERR, fd_cb, "failed to allocate the header mbuf: %d", error);
		return error;
	}

	hdr.packet_type = packet_type;
	hdr.conn_id = htonl(fd_cb->hash);

	/* Lay down the header */
	error = mbuf_copyback(*packet, 0, sizeof(hdr), &hdr, MBUF_DONTWAIT);
	if (error) {
		FDLOG(LOG_ERR, fd_cb, "mbuf_copyback(hdr) failed: %d", error);
		mbuf_freem(*packet);
		*packet = NULL;
		return error;
	}

	return 0;
}
static int
flow_divert_packet_append_tlv(mbuf_t packet, uint8_t type, uint32_t length, const void *value)
{
	uint32_t        net_length = htonl(length);
	int             error = 0;

	error = mbuf_copyback(packet, mbuf_pkthdr_len(packet), sizeof(type), &type, MBUF_DONTWAIT);
	if (error) {
		FDLOG(LOG_ERR, &nil_pcb, "failed to append the type (%d)", type);
		return error;
	}

	error = mbuf_copyback(packet, mbuf_pkthdr_len(packet), sizeof(net_length), &net_length, MBUF_DONTWAIT);
	if (error) {
		FDLOG(LOG_ERR, &nil_pcb, "failed to append the length (%u)", length);
		return error;
	}

	error = mbuf_copyback(packet, mbuf_pkthdr_len(packet), length, value, MBUF_DONTWAIT);
	if (error) {
		FDLOG0(LOG_ERR, &nil_pcb, "failed to append the value");
		return error;
	}

	return 0;
}
static int
flow_divert_packet_find_tlv(mbuf_t packet, int offset, uint8_t type, int *err, int next)
{
	size_t          cursor = offset;
	int             error = 0;
	uint32_t        curr_length;
	uint8_t         curr_type;

	*err = 0;

	do {
		if (!next) {
			error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
			if (error) {
				*err = ENOENT;
				return -1;
			}
		} else {
			next = 0;
			curr_type = FLOW_DIVERT_TLV_NIL;
		}

		if (curr_type != type) {
			cursor += sizeof(curr_type);
			error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
			if (error) {
				*err = error;
				return -1;
			}

			cursor += (sizeof(curr_length) + ntohl(curr_length));
		}
	} while (curr_type != type);

	return cursor;
}
static int
flow_divert_packet_get_tlv(mbuf_t packet, int offset, uint8_t type, size_t buff_len, void *buff, uint32_t *val_size)
{
	int             error = 0;
	uint32_t        length;
	int             tlv_offset;

	tlv_offset = flow_divert_packet_find_tlv(packet, offset, type, &error, 0);
	if (tlv_offset < 0) {
		return error;
	}

	error = mbuf_copydata(packet, tlv_offset + sizeof(type), sizeof(length), &length);
	if (error) {
		return error;
	}

	length = ntohl(length);

	uint32_t data_offset = tlv_offset + sizeof(type) + sizeof(length);

	if (length > (mbuf_pkthdr_len(packet) - data_offset)) {
		FDLOG(LOG_ERR, &nil_pcb, "Length of %u TLV (%u) is larger than remaining packet data (%lu)", type, length, (mbuf_pkthdr_len(packet) - data_offset));
		return EINVAL;
	}

	if (val_size != NULL) {
		*val_size = length;
	}

	if (buff != NULL && buff_len > 0) {
		memset(buff, 0, buff_len);
		size_t to_copy = (length < buff_len) ? length : buff_len;
		error = mbuf_copydata(packet, data_offset, to_copy, buff);
		if (error) {
			return error;
		}
	}

	return 0;
}
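
/*
 * Every message exchanged with the user-space agent over the kernel control
 * socket starts with a struct flow_divert_packet_header (the packet type plus
 * the connection hash in network byte order), followed by a sequence of TLVs:
 * a one-byte type, a four-byte length in network byte order, and `length`
 * bytes of value. The helpers above build and parse that layout. As an
 * illustrative sketch (error handling omitted), a close message carrying an
 * error code TLV would be assembled roughly like this:
 *
 *	mbuf_t packet = NULL;
 *	uint32_t zero = 0;
 *	flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CLOSE, &packet);
 *	flow_divert_packet_append_tlv(packet, FLOW_DIVERT_TLV_ERROR_CODE,
 *	    sizeof(zero), &zero);
 */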
static int
flow_divert_packet_compute_hmac(mbuf_t packet, struct flow_divert_group *group, uint8_t *hmac)
{
	mbuf_t  curr_mbuf = packet;

	if (g_crypto_funcs == NULL || group->token_key == NULL) {
		return ENOPROTOOPT;
	}

	cchmac_di_decl(g_crypto_funcs->ccsha1_di, hmac_ctx);
	g_crypto_funcs->cchmac_init_fn(g_crypto_funcs->ccsha1_di, hmac_ctx, group->token_key_size, group->token_key);

	while (curr_mbuf != NULL) {
		g_crypto_funcs->cchmac_update_fn(g_crypto_funcs->ccsha1_di, hmac_ctx, mbuf_len(curr_mbuf), mbuf_data(curr_mbuf));
		curr_mbuf = mbuf_next(curr_mbuf);
	}

	g_crypto_funcs->cchmac_final_fn(g_crypto_funcs->ccsha1_di, hmac_ctx, hmac);

	return 0;
}
static int
flow_divert_packet_verify_hmac(mbuf_t packet, uint32_t ctl_unit)
{
	int                             error = 0;
	struct flow_divert_group        *group = NULL;
	int                             hmac_offset;
	uint8_t                         packet_hmac[SHA_DIGEST_LENGTH];
	uint8_t                         computed_hmac[SHA_DIGEST_LENGTH];
	mbuf_t                          tail;

	lck_rw_lock_shared(&g_flow_divert_group_lck);

	if (g_flow_divert_groups != NULL && g_active_group_count > 0) {
		group = g_flow_divert_groups[ctl_unit];
	}

	if (group == NULL) {
		lck_rw_done(&g_flow_divert_group_lck);
		return ENOPROTOOPT;
	}

	lck_rw_lock_shared(&group->lck);

	if (group->token_key == NULL) {
		error = ENOPROTOOPT;
		goto done;
	}

	hmac_offset = flow_divert_packet_find_tlv(packet, 0, FLOW_DIVERT_TLV_HMAC, &error, 0);
	if (hmac_offset < 0) {
		goto done;
	}

	error = flow_divert_packet_get_tlv(packet, hmac_offset, FLOW_DIVERT_TLV_HMAC, sizeof(packet_hmac), packet_hmac, NULL);
	if (error) {
		goto done;
	}

	/* Chop off the HMAC TLV */
	error = mbuf_split(packet, hmac_offset, MBUF_WAITOK, &tail);
	if (error) {
		goto done;
	}

	mbuf_freem(tail);

	error = flow_divert_packet_compute_hmac(packet, group, computed_hmac);
	if (error) {
		goto done;
	}

	if (cc_cmp_safe(sizeof(packet_hmac), packet_hmac, computed_hmac)) {
		FDLOG0(LOG_WARNING, &nil_pcb, "HMAC in token does not match computed HMAC");
		error = EINVAL;
		goto done;
	}

done:
	lck_rw_done(&group->lck);
	lck_rw_done(&g_flow_divert_group_lck);
	return error;
}
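
/*
 * Connect tokens handed to a socket are authenticated before they are
 * trusted: the agent provisions a per-group token key (see
 * flow_divert_handle_group_init), and flow_divert_packet_verify_hmac
 * recomputes an HMAC-SHA1 over the token with that key, strips the
 * FLOW_DIVERT_TLV_HMAC TLV, and compares the two digests with cc_cmp_safe()
 * so the comparison is constant time.
 */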
static void
flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, int data_len, Boolean send)
{
	struct inpcb    *inp = NULL;
	struct ifnet    *ifp = NULL;
	Boolean         cell = FALSE;
	Boolean         wifi = FALSE;
	Boolean         wired = FALSE;

	inp = sotoinpcb(fd_cb->so);
	if (inp == NULL) {
		return;
	}

	ifp = inp->inp_last_outifp;
	if (ifp != NULL) {
		cell = IFNET_IS_CELLULAR(ifp);
		wifi = (!cell && IFNET_IS_WIFI(ifp));
		wired = (!wifi && IFNET_IS_WIRED(ifp));
	}

	if (send) {
		INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1);
		INP_ADD_STAT(inp, cell, wifi, wired, txbytes, data_len);
	} else {
		INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
		INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, data_len);
	}
	inp_set_activity_bitmap(inp);
}
static errno_t
flow_divert_check_no_cellular(struct flow_divert_pcb *fd_cb)
{
	struct inpcb *inp = NULL;

	inp = sotoinpcb(fd_cb->so);
	if (inp && INP_NO_CELLULAR(inp) && inp->inp_last_outifp &&
	    IFNET_IS_CELLULAR(inp->inp_last_outifp)) {
		return EHOSTUNREACH;
	}

	return 0;
}

static errno_t
flow_divert_check_no_expensive(struct flow_divert_pcb *fd_cb)
{
	struct inpcb *inp = NULL;

	inp = sotoinpcb(fd_cb->so);
	if (inp && INP_NO_EXPENSIVE(inp) && inp->inp_last_outifp &&
	    IFNET_IS_EXPENSIVE(inp->inp_last_outifp)) {
		return EHOSTUNREACH;
	}

	return 0;
}

static errno_t
flow_divert_check_no_constrained(struct flow_divert_pcb *fd_cb)
{
	struct inpcb *inp = NULL;

	inp = sotoinpcb(fd_cb->so);
	if (inp && INP_NO_CONSTRAINED(inp) && inp->inp_last_outifp &&
	    IFNET_IS_CONSTRAINED(inp->inp_last_outifp)) {
		return EHOSTUNREACH;
	}

	return 0;
}
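
/*
 * Four flag bits track shutdown state: FLOW_DIVERT_READ_CLOSED and
 * FLOW_DIVERT_WRITE_CLOSED record what the local socket has shut down, while
 * FLOW_DIVERT_TUNNEL_RD_CLOSED and FLOW_DIVERT_TUNNEL_WR_CLOSED record what
 * has been communicated to (or by) the tunnel. The helper below updates the
 * socket-side bits, and flow_divert_send_close_if_needed later reconciles the
 * two sets by sending a close message for any direction the socket has closed
 * but the tunnel has not.
 */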
static void
flow_divert_update_closed_state(struct flow_divert_pcb *fd_cb, int how, Boolean tunnel)
{
	if (how != SHUT_RD) {
		fd_cb->flags |= FLOW_DIVERT_WRITE_CLOSED;
		if (tunnel || !(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) {
			fd_cb->flags |= FLOW_DIVERT_TUNNEL_WR_CLOSED;
			/* If the tunnel is not accepting writes any more, then flush the send buffer */
			sbflush(&fd_cb->so->so_snd);
		}
	}

	if (how != SHUT_WR) {
		fd_cb->flags |= FLOW_DIVERT_READ_CLOSED;
		if (tunnel || !(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) {
			fd_cb->flags |= FLOW_DIVERT_TUNNEL_RD_CLOSED;
		}
	}
}
static uint16_t
trie_node_alloc(struct flow_divert_trie *trie)
{
	if (trie->nodes_free_next < trie->nodes_count) {
		uint16_t node_idx = trie->nodes_free_next++;
		TRIE_NODE(trie, node_idx).child_map = NULL_TRIE_IDX;
		return node_idx;
	}
	return NULL_TRIE_IDX;
}

static uint16_t
trie_child_map_alloc(struct flow_divert_trie *trie)
{
	if (trie->child_maps_free_next < trie->child_maps_count) {
		return trie->child_maps_free_next++;
	}
	return NULL_TRIE_IDX;
}
static uint16_t
trie_bytes_move(struct flow_divert_trie *trie, uint16_t bytes_idx, size_t bytes_size)
{
	uint16_t start = trie->bytes_free_next;
	if (start + bytes_size <= trie->bytes_count) {
		if (start != bytes_idx) {
			memmove(&TRIE_BYTE(trie, start), &TRIE_BYTE(trie, bytes_idx), bytes_size);
		}
		trie->bytes_free_next += bytes_size;
		return start;
	}
	return NULL_TRIE_IDX;
}
static uint16_t
flow_divert_trie_insert(struct flow_divert_trie *trie, uint16_t string_start, size_t string_len)
{
	uint16_t current = trie->root;
	uint16_t child = trie->root;
	uint16_t string_end = string_start + string_len;
	uint16_t string_idx = string_start;
	uint16_t string_remainder = string_len;

	while (child != NULL_TRIE_IDX) {
		uint16_t parent = current;
		uint16_t node_idx;
		uint16_t current_end;

		current = child;
		child = NULL_TRIE_IDX;

		current_end = TRIE_NODE(trie, current).start + TRIE_NODE(trie, current).length;

		for (node_idx = TRIE_NODE(trie, current).start;
		    node_idx < current_end &&
		    string_idx < string_end &&
		    TRIE_BYTE(trie, node_idx) == TRIE_BYTE(trie, string_idx);
		    node_idx++, string_idx++) {
			;
		}

		string_remainder = string_end - string_idx;

		if (node_idx < (TRIE_NODE(trie, current).start + TRIE_NODE(trie, current).length)) {
			/*
			 * We did not reach the end of the current node's string.
			 * We need to split the current node into two:
			 * 1. A new node that contains the prefix of the node that matches
			 *    the prefix of the string being inserted.
			 * 2. The current node modified to point to the remainder
			 *    of the current node's string.
			 */
			uint16_t prefix = trie_node_alloc(trie);
			if (prefix == NULL_TRIE_IDX) {
				FDLOG0(LOG_ERR, &nil_pcb, "Ran out of trie nodes while splitting an existing node");
				return NULL_TRIE_IDX;
			}

			/*
			 * Prefix points to the portion of the current node's string that has matched
			 * the input string thus far.
			 */
			TRIE_NODE(trie, prefix).start = TRIE_NODE(trie, current).start;
			TRIE_NODE(trie, prefix).length = (node_idx - TRIE_NODE(trie, current).start);

			/*
			 * Prefix has the current node as the child corresponding to the first byte
			 * after the split.
			 */
			TRIE_NODE(trie, prefix).child_map = trie_child_map_alloc(trie);
			if (TRIE_NODE(trie, prefix).child_map == NULL_TRIE_IDX) {
				FDLOG0(LOG_ERR, &nil_pcb, "Ran out of child maps while splitting an existing node");
				return NULL_TRIE_IDX;
			}
			TRIE_CHILD(trie, prefix, TRIE_BYTE(trie, node_idx)) = current;

			/* Parent has the prefix as the child corresponding to the first byte in the prefix */
			TRIE_CHILD(trie, parent, TRIE_BYTE(trie, TRIE_NODE(trie, prefix).start)) = prefix;

			/* Current node is adjusted to point to the remainder */
			TRIE_NODE(trie, current).start = node_idx;
			TRIE_NODE(trie, current).length -= TRIE_NODE(trie, prefix).length;

			/* We want to insert the new leaf (if any) as a child of the prefix */
			current = prefix;
		}

		if (string_remainder > 0) {
			/*
			 * We still have bytes in the string that have not been matched yet.
			 * If the current node has children, iterate to the child corresponding
			 * to the next byte in the string.
			 */
			if (TRIE_NODE(trie, current).child_map != NULL_TRIE_IDX) {
				child = TRIE_CHILD(trie, current, TRIE_BYTE(trie, string_idx));
			}
		}
	} /* while (child != NULL_TRIE_IDX) */

	if (string_remainder > 0) {
		/* Add a new leaf containing the remainder of the string */
		uint16_t leaf = trie_node_alloc(trie);
		if (leaf == NULL_TRIE_IDX) {
			FDLOG0(LOG_ERR, &nil_pcb, "Ran out of trie nodes while inserting a new leaf");
			return NULL_TRIE_IDX;
		}

		TRIE_NODE(trie, leaf).start = trie_bytes_move(trie, string_idx, string_remainder);
		if (TRIE_NODE(trie, leaf).start == NULL_TRIE_IDX) {
			FDLOG0(LOG_ERR, &nil_pcb, "Ran out of bytes while inserting a new leaf");
			return NULL_TRIE_IDX;
		}
		TRIE_NODE(trie, leaf).length = string_remainder;

		/* Set the new leaf as the child of the current node */
		if (TRIE_NODE(trie, current).child_map == NULL_TRIE_IDX) {
			TRIE_NODE(trie, current).child_map = trie_child_map_alloc(trie);
			if (TRIE_NODE(trie, current).child_map == NULL_TRIE_IDX) {
				FDLOG0(LOG_ERR, &nil_pcb, "Ran out of child maps while inserting a new leaf");
				return NULL_TRIE_IDX;
			}
		}
		TRIE_CHILD(trie, current, TRIE_BYTE(trie, TRIE_NODE(trie, leaf).start)) = leaf;
		current = leaf;
	} /* else duplicate or this string is a prefix of one of the existing strings */

	return current;
}
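
/*
 * Example of how insertion shapes the trie: inserting "com.apple.app1" into
 * an empty trie creates a single leaf holding the whole string. Inserting
 * "com.apple.app2" then walks that leaf, stops at the first mismatching byte,
 * splits the existing node into a "com.apple.app" prefix node with a child
 * map, re-parents the old node (now just "1") under it, and adds a new leaf
 * for "2". Strings that are exact duplicates or prefixes of an existing entry
 * fall through to the final else case above and add nothing.
 */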
#define APPLE_WEBCLIP_ID_PREFIX "com.apple.webapp"

static uint16_t
flow_divert_trie_search(struct flow_divert_trie *trie, uint8_t *string_bytes)
{
	uint16_t current = trie->root;
	uint16_t string_idx = 0;

	while (current != NULL_TRIE_IDX) {
		uint16_t next = NULL_TRIE_IDX;
		uint16_t node_end = TRIE_NODE(trie, current).start + TRIE_NODE(trie, current).length;
		uint16_t node_idx;

		for (node_idx = TRIE_NODE(trie, current).start;
		    node_idx < node_end && string_bytes[string_idx] != '\0' && string_bytes[string_idx] == TRIE_BYTE(trie, node_idx);
		    node_idx++, string_idx++) {
			;
		}

		if (node_idx == node_end) {
			if (string_bytes[string_idx] == '\0') {
				return current; /* Got an exact match */
			} else if (string_idx == strlen(APPLE_WEBCLIP_ID_PREFIX) &&
			    0 == strncmp((const char *)string_bytes, APPLE_WEBCLIP_ID_PREFIX, string_idx)) {
				string_bytes[string_idx] = '\0';
				return current; /* Got an Apple webclip id prefix match */
			} else if (TRIE_NODE(trie, current).child_map != NULL_TRIE_IDX) {
				next = TRIE_CHILD(trie, current, string_bytes[string_idx]);
			}
		}

		current = next;
	}

	return NULL_TRIE_IDX;
}
struct uuid_search_info {
	uuid_t target_uuid;
	char *found_signing_id;
	boolean_t found_multiple_signing_ids;
	proc_t found_proc;
};

static int
flow_divert_find_proc_by_uuid_callout(proc_t p, void *arg)
{
	struct uuid_search_info *info = (struct uuid_search_info *)arg;
	int result = PROC_RETURNED_DONE; /* By default, we didn't find the process */

	if (info->found_signing_id != NULL) {
		if (!info->found_multiple_signing_ids) {
			/* All processes that were found had the same signing identifier, so just claim this first one and be done. */
			info->found_proc = p;
			result = PROC_CLAIMED_DONE;
		} else {
			uuid_string_t uuid_str;
			uuid_unparse(info->target_uuid, uuid_str);
			FDLOG(LOG_WARNING, &nil_pcb, "Found multiple processes with UUID %s with different signing identifiers", uuid_str);
		}
		FREE(info->found_signing_id, M_TEMP);
		info->found_signing_id = NULL;
	}

	if (result == PROC_RETURNED_DONE) {
		uuid_string_t uuid_str;
		uuid_unparse(info->target_uuid, uuid_str);
		FDLOG(LOG_WARNING, &nil_pcb, "Failed to find a process with UUID %s", uuid_str);
	}

	return result;
}

static int
flow_divert_find_proc_by_uuid_filter(proc_t p, void *arg)
{
	struct uuid_search_info *info = (struct uuid_search_info *)arg;
	int include = 0;

	if (info->found_multiple_signing_ids) {
		return include;
	}

	include = (uuid_compare(p->p_uuid, info->target_uuid) == 0);
	if (include) {
		const char *signing_id = cs_identity_get(p);
		if (signing_id != NULL) {
			FDLOG(LOG_INFO, &nil_pcb, "Found process %d with signing identifier %s", p->p_pid, signing_id);
			size_t signing_id_size = strlen(signing_id) + 1;
			if (info->found_signing_id == NULL) {
				MALLOC(info->found_signing_id, char *, signing_id_size, M_TEMP, M_WAITOK);
				memcpy(info->found_signing_id, signing_id, signing_id_size);
			} else if (memcmp(signing_id, info->found_signing_id, signing_id_size)) {
				info->found_multiple_signing_ids = TRUE;
			}
		} else {
			info->found_multiple_signing_ids = TRUE;
		}
		include = !info->found_multiple_signing_ids;
	}

	return include;
}
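
/*
 * flow_divert_find_proc_by_uuid() drives proc_iterate() with the filter and
 * callout above: the filter selects processes whose p_uuid matches the target
 * and records their code signing identifier, flagging the search as ambiguous
 * if two matching processes carry different identifiers; the callout then
 * claims the first match only when the identifier was unambiguous, so a
 * delegated socket is never attributed to the wrong signing identity.
 */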
static proc_t
flow_divert_find_proc_by_uuid(uuid_t uuid)
{
	struct uuid_search_info info;

	if (LOG_INFO <= nil_pcb.log_level) {
		uuid_string_t uuid_str;
		uuid_unparse(uuid, uuid_str);
		FDLOG(LOG_INFO, &nil_pcb, "Looking for process with UUID %s", uuid_str);
	}

	memset(&info, 0, sizeof(info));
	info.found_proc = PROC_NULL;
	uuid_copy(info.target_uuid, uuid);

	proc_iterate(PROC_ALLPROCLIST, flow_divert_find_proc_by_uuid_callout, &info, flow_divert_find_proc_by_uuid_filter, &info);

	return info.found_proc;
}
static int
flow_divert_get_src_proc(struct socket *so, proc_t *proc)
{
	int release = 0;

	if (so->so_flags & SOF_DELEGATED) {
		if ((*proc)->p_pid != so->e_pid) {
			*proc = proc_find(so->e_pid);
			release = 1;
		} else if (uuid_compare((*proc)->p_uuid, so->e_uuid)) {
			*proc = flow_divert_find_proc_by_uuid(so->e_uuid);
			release = 1;
		}
	} else if (*proc == PROC_NULL) {
		*proc = current_proc();
	}

	if (*proc != PROC_NULL) {
		if ((*proc)->p_pid == 0) {
			if (release) {
				proc_rele(*proc);
			}
			release = 0;
			*proc = PROC_NULL;
		}
	}

	return release;
}
static errno_t
flow_divert_send_packet(struct flow_divert_pcb *fd_cb, mbuf_t packet, Boolean enqueue)
{
	errno_t error;

	if (fd_cb->group == NULL) {
		fd_cb->so->so_error = ECONNABORTED;
		flow_divert_disconnect_socket(fd_cb->so);
		return ECONNABORTED;
	}

	lck_rw_lock_shared(&fd_cb->group->lck);

	if (MBUFQ_EMPTY(&fd_cb->group->send_queue)) {
		error = ctl_enqueuembuf(g_flow_divert_kctl_ref, fd_cb->group->ctl_unit, packet, CTL_DATA_EOR);
	} else {
		error = ENOBUFS;
	}

	if (error == ENOBUFS) {
		if (enqueue) {
			if (!lck_rw_lock_shared_to_exclusive(&fd_cb->group->lck)) {
				lck_rw_lock_exclusive(&fd_cb->group->lck);
			}
			MBUFQ_ENQUEUE(&fd_cb->group->send_queue, packet);
			error = 0;
			OSTestAndSet(GROUP_BIT_CTL_ENQUEUE_BLOCKED, &fd_cb->group->atomic_bits);
		}
	}

	lck_rw_done(&fd_cb->group->lck);

	return error;
}
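
/*
 * The connect packet sent to the agent carries everything needed to make a
 * policy decision and open the flow: the verified signing ID and cdhash (or
 * the app audit token), traffic class, flow type (TCP or UDP), the effective
 * PID/UUID of the originating process, either the connect token or the
 * control unit plus target endpoint, the local address if the socket is
 * already bound, and a TFO flag when idempotent data is allowed.
 */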
1021 flow_divert_create_connect_packet(struct flow_divert_pcb
*fd_cb
, struct sockaddr
*to
, struct socket
*so
, proc_t p
, mbuf_t
*out_connect_packet
)
1025 char *signing_id
= NULL
;
1026 int free_signing_id
= 0;
1027 mbuf_t connect_packet
= NULL
;
1028 proc_t src_proc
= p
;
1029 int release_proc
= 0;
1031 error
= flow_divert_packet_init(fd_cb
, FLOW_DIVERT_PKT_CONNECT
, &connect_packet
);
1038 if (fd_cb
->connect_token
!= NULL
&& (fd_cb
->flags
& FLOW_DIVERT_HAS_HMAC
)) {
1039 uint32_t sid_size
= 0;
1040 int find_error
= flow_divert_packet_get_tlv(fd_cb
->connect_token
, 0, FLOW_DIVERT_TLV_SIGNING_ID
, 0, NULL
, &sid_size
);
1041 if (find_error
== 0 && sid_size
> 0) {
1042 MALLOC(signing_id
, char *, sid_size
+ 1, M_TEMP
, M_WAITOK
| M_ZERO
);
1043 if (signing_id
!= NULL
) {
1044 flow_divert_packet_get_tlv(fd_cb
->connect_token
, 0, FLOW_DIVERT_TLV_SIGNING_ID
, sid_size
, signing_id
, NULL
);
1045 FDLOG(LOG_INFO
, fd_cb
, "Got %s from token", signing_id
);
1046 free_signing_id
= 1;
1051 socket_unlock(so
, 0);
1053 release_proc
= flow_divert_get_src_proc(so
, &src_proc
);
1054 if (src_proc
!= PROC_NULL
) {
1055 proc_lock(src_proc
);
1056 if (signing_id
== NULL
) {
1057 if (src_proc
->p_csflags
& (CS_VALID
| CS_DEBUGGED
)) {
1059 cs_id
= cs_identity_get(src_proc
);
1060 signing_id
= __DECONST(char *, cs_id
);
1062 FDLOG0(LOG_WARNING
, fd_cb
, "Signature is invalid");
1066 FDLOG0(LOG_WARNING
, fd_cb
, "Failed to determine the current proc");
1069 if (signing_id
!= NULL
) {
1070 uint16_t result
= NULL_TRIE_IDX
;
1071 lck_rw_lock_shared(&fd_cb
->group
->lck
);
1072 if (fd_cb
->group
->flags
& FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP
) {
1075 result
= flow_divert_trie_search(&fd_cb
->group
->signing_id_trie
, (uint8_t *)signing_id
);
1077 lck_rw_done(&fd_cb
->group
->lck
);
1078 if (result
!= NULL_TRIE_IDX
) {
1080 FDLOG(LOG_INFO
, fd_cb
, "%s matched", signing_id
);
1082 error
= flow_divert_packet_append_tlv(connect_packet
, FLOW_DIVERT_TLV_SIGNING_ID
, strlen(signing_id
), signing_id
);
1084 if (src_proc
!= PROC_NULL
) {
1085 unsigned char cdhash
[SHA1_RESULTLEN
];
1086 error
= proc_getcdhash(src_proc
, cdhash
);
1088 error
= flow_divert_packet_append_tlv(connect_packet
, FLOW_DIVERT_TLV_CDHASH
, sizeof(cdhash
), cdhash
);
1090 FDLOG(LOG_ERR
, fd_cb
, "failed to append the cdhash: %d", error
);
1093 FDLOG(LOG_ERR
, fd_cb
, "failed to get the cdhash: %d", error
);
1097 FDLOG(LOG_ERR
, fd_cb
, "failed to append the signing ID: %d", error
);
1100 FDLOG(LOG_WARNING
, fd_cb
, "%s did not match", signing_id
);
1103 FDLOG0(LOG_WARNING
, fd_cb
, "Failed to get the code signing identity");
1104 if (fd_cb
->group
->flags
& FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP
) {
1109 if (error
== 0 && src_proc
!= PROC_NULL
) {
1110 task_t task
= proc_task(src_proc
);
1111 if (task
!= TASK_NULL
) {
1112 audit_token_t audit_token
;
1113 mach_msg_type_number_t count
= TASK_AUDIT_TOKEN_COUNT
;
1114 kern_return_t rc
= task_info(task
, TASK_AUDIT_TOKEN
, (task_info_t
)&audit_token
, &count
);
1115 if (rc
== KERN_SUCCESS
) {
1116 error
= flow_divert_packet_append_tlv(connect_packet
,
1117 FLOW_DIVERT_TLV_APP_AUDIT_TOKEN
,
1118 sizeof(audit_token_t
),
1121 FDLOG(LOG_ERR
, fd_cb
, "failed to append app audit token: %d", error
);
1122 error
= 0; /* do not treat this as fatal error, proceed */
1125 FDLOG(LOG_ERR
, fd_cb
, "failed to retrieve app audit token: %d", rc
);
1130 if (src_proc
!= PROC_NULL
) {
1131 proc_unlock(src_proc
);
1133 proc_rele(src_proc
);
1138 if (free_signing_id
) {
1139 FREE(signing_id
, M_TEMP
);
1146 error
= flow_divert_packet_append_tlv(connect_packet
,
1147 FLOW_DIVERT_TLV_TRAFFIC_CLASS
,
1148 sizeof(fd_cb
->so
->so_traffic_class
),
1149 &fd_cb
->so
->so_traffic_class
);
1154 if (SOCK_TYPE(fd_cb
->so
) == SOCK_STREAM
) {
1155 flow_type
= FLOW_DIVERT_FLOW_TYPE_TCP
;
1156 } else if (SOCK_TYPE(fd_cb
->so
) == SOCK_DGRAM
) {
1157 flow_type
= FLOW_DIVERT_FLOW_TYPE_UDP
;
1162 error
= flow_divert_packet_append_tlv(connect_packet
,
1163 FLOW_DIVERT_TLV_FLOW_TYPE
,
1171 if (fd_cb
->so
->so_flags
& SOF_DELEGATED
) {
1172 error
= flow_divert_packet_append_tlv(connect_packet
,
1173 FLOW_DIVERT_TLV_PID
,
1174 sizeof(fd_cb
->so
->e_pid
),
1180 error
= flow_divert_packet_append_tlv(connect_packet
,
1181 FLOW_DIVERT_TLV_UUID
,
1182 sizeof(fd_cb
->so
->e_uuid
),
1183 &fd_cb
->so
->e_uuid
);
1188 error
= flow_divert_packet_append_tlv(connect_packet
,
1189 FLOW_DIVERT_TLV_PID
,
1190 sizeof(fd_cb
->so
->e_pid
),
1191 &fd_cb
->so
->last_pid
);
1196 error
= flow_divert_packet_append_tlv(connect_packet
,
1197 FLOW_DIVERT_TLV_UUID
,
1198 sizeof(fd_cb
->so
->e_uuid
),
1199 &fd_cb
->so
->last_uuid
);
1205 if (fd_cb
->connect_token
!= NULL
) {
1206 unsigned int token_len
= m_length(fd_cb
->connect_token
);
1207 mbuf_concatenate(connect_packet
, fd_cb
->connect_token
);
1208 mbuf_pkthdr_adjustlen(connect_packet
, token_len
);
1209 fd_cb
->connect_token
= NULL
;
1211 uint32_t ctl_unit
= htonl(fd_cb
->control_group_unit
);
1213 error
= flow_divert_packet_append_tlv(connect_packet
, FLOW_DIVERT_TLV_CTL_UNIT
, sizeof(ctl_unit
), &ctl_unit
);
1218 error
= flow_divert_append_target_endpoint_tlv(connect_packet
, to
);
1224 if (fd_cb
->local_address
!= NULL
) {
1228 struct inpcb
*inp
= sotoinpcb(so
);
1229 if (flow_divert_has_pcb_local_address(inp
)) {
1230 error
= flow_divert_inp_to_sockaddr(inp
, &fd_cb
->local_address
);
1232 FDLOG0(LOG_ERR
, fd_cb
, "failed to get the local socket address.");
1238 if (fd_cb
->local_address
!= NULL
) {
1239 /* socket is bound. */
1240 error
= flow_divert_packet_append_tlv(connect_packet
, FLOW_DIVERT_TLV_LOCAL_ADDR
,
1241 fd_cb
->local_address
->sa_len
, fd_cb
->local_address
);
1247 if (so
->so_flags1
& SOF1_DATA_IDEMPOTENT
) {
1248 uint32_t flags
= FLOW_DIVERT_TOKEN_FLAG_TFO
;
1249 error
= flow_divert_packet_append_tlv(connect_packet
, FLOW_DIVERT_TLV_FLAGS
, sizeof(flags
), &flags
);
1257 *out_connect_packet
= connect_packet
;
1258 } else if (connect_packet
!= NULL
) {
1259 mbuf_freem(connect_packet
);
1266 flow_divert_send_connect_result(struct flow_divert_pcb
*fd_cb
)
1269 mbuf_t packet
= NULL
;
1270 int rbuff_space
= 0;
1272 error
= flow_divert_packet_init(fd_cb
, FLOW_DIVERT_PKT_CONNECT_RESULT
, &packet
);
1274 FDLOG(LOG_ERR
, fd_cb
, "failed to create a connect result packet: %d", error
);
1278 rbuff_space
= fd_cb
->so
->so_rcv
.sb_hiwat
;
1279 if (rbuff_space
< 0) {
1282 rbuff_space
= htonl(rbuff_space
);
1283 error
= flow_divert_packet_append_tlv(packet
,
1284 FLOW_DIVERT_TLV_SPACE_AVAILABLE
,
1285 sizeof(rbuff_space
),
1291 error
= flow_divert_send_packet(fd_cb
, packet
, TRUE
);
1297 if (error
&& packet
!= NULL
) {
1305 flow_divert_send_close(struct flow_divert_pcb
*fd_cb
, int how
)
1308 mbuf_t packet
= NULL
;
1311 error
= flow_divert_packet_init(fd_cb
, FLOW_DIVERT_PKT_CLOSE
, &packet
);
1313 FDLOG(LOG_ERR
, fd_cb
, "failed to create a close packet: %d", error
);
1317 error
= flow_divert_packet_append_tlv(packet
, FLOW_DIVERT_TLV_ERROR_CODE
, sizeof(zero
), &zero
);
1319 FDLOG(LOG_ERR
, fd_cb
, "failed to add the error code TLV: %d", error
);
1324 error
= flow_divert_packet_append_tlv(packet
, FLOW_DIVERT_TLV_HOW
, sizeof(how
), &how
);
1326 FDLOG(LOG_ERR
, fd_cb
, "failed to add the how flag: %d", error
);
1330 error
= flow_divert_send_packet(fd_cb
, packet
, TRUE
);
1336 if (error
&& packet
!= NULL
) {
1344 flow_divert_tunnel_how_closed(struct flow_divert_pcb
*fd_cb
)
1346 if ((fd_cb
->flags
& (FLOW_DIVERT_TUNNEL_RD_CLOSED
| FLOW_DIVERT_TUNNEL_WR_CLOSED
)) ==
1347 (FLOW_DIVERT_TUNNEL_RD_CLOSED
| FLOW_DIVERT_TUNNEL_WR_CLOSED
)) {
1349 } else if (fd_cb
->flags
& FLOW_DIVERT_TUNNEL_RD_CLOSED
) {
1351 } else if (fd_cb
->flags
& FLOW_DIVERT_TUNNEL_WR_CLOSED
) {
1359 * Determine what close messages if any need to be sent to the tunnel. Returns TRUE if the tunnel is closed for both reads and
1360 * writes. Returns FALSE otherwise.
1363 flow_divert_send_close_if_needed(struct flow_divert_pcb
*fd_cb
)
1367 /* Do not send any close messages if there is still data in the send buffer */
1368 if (fd_cb
->so
->so_snd
.sb_cc
== 0) {
1369 if ((fd_cb
->flags
& (FLOW_DIVERT_READ_CLOSED
| FLOW_DIVERT_TUNNEL_RD_CLOSED
)) == FLOW_DIVERT_READ_CLOSED
) {
1370 /* Socket closed reads, but tunnel did not. Tell tunnel to close reads */
1373 if ((fd_cb
->flags
& (FLOW_DIVERT_WRITE_CLOSED
| FLOW_DIVERT_TUNNEL_WR_CLOSED
)) == FLOW_DIVERT_WRITE_CLOSED
) {
1374 /* Socket closed writes, but tunnel did not. Tell tunnel to close writes */
1375 if (how
== SHUT_RD
) {
1384 FDLOG(LOG_INFO
, fd_cb
, "sending close, how = %d", how
);
1385 if (flow_divert_send_close(fd_cb
, how
) != ENOBUFS
) {
1386 /* Successfully sent the close packet. Record the ways in which the tunnel has been closed */
1387 if (how
!= SHUT_RD
) {
1388 fd_cb
->flags
|= FLOW_DIVERT_TUNNEL_WR_CLOSED
;
1390 if (how
!= SHUT_WR
) {
1391 fd_cb
->flags
|= FLOW_DIVERT_TUNNEL_RD_CLOSED
;
1396 if (flow_divert_tunnel_how_closed(fd_cb
) == SHUT_RDWR
) {
1397 flow_divert_disconnect_socket(fd_cb
->so
);
1402 flow_divert_send_data_packet(struct flow_divert_pcb
*fd_cb
, mbuf_t data
, size_t data_len
, struct sockaddr
*toaddr
, Boolean force
)
1408 error
= flow_divert_packet_init(fd_cb
, FLOW_DIVERT_PKT_DATA
, &packet
);
1410 FDLOG(LOG_ERR
, fd_cb
, "flow_divert_packet_init failed: %d", error
);
1414 if (toaddr
!= NULL
) {
1415 error
= flow_divert_append_target_endpoint_tlv(packet
, toaddr
);
1417 FDLOG(LOG_ERR
, fd_cb
, "flow_divert_append_target_endpoint_tlv() failed: %d", error
);
1422 if (data_len
> 0 && data
!= NULL
) {
1423 last
= m_last(packet
);
1424 mbuf_setnext(last
, data
);
1425 mbuf_pkthdr_adjustlen(packet
, data_len
);
1427 error
= flow_divert_send_packet(fd_cb
, packet
, force
);
1430 mbuf_setnext(last
, NULL
);
1433 fd_cb
->bytes_sent
+= data_len
;
1434 flow_divert_add_data_statistics(fd_cb
, data_len
, TRUE
);
1441 flow_divert_send_buffered_data(struct flow_divert_pcb
*fd_cb
, Boolean force
)
1448 to_send
= fd_cb
->so
->so_snd
.sb_cc
;
1449 buffer
= fd_cb
->so
->so_snd
.sb_mb
;
1451 if (buffer
== NULL
&& to_send
> 0) {
1452 FDLOG(LOG_ERR
, fd_cb
, "Send buffer is NULL, but size is supposed to be %lu", to_send
);
1456 /* Ignore the send window if force is enabled */
1457 if (!force
&& (to_send
> fd_cb
->send_window
)) {
1458 to_send
= fd_cb
->send_window
;
1461 if (SOCK_TYPE(fd_cb
->so
) == SOCK_STREAM
) {
1462 while (sent
< to_send
) {
1466 data_len
= to_send
- sent
;
1467 if (data_len
> FLOW_DIVERT_CHUNK_SIZE
) {
1468 data_len
= FLOW_DIVERT_CHUNK_SIZE
;
1471 error
= mbuf_copym(buffer
, sent
, data_len
, MBUF_DONTWAIT
, &data
);
1473 FDLOG(LOG_ERR
, fd_cb
, "mbuf_copym failed: %d", error
);
1477 error
= flow_divert_send_data_packet(fd_cb
, data
, data_len
, NULL
, force
);
1485 sbdrop(&fd_cb
->so
->so_snd
, sent
);
1486 sowwakeup(fd_cb
->so
);
1487 } else if (SOCK_TYPE(fd_cb
->so
) == SOCK_DGRAM
) {
1493 struct sockaddr
*toaddr
= flow_divert_get_buffered_target_address(buffer
);
1496 if (toaddr
!= NULL
) {
1497 /* look for data in the chain */
1500 if (m
!= NULL
&& m
->m_type
== MT_DATA
) {
1506 FDLOG0(LOG_ERR
, fd_cb
, "failed to find type MT_DATA in the mbuf chain.");
1510 data_len
= mbuf_pkthdr_len(m
);
1512 FDLOG(LOG_DEBUG
, fd_cb
, "mbuf_copym() data_len = %lu", data_len
);
1513 error
= mbuf_copym(m
, 0, data_len
, MBUF_DONTWAIT
, &data
);
1515 FDLOG(LOG_ERR
, fd_cb
, "mbuf_copym failed: %d", error
);
1521 error
= flow_divert_send_data_packet(fd_cb
, data
, data_len
, toaddr
, force
);
1528 buffer
= buffer
->m_nextpkt
;
1529 (void) sbdroprecord(&(fd_cb
->so
->so_snd
));
1534 FDLOG(LOG_DEBUG
, fd_cb
, "sent %lu bytes of buffered data", sent
);
1535 if (fd_cb
->send_window
>= sent
) {
1536 fd_cb
->send_window
-= sent
;
1538 fd_cb
->send_window
= 0;
1544 flow_divert_send_app_data(struct flow_divert_pcb
*fd_cb
, mbuf_t data
, struct sockaddr
*toaddr
)
1546 size_t to_send
= mbuf_pkthdr_len(data
);
1549 if (to_send
> fd_cb
->send_window
) {
1550 to_send
= fd_cb
->send_window
;
1553 if (fd_cb
->so
->so_snd
.sb_cc
> 0) {
1554 to_send
= 0; /* If the send buffer is non-empty, then we can't send anything */
1557 if (SOCK_TYPE(fd_cb
->so
) == SOCK_STREAM
) {
1559 mbuf_t remaining_data
= data
;
1560 mbuf_t pkt_data
= NULL
;
1561 while (sent
< to_send
&& remaining_data
!= NULL
) {
1562 size_t pkt_data_len
;
1564 pkt_data
= remaining_data
;
1566 if ((to_send
- sent
) > FLOW_DIVERT_CHUNK_SIZE
) {
1567 pkt_data_len
= FLOW_DIVERT_CHUNK_SIZE
;
1569 pkt_data_len
= to_send
- sent
;
1572 if (pkt_data_len
< mbuf_pkthdr_len(pkt_data
)) {
1573 error
= mbuf_split(pkt_data
, pkt_data_len
, MBUF_DONTWAIT
, &remaining_data
);
1575 FDLOG(LOG_ERR
, fd_cb
, "mbuf_split failed: %d", error
);
1580 remaining_data
= NULL
;
1583 error
= flow_divert_send_data_packet(fd_cb
, pkt_data
, pkt_data_len
, NULL
, FALSE
);
1590 sent
+= pkt_data_len
;
1593 fd_cb
->send_window
-= sent
;
1597 if (pkt_data
!= NULL
) {
1598 if (sbspace(&fd_cb
->so
->so_snd
) > 0) {
1599 if (!sbappendstream(&fd_cb
->so
->so_snd
, pkt_data
)) {
1600 FDLOG(LOG_ERR
, fd_cb
, "sbappendstream failed with pkt_data, send buffer size = %u, send_window = %u\n",
1601 fd_cb
->so
->so_snd
.sb_cc
, fd_cb
->send_window
);
1608 if (remaining_data
!= NULL
) {
1609 if (sbspace(&fd_cb
->so
->so_snd
) > 0) {
1610 if (!sbappendstream(&fd_cb
->so
->so_snd
, remaining_data
)) {
1611 FDLOG(LOG_ERR
, fd_cb
, "sbappendstream failed with remaining_data, send buffer size = %u, send_window = %u\n",
1612 fd_cb
->so
->so_snd
.sb_cc
, fd_cb
->send_window
);
1618 } else if (SOCK_TYPE(fd_cb
->so
) == SOCK_DGRAM
) {
1619 if (to_send
|| mbuf_pkthdr_len(data
) == 0) {
1620 error
= flow_divert_send_data_packet(fd_cb
, data
, to_send
, toaddr
, FALSE
);
1622 FDLOG(LOG_ERR
, fd_cb
, "flow_divert_send_data_packet failed. send data size = %lu", to_send
);
1624 fd_cb
->send_window
-= to_send
;
1628 if (sbspace(&fd_cb
->so
->so_snd
) >= (int)mbuf_pkthdr_len(data
)) {
1629 if (toaddr
!= NULL
) {
1630 if (!sbappendaddr(&fd_cb
->so
->so_snd
, toaddr
, data
, NULL
, &error
)) {
1631 FDLOG(LOG_ERR
, fd_cb
,
1632 "sbappendaddr failed. send buffer size = %u, send_window = %u, error = %d\n",
1633 fd_cb
->so
->so_snd
.sb_cc
, fd_cb
->send_window
, error
);
1636 if (!sbappendrecord(&fd_cb
->so
->so_snd
, data
)) {
1637 FDLOG(LOG_ERR
, fd_cb
,
1638 "sbappendrecord failed. send buffer size = %u, send_window = %u, error = %d\n",
1639 fd_cb
->so
->so_snd
.sb_cc
, fd_cb
->send_window
, error
);
1652 flow_divert_send_read_notification(struct flow_divert_pcb
*fd_cb
, uint32_t read_count
)
1655 mbuf_t packet
= NULL
;
1656 uint32_t net_read_count
= htonl(read_count
);
1658 error
= flow_divert_packet_init(fd_cb
, FLOW_DIVERT_PKT_READ_NOTIFY
, &packet
);
1660 FDLOG(LOG_ERR
, fd_cb
, "failed to create a read notification packet: %d", error
);
1664 error
= flow_divert_packet_append_tlv(packet
, FLOW_DIVERT_TLV_READ_COUNT
, sizeof(net_read_count
), &net_read_count
);
1666 FDLOG(LOG_ERR
, fd_cb
, "failed to add the read count: %d", error
);
1670 error
= flow_divert_send_packet(fd_cb
, packet
, TRUE
);
1676 if (error
&& packet
!= NULL
) {
1684 flow_divert_send_traffic_class_update(struct flow_divert_pcb
*fd_cb
, int traffic_class
)
1687 mbuf_t packet
= NULL
;
1689 error
= flow_divert_packet_init(fd_cb
, FLOW_DIVERT_PKT_PROPERTIES_UPDATE
, &packet
);
1691 FDLOG(LOG_ERR
, fd_cb
, "failed to create a properties update packet: %d", error
);
1695 error
= flow_divert_packet_append_tlv(packet
, FLOW_DIVERT_TLV_TRAFFIC_CLASS
, sizeof(traffic_class
), &traffic_class
);
1697 FDLOG(LOG_ERR
, fd_cb
, "failed to add the traffic class: %d", error
);
1701 error
= flow_divert_send_packet(fd_cb
, packet
, TRUE
);
1707 if (error
&& packet
!= NULL
) {
1715 flow_divert_handle_connect_result(struct flow_divert_pcb
*fd_cb
, mbuf_t packet
, int offset
)
1717 uint32_t connect_error
;
1718 uint32_t ctl_unit
= 0;
1720 struct flow_divert_group
*grp
= NULL
;
1721 struct sockaddr_storage local_address
;
1722 int out_if_index
= 0;
1723 struct sockaddr_storage remote_address
;
1724 uint32_t send_window
;
1725 uint32_t app_data_length
= 0;
1727 memset(&local_address
, 0, sizeof(local_address
));
1728 memset(&remote_address
, 0, sizeof(remote_address
));
1730 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_ERROR_CODE
, sizeof(connect_error
), &connect_error
, NULL
);
1732 FDLOG(LOG_ERR
, fd_cb
, "failed to get the connect result: %d", error
);
1736 FDLOG(LOG_INFO
, fd_cb
, "received connect result %u", connect_error
);
1738 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_SPACE_AVAILABLE
, sizeof(send_window
), &send_window
, NULL
);
1740 FDLOG(LOG_ERR
, fd_cb
, "failed to get the send window: %d", error
);
1744 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_CTL_UNIT
, sizeof(ctl_unit
), &ctl_unit
, NULL
);
1746 FDLOG0(LOG_INFO
, fd_cb
, "No control unit provided in the connect result");
1749 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_LOCAL_ADDR
, sizeof(local_address
), &local_address
, NULL
);
1751 FDLOG0(LOG_INFO
, fd_cb
, "No local address provided");
1754 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_REMOTE_ADDR
, sizeof(remote_address
), &remote_address
, NULL
);
1756 FDLOG0(LOG_INFO
, fd_cb
, "No remote address provided");
1759 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_OUT_IF_INDEX
, sizeof(out_if_index
), &out_if_index
, NULL
);
1761 FDLOG0(LOG_INFO
, fd_cb
, "No output if index provided");
1764 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_APP_DATA
, 0, NULL
, &app_data_length
);
1766 FDLOG0(LOG_INFO
, fd_cb
, "No application data provided in connect result");
1770 connect_error
= ntohl(connect_error
);
1771 ctl_unit
= ntohl(ctl_unit
);
1773 lck_rw_lock_shared(&g_flow_divert_group_lck
);
1775 if (connect_error
== 0 && ctl_unit
> 0) {
1776 if (ctl_unit
>= GROUP_COUNT_MAX
) {
1777 FDLOG(LOG_ERR
, fd_cb
, "Connect result contains an invalid control unit: %u", ctl_unit
);
1779 } else if (g_flow_divert_groups
== NULL
|| g_active_group_count
== 0) {
1780 FDLOG0(LOG_ERR
, fd_cb
, "No active groups, dropping connection");
1783 grp
= g_flow_divert_groups
[ctl_unit
];
1791 if (fd_cb
->so
!= NULL
) {
1792 struct inpcb
*inp
= NULL
;
1793 struct ifnet
*ifp
= NULL
;
1794 struct flow_divert_group
*old_group
;
1796 socket_lock(fd_cb
->so
, 0);
1798 if (!(fd_cb
->so
->so_state
& SS_ISCONNECTING
)) {
1802 inp
= sotoinpcb(fd_cb
->so
);
1804 if (connect_error
|| error
) {
1805 goto set_socket_state
;
1808 if (local_address
.ss_family
== 0 && fd_cb
->local_address
== NULL
) {
1810 goto set_socket_state
;
1812 if (local_address
.ss_family
!= 0 && fd_cb
->local_address
== NULL
) {
1813 if (local_address
.ss_len
> sizeof(local_address
)) {
1814 local_address
.ss_len
= sizeof(local_address
);
1816 fd_cb
->local_address
= dup_sockaddr((struct sockaddr
*)&local_address
, 1);
1818 if (flow_divert_is_sockaddr_valid((struct sockaddr
*)&local_address
)) {
1819 if (inp
->inp_vflag
& INP_IPV4
&& local_address
.ss_family
== AF_INET
) {
1820 struct sockaddr_in
*local_in_address
= (struct sockaddr_in
*)&local_address
;
1821 inp
->inp_lport
= local_in_address
->sin_port
;
1822 memcpy(&inp
->inp_laddr
, &local_in_address
->sin_addr
, sizeof(struct in_addr
));
1823 } else if (inp
->inp_vflag
& INP_IPV6
&& local_address
.ss_family
== AF_INET6
) {
1824 struct sockaddr_in6
*local_in6_address
= (struct sockaddr_in6
*)&local_address
;
1825 inp
->inp_lport
= local_in6_address
->sin6_port
;
1826 memcpy(&inp
->in6p_laddr
, &local_in6_address
->sin6_addr
, sizeof(struct in6_addr
));
1830 if (remote_address
.ss_family
!= 0) {
1831 if (fd_cb
->remote_address
!= NULL
) {
1832 FREE(fd_cb
->remote_address
, M_SONAME
);
1833 fd_cb
->remote_address
= NULL
;
1835 if (remote_address
.ss_len
> sizeof(remote_address
)) {
1836 remote_address
.ss_len
= sizeof(remote_address
);
1838 fd_cb
->remote_address
= dup_sockaddr((struct sockaddr
*)&remote_address
, 1);
1839 if (flow_divert_is_sockaddr_valid((struct sockaddr
*)&remote_address
)) {
1840 if (inp
->inp_vflag
& INP_IPV4
&& remote_address
.ss_family
== AF_INET
) {
1841 struct sockaddr_in
*remote_in_address
= (struct sockaddr_in
*)&remote_address
;
1842 inp
->inp_fport
= remote_in_address
->sin_port
;
1843 memcpy(&inp
->inp_faddr
, &remote_in_address
->sin_addr
, sizeof(struct in_addr
));
1844 } else if (inp
->inp_vflag
& INP_IPV6
&& remote_address
.ss_family
== AF_INET6
) {
1845 struct sockaddr_in6
*remote_in6_address
= (struct sockaddr_in6
*)&remote_address
;
1846 inp
->inp_fport
= remote_in6_address
->sin6_port
;
1847 memcpy(&inp
->in6p_faddr
, &remote_in6_address
->sin6_addr
, sizeof(struct in6_addr
));
1852 goto set_socket_state
;
1855 if (app_data_length
> 0) {
1856 uint8_t *app_data
= NULL
;
1857 MALLOC(app_data
, uint8_t *, app_data_length
, M_TEMP
, M_WAITOK
);
1858 if (app_data
!= NULL
) {
1859 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_APP_DATA
, app_data_length
, app_data
, NULL
);
1861 FDLOG(LOG_INFO
, fd_cb
, "Got %u bytes of app data from the connect result", app_data_length
);
1862 if (fd_cb
->app_data
!= NULL
) {
1863 FREE(fd_cb
->app_data
, M_TEMP
);
1865 fd_cb
->app_data
= app_data
;
1866 fd_cb
->app_data_length
= app_data_length
;
1868 FDLOG(LOG_ERR
, fd_cb
, "Failed to copy %u bytes of application data from the connect result packet", app_data_length
);
1869 FREE(app_data
, M_TEMP
);
1872 FDLOG(LOG_ERR
, fd_cb
, "Failed to allocate a buffer of size %u to hold the application data from the connect result", app_data_length
);
1876 ifnet_head_lock_shared();
1877 if (out_if_index
> 0 && out_if_index
<= if_index
) {
1878 ifp
= ifindex2ifnet
[out_if_index
];
1882 inp
->inp_last_outifp
= ifp
;
1889 goto set_socket_state
;
1892 if (fd_cb
->group
== NULL
) {
1894 goto set_socket_state
;
1898 old_group
= fd_cb
->group
;
1900 lck_rw_lock_exclusive(&old_group
->lck
);
1901 lck_rw_lock_exclusive(&grp
->lck
);
1903 RB_REMOVE(fd_pcb_tree
, &old_group
->pcb_tree
, fd_cb
);
1904 if (RB_INSERT(fd_pcb_tree
, &grp
->pcb_tree
, fd_cb
) != NULL
) {
1905 panic("group with unit %u already contains a connection with hash %u", grp
->ctl_unit
, fd_cb
->hash
);
1910 lck_rw_done(&grp
->lck
);
1911 lck_rw_done(&old_group
->lck
);
1914 fd_cb
->send_window
= ntohl(send_window
);
1917 if (!connect_error
&& !error
) {
1918 FDLOG0(LOG_INFO
, fd_cb
, "sending connect result");
1919 error
= flow_divert_send_connect_result(fd_cb
);
1922 if (connect_error
|| error
) {
1923 if (!connect_error
) {
1924 flow_divert_update_closed_state(fd_cb
, SHUT_RDWR
, FALSE
);
1925 fd_cb
->so
->so_error
= error
;
1926 flow_divert_send_close_if_needed(fd_cb
);
1928 flow_divert_update_closed_state(fd_cb
, SHUT_RDWR
, TRUE
);
1929 fd_cb
->so
->so_error
= connect_error
;
1931 flow_divert_disconnect_socket(fd_cb
->so
);
1934 /* Update NECP client with connected five-tuple */
1935 if (!uuid_is_null(inp
->necp_client_uuid
)) {
1936 socket_unlock(fd_cb
->so
, 0);
1937 necp_client_assign_from_socket(fd_cb
->so
->last_pid
, inp
->necp_client_uuid
, inp
);
1938 socket_lock(fd_cb
->so
, 0);
1942 flow_divert_send_buffered_data(fd_cb
, FALSE
);
1943 soisconnected(fd_cb
->so
);
1947 socket_unlock(fd_cb
->so
, 0);
1951 lck_rw_done(&g_flow_divert_group_lck
);
1955 flow_divert_handle_close(struct flow_divert_pcb
*fd_cb
, mbuf_t packet
, int offset
)
1957 uint32_t close_error
;
1961 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_ERROR_CODE
, sizeof(close_error
), &close_error
, NULL
);
1963 FDLOG(LOG_ERR
, fd_cb
, "failed to get the close error: %d", error
);
1967 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_HOW
, sizeof(how
), &how
, NULL
);
1969 FDLOG(LOG_ERR
, fd_cb
, "failed to get the close how flag: %d", error
);
1975 FDLOG(LOG_INFO
, fd_cb
, "close received, how = %d", how
);
1978 if (fd_cb
->so
!= NULL
) {
1979 socket_lock(fd_cb
->so
, 0);
1981 fd_cb
->so
->so_error
= ntohl(close_error
);
1983 flow_divert_update_closed_state(fd_cb
, how
, TRUE
);
1985 how
= flow_divert_tunnel_how_closed(fd_cb
);
1986 if (how
== SHUT_RDWR
) {
1987 flow_divert_disconnect_socket(fd_cb
->so
);
1988 } else if (how
== SHUT_RD
) {
1989 socantrcvmore(fd_cb
->so
);
1990 } else if (how
== SHUT_WR
) {
1991 socantsendmore(fd_cb
->so
);
1994 socket_unlock(fd_cb
->so
, 0);
2000 flow_divert_get_control_mbuf(struct flow_divert_pcb
*fd_cb
)
2002 struct inpcb
*inp
= sotoinpcb(fd_cb
->so
);
2003 if ((inp
->inp_vflag
& INP_IPV4
) && (inp
->inp_flags
& INP_RECVDSTADDR
)) {
2004 struct in_addr ia
= { };
2006 if (fd_cb
->local_address
!= NULL
&& fd_cb
->local_address
->sa_family
== AF_INET
&& fd_cb
->local_address
->sa_len
>= sizeof(struct sockaddr_in
)) {
2007 struct sockaddr_in
*sin
= (struct sockaddr_in
*)(void *)fd_cb
->local_address
;
2008 bcopy(&sin
->sin_addr
, &ia
, sizeof(struct in_addr
));
2011 return sbcreatecontrol((caddr_t
)&ia
, sizeof(ia
), IP_RECVDSTADDR
, IPPROTO_IP
);
2012 } else if ((inp
->inp_vflag
& INP_IPV6
) && (inp
->inp_flags
& IN6P_PKTINFO
)) {
2013 struct in6_pktinfo pi6
;
2014 memset(&pi6
, 0, sizeof(pi6
));
2016 if (fd_cb
->local_address
!= NULL
&& fd_cb
->local_address
->sa_family
== AF_INET6
&& fd_cb
->local_address
->sa_len
>= sizeof(struct sockaddr_in6
)) {
2017 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)(void *)fd_cb
->local_address
;
2018 bcopy(&sin6
->sin6_addr
, &pi6
.ipi6_addr
, sizeof(struct in6_addr
));
2019 pi6
.ipi6_ifindex
= 0;
2022 return sbcreatecontrol((caddr_t
)&pi6
, sizeof(pi6
), IPV6_PKTINFO
, IPPROTO_IPV6
);
2028 flow_divert_handle_data(struct flow_divert_pcb
*fd_cb
, mbuf_t packet
, size_t offset
)
2031 if (fd_cb
->so
!= NULL
) {
2035 struct sockaddr_storage remote_address
;
2036 boolean_t got_remote_sa
= FALSE
;
2038 socket_lock(fd_cb
->so
, 0);
2040 if (SOCK_TYPE(fd_cb
->so
) == SOCK_DGRAM
) {
2041 uint32_t val_size
= 0;
2043 /* check if we got remote address with data */
2044 memset(&remote_address
, 0, sizeof(remote_address
));
2045 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_REMOTE_ADDR
, sizeof(remote_address
), &remote_address
, &val_size
);
2046 if (error
|| val_size
> sizeof(remote_address
)) {
2047 FDLOG0(LOG_INFO
, fd_cb
, "No remote address provided");
2050 /* validate the address */
2051 if (flow_divert_is_sockaddr_valid((struct sockaddr
*)&remote_address
)) {
2052 got_remote_sa
= TRUE
;
2054 offset
+= (sizeof(uint8_t) + sizeof(uint32_t) + val_size
);
2058 data_size
= (mbuf_pkthdr_len(packet
) - offset
);
2060 FDLOG(LOG_DEBUG
, fd_cb
, "received %lu bytes of data", data_size
);
2062 error
= mbuf_split(packet
, offset
, MBUF_DONTWAIT
, &data
);
2063 if (error
|| data
== NULL
) {
2064 FDLOG(LOG_ERR
, fd_cb
, "mbuf_split failed: %d", error
);
2066 if (flow_divert_check_no_cellular(fd_cb
) ||
2067 flow_divert_check_no_expensive(fd_cb
) ||
2068 flow_divert_check_no_constrained(fd_cb
)) {
2069 flow_divert_update_closed_state(fd_cb
, SHUT_RDWR
, TRUE
);
2070 flow_divert_send_close(fd_cb
, SHUT_RDWR
);
2071 flow_divert_disconnect_socket(fd_cb
->so
);
2072 } else if (!(fd_cb
->so
->so_state
& SS_CANTRCVMORE
)) {
2073 if (SOCK_TYPE(fd_cb
->so
) == SOCK_STREAM
) {
2074 if (sbappendstream(&fd_cb
->so
->so_rcv
, data
)) {
2075 fd_cb
->bytes_received
+= data_size
;
2076 flow_divert_add_data_statistics(fd_cb
, data_size
, FALSE
);
2077 fd_cb
->sb_size
= fd_cb
->so
->so_rcv
.sb_cc
;
2078 sorwakeup(fd_cb
->so
);
2081 FDLOG0(LOG_ERR
, fd_cb
, "received data, but appendstream failed");
2083 } else if (SOCK_TYPE(fd_cb
->so
) == SOCK_DGRAM
) {
2084 struct sockaddr
*append_sa
;
2087 if (got_remote_sa
== TRUE
) {
2088 error
= flow_divert_dup_addr(fd_cb
->so
->so_proto
->pr_domain
->dom_family
,
2089 (struct sockaddr
*)&remote_address
, &append_sa
);
2091 error
= flow_divert_dup_addr(fd_cb
->so
->so_proto
->pr_domain
->dom_family
,
2092 fd_cb
->remote_address
, &append_sa
);
2095 FDLOG0(LOG_ERR
, fd_cb
, "failed to dup the socket address.");
2098 mctl
= flow_divert_get_control_mbuf(fd_cb
);
2099 int append_error
= 0;
2100 if (sbappendaddr(&fd_cb
->so
->so_rcv
, append_sa
, data
, mctl
, &append_error
)) {
2101 fd_cb
->bytes_received
+= data_size
;
2102 flow_divert_add_data_statistics(fd_cb
, data_size
, FALSE
);
2103 fd_cb
->sb_size
= fd_cb
->so
->so_rcv
.sb_cc
;
2104 sorwakeup(fd_cb
->so
);
2106 } else if (append_error
!= EJUSTRETURN
) {
2107 FDLOG0(LOG_ERR
, fd_cb
, "received data, but sbappendaddr failed");
2110 FREE(append_sa
, M_TEMP
);
2115 socket_unlock(fd_cb
->so
, 0);
2121 flow_divert_handle_read_notification(struct flow_divert_pcb
*fd_cb
, mbuf_t packet
, int offset
)
2123 uint32_t read_count
;
2126 error
= flow_divert_packet_get_tlv(packet
, offset
, FLOW_DIVERT_TLV_READ_COUNT
, sizeof(read_count
), &read_count
, NULL
);
2128 FDLOG(LOG_ERR
, fd_cb
, "failed to get the read count: %d", error
);
2132 FDLOG(LOG_DEBUG
, fd_cb
, "received a read notification for %u bytes", ntohl(read_count
));
2135 if (fd_cb
->so
!= NULL
) {
2136 socket_lock(fd_cb
->so
, 0);
2137 fd_cb
->send_window
+= ntohl(read_count
);
2138 flow_divert_send_buffered_data(fd_cb
, FALSE
);
2139 socket_unlock(fd_cb
->so
, 0);
static void
flow_divert_handle_group_init(struct flow_divert_group *group, mbuf_t packet, int offset)
{
    int error = 0;
    uint32_t key_size = 0;
    int log_level = 0;
    uint32_t flags = 0;

    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_TOKEN_KEY, 0, NULL, &key_size);
    if (error) {
        FDLOG(LOG_ERR, &nil_pcb, "failed to get the key size: %d", error);
        return;
    }

    if (key_size == 0 || key_size > FLOW_DIVERT_MAX_KEY_SIZE) {
        FDLOG(LOG_ERR, &nil_pcb, "Invalid key size: %u", key_size);
        return;
    }

    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOG_LEVEL, sizeof(log_level), &log_level, NULL);
    if (!error) {
        nil_pcb.log_level = log_level;
    }

    lck_rw_lock_exclusive(&group->lck);

    if (group->token_key != NULL) {
        FREE(group->token_key, M_TEMP);
        group->token_key = NULL;
    }

    MALLOC(group->token_key, uint8_t *, key_size, M_TEMP, M_WAITOK);
    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_TOKEN_KEY, key_size, group->token_key, NULL);
    if (error) {
        FDLOG(LOG_ERR, &nil_pcb, "failed to get the token key: %d", error);
        FREE(group->token_key, M_TEMP);
        group->token_key = NULL;
        lck_rw_done(&group->lck);
        return;
    }

    group->token_key_size = key_size;

    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_FLAGS, sizeof(flags), &flags, NULL);
    if (!error) {
        group->flags = flags;
    }

    lck_rw_done(&group->lck);
}
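/*
 * Handle a PROPERTIES_UPDATE message.  Each property (local address, remote
 * address, output interface index, application data) is optional; only the
 * TLVs present in the packet are applied to the PCB.
 */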
static void
flow_divert_handle_properties_update(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset)
{
    int error = 0;
    struct sockaddr_storage local_address;
    int out_if_index = 0;
    struct sockaddr_storage remote_address;
    uint32_t app_data_length = 0;

    FDLOG0(LOG_INFO, fd_cb, "received a properties update");

    memset(&local_address, 0, sizeof(local_address));
    memset(&remote_address, 0, sizeof(remote_address));

    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOCAL_ADDR, sizeof(local_address), &local_address, NULL);
    if (error) {
        FDLOG0(LOG_INFO, fd_cb, "No local address provided in properties update");
    }

    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_address), &remote_address, NULL);
    if (error) {
        FDLOG0(LOG_INFO, fd_cb, "No remote address provided in properties update");
    }

    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_OUT_IF_INDEX, sizeof(out_if_index), &out_if_index, NULL);
    if (error) {
        FDLOG0(LOG_INFO, fd_cb, "No output if index provided in properties update");
    }

    error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, 0, NULL, &app_data_length);
    if (error) {
        FDLOG0(LOG_INFO, fd_cb, "No application data provided in properties update");
    }

    FDLOCK(fd_cb);
    if (fd_cb->so != NULL) {
        socket_lock(fd_cb->so, 0);

        if (local_address.ss_family != 0) {
            if (local_address.ss_len > sizeof(local_address)) {
                local_address.ss_len = sizeof(local_address);
            }
            if (fd_cb->local_address != NULL) {
                FREE(fd_cb->local_address, M_SONAME);
                fd_cb->local_address = NULL;
            }
            fd_cb->local_address = dup_sockaddr((struct sockaddr *)&local_address, 1);
        }

        if (remote_address.ss_family != 0) {
            if (remote_address.ss_len > sizeof(remote_address)) {
                remote_address.ss_len = sizeof(remote_address);
            }
            if (fd_cb->remote_address != NULL) {
                FREE(fd_cb->remote_address, M_SONAME);
                fd_cb->remote_address = NULL;
            }
            fd_cb->remote_address = dup_sockaddr((struct sockaddr *)&remote_address, 1);
        }

        if (out_if_index > 0) {
            struct inpcb *inp = NULL;
            struct ifnet *ifp = NULL;

            inp = sotoinpcb(fd_cb->so);

            ifnet_head_lock_shared();
            if (out_if_index <= if_index) {
                ifp = ifindex2ifnet[out_if_index];
            }

            if (ifp != NULL) {
                inp->inp_last_outifp = ifp;
            }
            ifnet_head_done();
        }

        if (app_data_length > 0) {
            uint8_t *app_data = NULL;
            MALLOC(app_data, uint8_t *, app_data_length, M_TEMP, M_WAITOK);
            if (app_data != NULL) {
                error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, app_data_length, app_data, NULL);
                if (error == 0) {
                    if (fd_cb->app_data != NULL) {
                        FREE(fd_cb->app_data, M_TEMP);
                    }
                    fd_cb->app_data = app_data;
                    fd_cb->app_data_length = app_data_length;
                } else {
                    FDLOG(LOG_ERR, fd_cb, "Failed to copy %u bytes of application data from the properties update packet", app_data_length);
                    FREE(app_data, M_TEMP);
                }
            } else {
                FDLOG(LOG_ERR, fd_cb, "Failed to allocate a buffer of size %u to hold the application data from the properties update", app_data_length);
            }
        }

        socket_unlock(fd_cb->so, 0);
    }
    FDUNLOCK(fd_cb);
}
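/*
 * Handle an APP_MAP_CREATE message.  The signing IDs carried in the packet
 * are inserted into a prefix trie that is laid out in a single allocation:
 * the node array, the child maps, and the byte pool, in that order.
 */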
static void
flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet, int offset)
{
    size_t bytes_mem_size;
    size_t child_maps_mem_size;
    int cursor;
    int error = 0;
    struct flow_divert_trie new_trie;
    int insert_error = 0;
    size_t nodes_mem_size;
    int prefix_count = 0;
    int signing_id_count = 0;
    size_t trie_memory_size = 0;

    lck_rw_lock_exclusive(&group->lck);

    /* Re-set the current trie */
    if (group->signing_id_trie.memory != NULL) {
        FREE(group->signing_id_trie.memory, M_TEMP);
    }
    memset(&group->signing_id_trie, 0, sizeof(group->signing_id_trie));
    group->signing_id_trie.root = NULL_TRIE_IDX;

    memset(&new_trie, 0, sizeof(new_trie));

    /* Get the number of shared prefixes in the new set of signing ID strings */
    flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_PREFIX_COUNT, sizeof(prefix_count), &prefix_count, NULL);

    if (prefix_count < 0) {
        lck_rw_done(&group->lck);
        return;
    }

    /* Compute the number of signing IDs and the total amount of bytes needed to store them */
    for (cursor = flow_divert_packet_find_tlv(packet, offset, FLOW_DIVERT_TLV_SIGNING_ID, &error, 0);
        cursor >= 0;
        cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) {
        uint32_t sid_size = 0;
        flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size);
        new_trie.bytes_count += sid_size;
        signing_id_count++;
    }

    if (signing_id_count == 0) {
        lck_rw_done(&group->lck);
        return;
    }

    new_trie.nodes_count = (prefix_count + signing_id_count + 1); /* + 1 for the root node */
    new_trie.child_maps_count = (prefix_count + 1); /* + 1 for the root node */

    FDLOG(LOG_INFO, &nil_pcb, "Nodes count = %lu, child maps count = %lu, bytes_count = %lu",
        new_trie.nodes_count, new_trie.child_maps_count, new_trie.bytes_count);

    if (os_mul_overflow(sizeof(*new_trie.nodes), new_trie.nodes_count, &nodes_mem_size) ||
        os_mul3_overflow(sizeof(*new_trie.child_maps), CHILD_MAP_SIZE, new_trie.child_maps_count, &child_maps_mem_size) ||
        os_mul_overflow(sizeof(*new_trie.bytes), new_trie.bytes_count, &bytes_mem_size) ||
        os_add3_overflow(nodes_mem_size, child_maps_mem_size, bytes_mem_size, &trie_memory_size)) {
        FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing trie memory sizes");
        lck_rw_done(&group->lck);
        return;
    }

    if (trie_memory_size > FLOW_DIVERT_MAX_TRIE_MEMORY) {
        FDLOG(LOG_ERR, &nil_pcb, "Trie memory size (%lu) is too big (maximum is %u)", trie_memory_size, FLOW_DIVERT_MAX_TRIE_MEMORY);
        lck_rw_done(&group->lck);
        return;
    }

    MALLOC(new_trie.memory, void *, trie_memory_size, M_TEMP, M_WAITOK);
    if (new_trie.memory == NULL) {
        FDLOG(LOG_ERR, &nil_pcb, "Failed to allocate %lu bytes of memory for the signing ID trie",
            nodes_mem_size + child_maps_mem_size + bytes_mem_size);
        lck_rw_done(&group->lck);
        return;
    }

    /* Initialize the free lists */
    new_trie.nodes = (struct flow_divert_trie_node *)new_trie.memory;
    new_trie.nodes_free_next = 0;
    memset(new_trie.nodes, 0, nodes_mem_size);

    new_trie.child_maps = (uint16_t *)(void *)((uint8_t *)new_trie.memory + nodes_mem_size);
    new_trie.child_maps_free_next = 0;
    memset(new_trie.child_maps, 0xff, child_maps_mem_size);

    new_trie.bytes = (uint8_t *)(void *)((uint8_t *)new_trie.memory + nodes_mem_size + child_maps_mem_size);
    new_trie.bytes_free_next = 0;

    /* The root is an empty node */
    new_trie.root = trie_node_alloc(&new_trie);

    /* Add each signing ID to the trie */
    for (cursor = flow_divert_packet_find_tlv(packet, offset, FLOW_DIVERT_TLV_SIGNING_ID, &error, 0);
        cursor >= 0;
        cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) {
        uint32_t sid_size = 0;
        flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size);
        if (new_trie.bytes_free_next + sid_size <= new_trie.bytes_count) {
            uint16_t new_node_idx;
            flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, sid_size, &TRIE_BYTE(&new_trie, new_trie.bytes_free_next), NULL);
            new_node_idx = flow_divert_trie_insert(&new_trie, new_trie.bytes_free_next, sid_size);
            if (new_node_idx == NULL_TRIE_IDX) {
                insert_error = EINVAL;
                break;
            }
        } else {
            FDLOG0(LOG_ERR, &nil_pcb, "No place to put signing ID for insertion");
            insert_error = ENOBUFS;
            break;
        }
    }

    if (!insert_error) {
        group->signing_id_trie = new_trie;
    } else {
        FREE(new_trie.memory, M_TEMP);
    }

    lck_rw_done(&group->lck);
}
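/*
 * Demultiplex a packet received on the kernel control socket.  A connection
 * ID of zero addresses the group itself (GROUP_INIT, APP_MAP_CREATE); any
 * other connection ID is looked up in the group's PCB tree and the packet
 * is dispatched to the matching handler.
 */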
static errno_t
flow_divert_input(mbuf_t packet, struct flow_divert_group *group)
{
    struct flow_divert_packet_header hdr;
    int error = 0;
    struct flow_divert_pcb *fd_cb;

    if (mbuf_pkthdr_len(packet) < sizeof(hdr)) {
        FDLOG(LOG_ERR, &nil_pcb, "got a bad packet, length (%lu) < sizeof hdr (%lu)", mbuf_pkthdr_len(packet), sizeof(hdr));
        error = EINVAL;
        goto done;
    }

    if (mbuf_pkthdr_len(packet) > FD_CTL_RCVBUFF_SIZE) {
        FDLOG(LOG_ERR, &nil_pcb, "got a bad packet, length (%lu) > %d", mbuf_pkthdr_len(packet), FD_CTL_RCVBUFF_SIZE);
        error = EINVAL;
        goto done;
    }

    error = mbuf_copydata(packet, 0, sizeof(hdr), &hdr);
    if (error) {
        FDLOG(LOG_ERR, &nil_pcb, "mbuf_copydata failed for the header: %d", error);
        goto done;
    }

    hdr.conn_id = ntohl(hdr.conn_id);

    if (hdr.conn_id == 0) {
        switch (hdr.packet_type) {
        case FLOW_DIVERT_PKT_GROUP_INIT:
            flow_divert_handle_group_init(group, packet, sizeof(hdr));
            break;
        case FLOW_DIVERT_PKT_APP_MAP_CREATE:
            flow_divert_handle_app_map_create(group, packet, sizeof(hdr));
            break;
        default:
            FDLOG(LOG_WARNING, &nil_pcb, "got an unknown message type: %d", hdr.packet_type);
            break;
        }
        goto done;
    }

    fd_cb = flow_divert_pcb_lookup(hdr.conn_id, group); /* This retains the PCB */
    if (fd_cb == NULL) {
        if (hdr.packet_type != FLOW_DIVERT_PKT_CLOSE && hdr.packet_type != FLOW_DIVERT_PKT_READ_NOTIFY) {
            FDLOG(LOG_NOTICE, &nil_pcb, "got a %s message from group %d for an unknown pcb: %u", flow_divert_packet_type2str(hdr.packet_type), group->ctl_unit, hdr.conn_id);
        }
        goto done;
    }

    switch (hdr.packet_type) {
    case FLOW_DIVERT_PKT_CONNECT_RESULT:
        flow_divert_handle_connect_result(fd_cb, packet, sizeof(hdr));
        break;
    case FLOW_DIVERT_PKT_CLOSE:
        flow_divert_handle_close(fd_cb, packet, sizeof(hdr));
        break;
    case FLOW_DIVERT_PKT_DATA:
        flow_divert_handle_data(fd_cb, packet, sizeof(hdr));
        break;
    case FLOW_DIVERT_PKT_READ_NOTIFY:
        flow_divert_handle_read_notification(fd_cb, packet, sizeof(hdr));
        break;
    case FLOW_DIVERT_PKT_PROPERTIES_UPDATE:
        flow_divert_handle_properties_update(fd_cb, packet, sizeof(hdr));
        break;
    default:
        FDLOG(LOG_WARNING, fd_cb, "got an unknown message type: %d", hdr.packet_type);
        break;
    }

    FDRELEASE(fd_cb);

done:
    mbuf_freem(packet);
    return error;
}
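/*
 * Abort every flow that belongs to a group: drain the group's send queue,
 * then mark each PCB closed and disconnect its socket with ECONNABORTED.
 */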
static void
flow_divert_close_all(struct flow_divert_group *group)
{
    struct flow_divert_pcb *fd_cb;
    SLIST_HEAD(, flow_divert_pcb) tmp_list;

    SLIST_INIT(&tmp_list);

    lck_rw_lock_exclusive(&group->lck);

    MBUFQ_DRAIN(&group->send_queue);

    RB_FOREACH(fd_cb, fd_pcb_tree, &group->pcb_tree) {
        FDRETAIN(fd_cb);
        SLIST_INSERT_HEAD(&tmp_list, fd_cb, tmp_list_entry);
    }

    lck_rw_done(&group->lck);

    while (!SLIST_EMPTY(&tmp_list)) {
        fd_cb = SLIST_FIRST(&tmp_list);
        FDLOCK(fd_cb);
        SLIST_REMOVE_HEAD(&tmp_list, tmp_list_entry);
        if (fd_cb->so != NULL) {
            socket_lock(fd_cb->so, 0);
            flow_divert_pcb_remove(fd_cb);
            flow_divert_update_closed_state(fd_cb, SHUT_RDWR, TRUE);
            fd_cb->so->so_error = ECONNABORTED;
            flow_divert_disconnect_socket(fd_cb->so);
            socket_unlock(fd_cb->so, 0);
        }
        FDUNLOCK(fd_cb);
        FDRELEASE(fd_cb);
    }
}
static void
flow_divert_detach(struct socket *so)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    so->so_flags &= ~SOF_FLOW_DIVERT;
    so->so_fd_pcb = NULL;

    FDLOG(LOG_INFO, fd_cb, "Detaching, ref count = %d", fd_cb->ref_count);

    if (fd_cb->group != NULL) {
        /* Last-ditch effort to send any buffered data */
        flow_divert_send_buffered_data(fd_cb, TRUE);

        flow_divert_update_closed_state(fd_cb, SHUT_RDWR, FALSE);
        flow_divert_send_close_if_needed(fd_cb);
        /* Remove from the group */
        flow_divert_pcb_remove(fd_cb);
    }

    socket_unlock(so, 0);
    FDLOCK(fd_cb);
    fd_cb->so = NULL;
    FDUNLOCK(fd_cb);
    socket_lock(so, 0);

    FDRELEASE(fd_cb); /* Release the socket's reference */
}
static int
flow_divert_close(struct socket *so)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    FDLOG0(LOG_INFO, fd_cb, "Closing");

    if (SOCK_TYPE(so) == SOCK_STREAM) {
        soisdisconnecting(so);
        sbflush(&so->so_rcv);
    }

    flow_divert_send_buffered_data(fd_cb, TRUE);
    flow_divert_update_closed_state(fd_cb, SHUT_RDWR, FALSE);
    flow_divert_send_close_if_needed(fd_cb);

    /* Remove from the group */
    flow_divert_pcb_remove(fd_cb);

    return 0;
}
static int
flow_divert_disconnectx(struct socket *so, sae_associd_t aid,
    sae_connid_t cid __unused)
{
    if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
        return EINVAL;
    }

    return flow_divert_close(so);
}
static int
flow_divert_shutdown(struct socket *so)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    FDLOG0(LOG_INFO, fd_cb, "Can't send more");

    socantsendmore(so);

    flow_divert_update_closed_state(fd_cb, SHUT_WR, FALSE);
    flow_divert_send_close_if_needed(fd_cb);

    return 0;
}
static int
flow_divert_rcvd(struct socket *so, int flags __unused)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
    uint32_t latest_sb_size;
    uint32_t read_count;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    latest_sb_size = fd_cb->so->so_rcv.sb_cc;

    if (fd_cb->sb_size < latest_sb_size) {
        panic("flow divert rcvd event handler (%u): saved rcv buffer size (%u) is less than latest rcv buffer size (%u)",
            fd_cb->hash, fd_cb->sb_size, latest_sb_size);
    }

    read_count = fd_cb->sb_size - latest_sb_size;

    FDLOG(LOG_DEBUG, fd_cb, "app read %u bytes", read_count);

    if (read_count > 0 && flow_divert_send_read_notification(fd_cb, read_count) == 0) {
        fd_cb->bytes_read_by_app += read_count;
        fd_cb->sb_size = latest_sb_size;
    }

    return 0;
}
static int
flow_divert_append_target_endpoint_tlv(mbuf_t connect_packet, struct sockaddr *toaddr)
{
    int error = 0;
    int port = 0;

    if (!flow_divert_is_sockaddr_valid(toaddr)) {
        FDLOG(LOG_ERR, &nil_pcb, "Invalid target address, family = %u, length = %u", toaddr->sa_family, toaddr->sa_len);
        error = EINVAL;
        goto done;
    }

    error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_TARGET_ADDRESS, toaddr->sa_len, toaddr);
    if (error) {
        goto done;
    }

    if (toaddr->sa_family == AF_INET) {
        port = ntohs((satosin(toaddr))->sin_port);
    } else {
        port = ntohs((satosin6(toaddr))->sin6_port);
    }

    error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_TARGET_PORT, sizeof(port), &port);
    if (error) {
        goto done;
    }

done:
    return error;
}
static struct sockaddr *
flow_divert_get_buffered_target_address(mbuf_t buffer)
{
    if (buffer != NULL && buffer->m_type == MT_SONAME) {
        struct sockaddr *toaddr = mtod(buffer, struct sockaddr *);
        if (toaddr != NULL && flow_divert_is_sockaddr_valid(toaddr)) {
            return toaddr;
        }
    }
    return NULL;
}
static boolean_t
flow_divert_is_sockaddr_valid(struct sockaddr *addr)
{
    switch (addr->sa_family) {
    case AF_INET:
        if (addr->sa_len < sizeof(struct sockaddr_in)) {
            return FALSE;
        }
        break;
    case AF_INET6:
        if (addr->sa_len < sizeof(struct sockaddr_in6)) {
            return FALSE;
        }
        break;
    default:
        return FALSE;
    }
    return TRUE;
}
static errno_t
flow_divert_inp_to_sockaddr(const struct inpcb *inp, struct sockaddr **local_socket)
{
    int error = 0;
    union sockaddr_in_4_6 sin46;

    bzero(&sin46, sizeof(sin46));
    if (inp->inp_vflag & INP_IPV4) {
        struct sockaddr_in *sin = &sin46.sin;

        sin->sin_family = AF_INET;
        sin->sin_len = sizeof(*sin);
        sin->sin_port = inp->inp_lport;
        sin->sin_addr = inp->inp_laddr;
    } else if (inp->inp_vflag & INP_IPV6) {
        struct sockaddr_in6 *sin6 = &sin46.sin6;

        sin6->sin6_len = sizeof(*sin6);
        sin6->sin6_family = AF_INET6;
        sin6->sin6_port = inp->inp_lport;
        sin6->sin6_addr = inp->in6p_laddr;
    }
    *local_socket = dup_sockaddr((struct sockaddr *)&sin46, 1);
    if (*local_socket == NULL) {
        error = ENOBUFS;
    }
    return error;
}
static boolean_t
flow_divert_has_pcb_local_address(const struct inpcb *inp)
{
    return inp->inp_lport != 0
           && (inp->inp_laddr.s_addr != INADDR_ANY || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr));
}
static errno_t
flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr,
    struct sockaddr **dup)
{
    int error = 0;
    struct sockaddr *result;
    struct sockaddr_storage ss;

    if (addr != NULL) {
        result = addr;
    } else {
        memset(&ss, 0, sizeof(ss));
        ss.ss_family = family;
        if (ss.ss_family == AF_INET) {
            ss.ss_len = sizeof(struct sockaddr_in);
        } else if (ss.ss_family == AF_INET6) {
            ss.ss_len = sizeof(struct sockaddr_in6);
        } else {
            error = EINVAL;
        }
        result = (struct sockaddr *)&ss;
    }

    if (!error) {
        *dup = dup_sockaddr(result, 1);
        if (*dup == NULL) {
            error = ENOBUFS;
        }
    }

    return error;
}
static void
flow_divert_disconnect_socket(struct socket *so)
{
    soisdisconnected(so);
    if (SOCK_TYPE(so) == SOCK_DGRAM) {
        struct inpcb *inp = NULL;

        inp = sotoinpcb(so);
        if (inp != NULL) {
            if (SOCK_CHECK_DOM(so, PF_INET6)) {
                in6_pcbdetach(inp);
            } else {
                in_pcbdetach(inp);
            }
        }
    }
}
static int
flow_divert_getpeername(struct socket *so, struct sockaddr **sa)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family,
               fd_cb->remote_address,
               sa);
}
static int
flow_divert_getsockaddr(struct socket *so, struct sockaddr **sa)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family,
               fd_cb->local_address,
               sa);
}
static int
flow_divert_ctloutput(struct socket *so, struct sockopt *sopt)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    if (sopt->sopt_name == SO_TRAFFIC_CLASS) {
        if (sopt->sopt_dir == SOPT_SET && fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) {
            flow_divert_send_traffic_class_update(fd_cb, so->so_traffic_class);
        }
    }

    if (SOCK_DOM(so) == PF_INET) {
        return g_tcp_protosw->pr_ctloutput(so, sopt);
    } else if (SOCK_DOM(so) == PF_INET6) {
        return g_tcp6_protosw->pr_ctloutput(so, sopt);
    }
    return 0;
}
static int
flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct sockaddr_in *sinp;
    mbuf_t connect_packet = NULL;
    int do_send = 1;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    if (fd_cb->group == NULL) {
        error = ENETUNREACH;
        goto done;
    }

    if (inp == NULL) {
        error = EINVAL;
        goto done;
    } else if (inp->inp_state == INPCB_STATE_DEAD) {
        if (so->so_error) {
            error = so->so_error;
            so->so_error = 0;
        } else {
            error = EINVAL;
        }
        goto done;
    }

    if ((fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) && !(fd_cb->flags & FLOW_DIVERT_TRANSFERRED)) {
        error = EALREADY;
        goto done;
    }

    if (fd_cb->flags & FLOW_DIVERT_TRANSFERRED) {
        FDLOG0(LOG_INFO, fd_cb, "fully transferred");
        fd_cb->flags &= ~FLOW_DIVERT_TRANSFERRED;
        if (fd_cb->remote_address != NULL) {
            soisconnected(fd_cb->so);
            goto done;
        }
    }

    FDLOG0(LOG_INFO, fd_cb, "Connecting");

    if (fd_cb->connect_packet == NULL) {
        if (to == NULL) {
            FDLOG0(LOG_ERR, fd_cb, "No destination address available when creating connect packet");
            error = EINVAL;
            goto done;
        }

        sinp = (struct sockaddr_in *)(void *)to;
        if (sinp->sin_family == AF_INET && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
            error = EAFNOSUPPORT;
            goto done;
        }

        error = flow_divert_create_connect_packet(fd_cb, to, so, p, &connect_packet);
        if (error) {
            goto done;
        }

        if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
            FDLOG0(LOG_INFO, fd_cb, "Delaying sending the connect packet until send or receive");
            do_send = 0;
        }
    } else {
        FDLOG0(LOG_INFO, fd_cb, "Sending saved connect packet");
        connect_packet = fd_cb->connect_packet;
        fd_cb->connect_packet = NULL;
    }

    if (do_send) {
        error = flow_divert_send_packet(fd_cb, connect_packet, TRUE);
        if (error) {
            goto done;
        }

        fd_cb->flags |= FLOW_DIVERT_CONNECT_STARTED;
    } else {
        fd_cb->connect_packet = connect_packet;
        connect_packet = NULL;
    }

    soisconnecting(so);

done:
    if (error && connect_packet != NULL) {
        mbuf_freem(connect_packet);
    }

    return error;
}
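/*
 * Common connectx() path: start the connection and, if the caller supplied
 * a uio, send the initial data through pru_sosend.  EWOULDBLOCK from sosend
 * is translated to EINPROGRESS to match connectx() semantics.
 */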
static int
flow_divert_connectx_out_common(struct socket *so, struct sockaddr *dst,
    struct proc *p, sae_connid_t *pcid, struct uio *auio, user_ssize_t *bytes_written)
{
    struct inpcb *inp = sotoinpcb(so);
    int error;

    if (inp == NULL) {
        return EINVAL;
    }

    VERIFY(dst != NULL);

    error = flow_divert_connect_out(so, dst, p);
    if (error != 0) {
        return error;
    }

    /* if there is data, send it */
    if (auio != NULL) {
        user_ssize_t datalen = 0;

        socket_unlock(so, 0);

        VERIFY(bytes_written != NULL);

        datalen = uio_resid(auio);
        error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL, (uio_t)auio, NULL, NULL, 0);
        socket_lock(so, 0);

        if (error == 0 || error == EWOULDBLOCK) {
            *bytes_written = datalen - uio_resid(auio);
        }

        /*
         * sosend returns EWOULDBLOCK if it's a non-blocking
         * socket or a timeout occurred (this allows the amount of
         * queued data to be returned through sendit()).
         *
         * However, connectx() returns EINPROGRESS in case of a
         * blocking socket. So we change the return value here.
         */
        if (error == EWOULDBLOCK) {
            error = EINPROGRESS;
        }
    }

    if (error == 0 && pcid != NULL) {
        *pcid = 1; /* there is only 1 connection for a TCP */
    }

    return error;
}
static int
flow_divert_connectx_out(struct socket *so, struct sockaddr *src __unused,
    struct sockaddr *dst, struct proc *p, uint32_t ifscope __unused,
    sae_associd_t aid __unused, sae_connid_t *pcid, uint32_t flags __unused, void *arg __unused,
    uint32_t arglen __unused, struct uio *uio, user_ssize_t *bytes_written)
{
    return flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written);
}
static int
flow_divert_connectx6_out(struct socket *so, struct sockaddr *src __unused,
    struct sockaddr *dst, struct proc *p, uint32_t ifscope __unused,
    sae_associd_t aid __unused, sae_connid_t *pcid, uint32_t flags __unused, void *arg __unused,
    uint32_t arglen __unused, struct uio *uio, user_ssize_t *bytes_written)
{
    return flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written);
}
static int
flow_divert_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,
    uint32_t *ifindex, int32_t *soerror, user_addr_t src, socklen_t *src_len,
    user_addr_t dst, socklen_t *dst_len, uint32_t *aux_type,
    user_addr_t aux_data __unused, uint32_t *aux_len)
{
    int error = 0;
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
    struct ifnet *ifp = NULL;
    struct inpcb *inp = sotoinpcb(so);

    VERIFY((so->so_flags & SOF_FLOW_DIVERT));

    if (so->so_fd_pcb == NULL || inp == NULL) {
        error = EINVAL;
        goto out;
    }

    if (cid != SAE_CONNID_ANY && cid != SAE_CONNID_ALL && cid != 1) {
        error = EINVAL;
        goto out;
    }

    ifp = inp->inp_last_outifp;
    *ifindex = ((ifp != NULL) ? ifp->if_index : 0);
    *soerror = so->so_error;
    *flags = 0;

    if (so->so_state & SS_ISCONNECTED) {
        *flags |= (CIF_CONNECTED | CIF_PREFERRED);
    }

    if (fd_cb->local_address == NULL) {
        struct sockaddr_in sin;
        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        *src_len = sin.sin_len;
        if (src != USER_ADDR_NULL) {
            error = copyout(&sin, src, sin.sin_len);
            if (error != 0) {
                goto out;
            }
        }
    } else {
        *src_len = fd_cb->local_address->sa_len;
        if (src != USER_ADDR_NULL) {
            error = copyout(fd_cb->local_address, src, fd_cb->local_address->sa_len);
            if (error != 0) {
                goto out;
            }
        }
    }

    if (fd_cb->remote_address == NULL) {
        struct sockaddr_in sin;
        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        *dst_len = sin.sin_len;
        if (dst != USER_ADDR_NULL) {
            error = copyout(&sin, dst, sin.sin_len);
            if (error != 0) {
                goto out;
            }
        }
    } else {
        *dst_len = fd_cb->remote_address->sa_len;
        if (dst != USER_ADDR_NULL) {
            error = copyout(fd_cb->remote_address, dst, fd_cb->remote_address->sa_len);
            if (error != 0) {
                goto out;
            }
        }
    }

    *aux_type = 0;
    *aux_len = 0;

out:
    return error;
}
static int
flow_divert_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp __unused, struct proc *p __unused)
{
    int error = 0;

    switch (cmd) {
    case SIOCGCONNINFO32: {
        struct so_cinforeq32 cifr;
        bcopy(data, &cifr, sizeof(cifr));
        error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags,
            &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src,
            &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len,
            &cifr.scir_aux_type, cifr.scir_aux_data,
            &cifr.scir_aux_len);
        if (error == 0) {
            bcopy(&cifr, data, sizeof(cifr));
        }
        break;
    }

    case SIOCGCONNINFO64: {
        struct so_cinforeq64 cifr;
        bcopy(data, &cifr, sizeof(cifr));
        error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags,
            &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src,
            &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len,
            &cifr.scir_aux_type, cifr.scir_aux_data,
            &cifr.scir_aux_len);
        if (error == 0) {
            bcopy(&cifr, data, sizeof(cifr));
        }
        break;
    }

    default:
        error = EOPNOTSUPP;
        break;
    }

    return error;
}
static int
flow_divert_in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct proc *p)
{
    int error = flow_divert_control(so, cmd, data, ifp, p);

    if (error == EOPNOTSUPP) {
        error = in_control(so, cmd, data, ifp, p);
    }

    return error;
}
static int
flow_divert_in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct proc *p)
{
    int error = flow_divert_control(so, cmd, data, ifp, p);

    if (error == EOPNOTSUPP) {
        error = in6_control(so, cmd, data, ifp, p);
    }

    return error;
}
static int
flow_divert_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
    int error = 0;
    struct inpcb *inp;

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    inp = sotoinpcb(so);
    if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
        error = EINVAL;
        goto done;
    }

    if (control && mbuf_len(control) > 0) {
        error = EINVAL;
        goto done;
    }

    if (flags & MSG_OOB) {
        error = EINVAL;
        goto done; /* We don't support OOB data */
    }

    error = flow_divert_check_no_cellular(fd_cb) ||
        flow_divert_check_no_expensive(fd_cb) ||
        flow_divert_check_no_constrained(fd_cb);
    if (error) {
        goto done;
    }

    /* Implicit connect */
    if (!(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) {
        FDLOG0(LOG_INFO, fd_cb, "implicit connect");

#if CONTENT_FILTER
        /*
         * If the socket is subject to a UDP Content Filter and no remote address is passed in,
         * retrieve the CFIL saved remote address from the mbuf and use it.
         */
        if (to == NULL && so->so_cfil_db) {
            struct sockaddr *cfil_faddr = NULL;
            struct m_tag *cfil_tag = cfil_udp_get_socket_state(data, NULL, NULL, &cfil_faddr);
            if (cfil_tag) {
                to = (struct sockaddr *)(void *)cfil_faddr;
            }
            FDLOG(LOG_INFO, fd_cb, "Using remote address from CFIL saved state: %p", to);
        }
#endif /* CONTENT_FILTER */

        error = flow_divert_connect_out(so, to, p);
        if (error) {
            goto done;
        }

        if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) {
            /* Open up the send window so that the data will get sent right away */
            fd_cb->send_window = mbuf_pkthdr_len(data);
        }
    }

    FDLOG(LOG_DEBUG, fd_cb, "app wrote %lu bytes", mbuf_pkthdr_len(data));

    fd_cb->bytes_written_by_app += mbuf_pkthdr_len(data);
    error = flow_divert_send_app_data(fd_cb, data, to);
    if (error) {
        goto done;
    }

    data = NULL;

    if (flags & PRUS_EOF) {
        flow_divert_shutdown(so);
    }

done:
    if (data) {
        mbuf_freem(data);
    }
    if (control) {
        mbuf_freem(control);
    }
    return error;
}
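/*
 * Pre-connect hook: if an implicit connect saved a connect packet (for
 * example when SOF1_PRECONNECT_DATA delayed it), send it now and mark the
 * connection as started.
 */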
static int
flow_divert_preconnect(struct socket *so)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
    int error = 0;

    if (!(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) && fd_cb->connect_packet != NULL) {
        FDLOG0(LOG_INFO, fd_cb, "Pre-connect read: sending saved connect packet");
        mbuf_t connect_packet = fd_cb->connect_packet;
        fd_cb->connect_packet = NULL;

        error = flow_divert_send_packet(fd_cb, connect_packet, TRUE);
        if (error) {
            mbuf_freem(connect_packet);
        }

        fd_cb->flags |= FLOW_DIVERT_CONNECT_STARTED;
    }

    soclearfastopen(so);

    return error;
}
static void
flow_divert_set_protosw(struct socket *so)
{
    so->so_flags |= SOF_FLOW_DIVERT;
    if (SOCK_DOM(so) == PF_INET) {
        so->so_proto = &g_flow_divert_in_protosw;
    } else {
        so->so_proto = (struct protosw *)&g_flow_divert_in6_protosw;
    }
}
static void
flow_divert_set_udp_protosw(struct socket *so)
{
    so->so_flags |= SOF_FLOW_DIVERT;
    if (SOCK_DOM(so) == PF_INET) {
        so->so_proto = &g_flow_divert_in_udp_protosw;
    } else {
        so->so_proto = (struct protosw *)&g_flow_divert_in6_udp_protosw;
    }
}
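/*
 * Attach a socket to an existing flow (identified by a flow ID from a
 * token): detach the flow's current socket, restore that socket's original
 * protosw, and move any buffered receive data and the last output interface
 * over to the new socket.
 */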
static int
flow_divert_attach(struct socket *so, uint32_t flow_id, uint32_t ctl_unit)
{
    int error = 0;
    struct flow_divert_pcb *fd_cb = NULL;
    struct ifnet *ifp = NULL;
    struct inpcb *inp = NULL;
    struct socket *old_so;
    mbuf_t recv_data = NULL;

    socket_unlock(so, 0);

    FDLOG(LOG_INFO, &nil_pcb, "Attaching socket to flow %u", flow_id);

    /* Find the flow divert control block */
    lck_rw_lock_shared(&g_flow_divert_group_lck);
    if (g_flow_divert_groups != NULL && g_active_group_count > 0) {
        struct flow_divert_group *group = g_flow_divert_groups[ctl_unit];
        if (group != NULL) {
            fd_cb = flow_divert_pcb_lookup(flow_id, group);
        }
    }
    lck_rw_done(&g_flow_divert_group_lck);

    if (fd_cb == NULL) {
        error = ENOENT;
        goto done;
    }

    /* Dis-associate the flow divert control block from its current socket */
    FDLOCK(fd_cb);
    old_so = fd_cb->so;
    inp = sotoinpcb(old_so);

    VERIFY(inp != NULL);

    socket_lock(old_so, 0);
    flow_divert_disconnect_socket(old_so);
    old_so->so_flags &= ~SOF_FLOW_DIVERT;
    old_so->so_fd_pcb = NULL;
    if (SOCK_TYPE(old_so) == SOCK_STREAM) {
        old_so->so_proto = pffindproto(SOCK_DOM(old_so), IPPROTO_TCP, SOCK_STREAM);
    } else if (SOCK_TYPE(old_so) == SOCK_DGRAM) {
        old_so->so_proto = pffindproto(SOCK_DOM(old_so), IPPROTO_UDP, SOCK_DGRAM);
    }

    /* Save the output interface */
    ifp = inp->inp_last_outifp;
    if (old_so->so_rcv.sb_cc > 0) {
        error = mbuf_dup(old_so->so_rcv.sb_mb, MBUF_DONTWAIT, &recv_data);
        sbflush(&old_so->so_rcv);
    }
    socket_unlock(old_so, 0);

    /* Associate the new socket with the flow divert control block */
    socket_lock(so, 0);
    so->so_fd_pcb = fd_cb;
    inp = sotoinpcb(so);
    inp->inp_last_outifp = ifp;
    if (recv_data != NULL) {
        if (sbappendstream(&so->so_rcv, recv_data)) {
            sorwakeup(so);
        }
    }
    flow_divert_set_protosw(so);
    socket_unlock(so, 0);

    fd_cb->so = so;
    fd_cb->flags |= FLOW_DIVERT_TRANSFERRED;

    FDUNLOCK(fd_cb);

done:
    socket_lock(so, 0);

    if (fd_cb != NULL) {
        FDRELEASE(fd_cb); /* Release the reference obtained via flow_divert_pcb_lookup */
    }

    return error;
}
errno_t
flow_divert_implicit_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p)
{
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
    struct inpcb *inp;
    int error = 0;

    inp = sotoinpcb(so);
    if (inp == NULL) {
        error = EINVAL;
        goto done;
    }

    if (fd_cb == NULL) {
        uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp);
        if (fd_ctl_unit > 0) {
            error = flow_divert_pcb_init(so, fd_ctl_unit);
            fd_cb = so->so_fd_pcb;
            if (error != 0 || fd_cb == NULL) {
                goto done;
            }
        } else {
            error = ENETDOWN;
            goto done;
        }
    }
    return flow_divert_data_out(so, flags, data, to, control, p);

done:
    if (data) {
        mbuf_freem(data);
    }
    if (control) {
        mbuf_freem(control);
    }
    return error;
}
int
flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit)
{
    int error = 0;
    struct flow_divert_pcb *fd_cb;

    if (so->so_flags & SOF_FLOW_DIVERT) {
        return EALREADY;
    }

    fd_cb = flow_divert_pcb_create(so);
    if (fd_cb != NULL) {
        error = flow_divert_pcb_insert(fd_cb, ctl_unit);
        if (error) {
            FDLOG(LOG_ERR, fd_cb, "pcb insert failed: %d", error);
            FDRELEASE(fd_cb);
        } else {
            fd_cb->control_group_unit = ctl_unit;
            so->so_fd_pcb = fd_cb;

            if (SOCK_TYPE(so) == SOCK_STREAM) {
                flow_divert_set_protosw(so);
            } else if (SOCK_TYPE(so) == SOCK_DGRAM) {
                flow_divert_set_udp_protosw(so);
            }

            FDLOG0(LOG_INFO, fd_cb, "Created");
        }
    } else {
        error = ENOMEM;
    }

    return error;
}
int
flow_divert_token_set(struct socket *so, struct sockopt *sopt)
{
    uint32_t ctl_unit = 0;
    uint32_t key_unit = 0;
    uint32_t flow_id = 0;
    int error = 0;
    int hmac_error = 0;
    mbuf_t token = NULL;

    if (so->so_flags & SOF_FLOW_DIVERT) {
        error = EALREADY;
        goto done;
    }

    if (g_init_result) {
        FDLOG(LOG_ERR, &nil_pcb, "flow_divert_init failed (%d), cannot use flow divert", g_init_result);
        error = ENOPROTOOPT;
        goto done;
    }

    if ((SOCK_TYPE(so) != SOCK_STREAM && SOCK_TYPE(so) != SOCK_DGRAM) ||
        (SOCK_PROTO(so) != IPPROTO_TCP && SOCK_PROTO(so) != IPPROTO_UDP) ||
        (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6)) {
        error = EINVAL;
        goto done;
    }

    if (SOCK_TYPE(so) == SOCK_STREAM && SOCK_PROTO(so) == IPPROTO_TCP) {
        struct tcpcb *tp = sototcpcb(so);
        if (tp == NULL || tp->t_state != TCPS_CLOSED) {
            error = EINVAL;
            goto done;
        }
    }

    error = soopt_getm(sopt, &token);
    if (error) {
        goto done;
    }

    error = soopt_mcopyin(sopt, token);
    if (error) {
        goto done;
    }

    error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_KEY_UNIT, sizeof(key_unit), (void *)&key_unit, NULL);
    if (!error) {
        key_unit = ntohl(key_unit);
        if (key_unit >= GROUP_COUNT_MAX) {
            key_unit = 0;
        }
    } else if (error != ENOENT) {
        FDLOG(LOG_ERR, &nil_pcb, "Failed to get the key unit from the token: %d", error);
        goto done;
    } else {
        key_unit = 0;
    }

    error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_CTL_UNIT, sizeof(ctl_unit), (void *)&ctl_unit, NULL);
    if (error) {
        FDLOG(LOG_ERR, &nil_pcb, "Failed to get the control socket unit from the token: %d", error);
        goto done;
    }

    /* A valid kernel control unit is required */
    ctl_unit = ntohl(ctl_unit);
    if (ctl_unit == 0 || ctl_unit >= GROUP_COUNT_MAX) {
        FDLOG(LOG_ERR, &nil_pcb, "Got an invalid control socket unit: %u", ctl_unit);
        error = EINVAL;
        goto done;
    }

    socket_unlock(so, 0);
    hmac_error = flow_divert_packet_verify_hmac(token, (key_unit != 0 ? key_unit : ctl_unit));
    socket_lock(so, 0);

    if (hmac_error && hmac_error != ENOENT) {
        FDLOG(LOG_ERR, &nil_pcb, "HMAC verification failed: %d", hmac_error);
        error = hmac_error;
        goto done;
    }

    error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_FLOW_ID, sizeof(flow_id), (void *)&flow_id, NULL);
    if (error && error != ENOENT) {
        FDLOG(LOG_ERR, &nil_pcb, "Failed to get the flow ID from the token: %d", error);
        goto done;
    }

    if (flow_id == 0) {
        error = flow_divert_pcb_init(so, ctl_unit);
        if (error == 0) {
            struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
            int log_level = LOG_NOTICE;

            error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_LOG_LEVEL,
                sizeof(log_level), &log_level, NULL);
            if (error == 0) {
                fd_cb->log_level = log_level;
            }
            error = 0;

            fd_cb->connect_token = token;
            token = NULL;
        }
    } else {
        error = flow_divert_attach(so, flow_id, ctl_unit);
    }

    if (hmac_error == 0) {
        struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
        if (fd_cb != NULL) {
            fd_cb->flags |= FLOW_DIVERT_HAS_HMAC;
        }
    }

done:
    if (token != NULL) {
        mbuf_freem(token);
    }

    return error;
}
int
flow_divert_token_get(struct socket *so, struct sockopt *sopt)
{
    uint32_t ctl_unit;
    int error = 0;
    uint8_t hmac[SHA_DIGEST_LENGTH];
    struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
    mbuf_t token = NULL;
    struct flow_divert_group *control_group = NULL;

    if (!(so->so_flags & SOF_FLOW_DIVERT)) {
        error = EINVAL;
        goto done;
    }

    VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);

    if (fd_cb->group == NULL) {
        error = EINVAL;
        goto done;
    }

    error = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_HEADER, &token);
    if (error) {
        FDLOG(LOG_ERR, fd_cb, "failed to allocate the header mbuf: %d", error);
        goto done;
    }

    ctl_unit = htonl(fd_cb->group->ctl_unit);

    error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_CTL_UNIT, sizeof(ctl_unit), &ctl_unit);
    if (error) {
        goto done;
    }

    error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_FLOW_ID, sizeof(fd_cb->hash), &fd_cb->hash);
    if (error) {
        goto done;
    }

    if (fd_cb->app_data != NULL) {
        error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_APP_DATA, fd_cb->app_data_length, fd_cb->app_data);
        if (error) {
            goto done;
        }
    }

    socket_unlock(so, 0);
    lck_rw_lock_shared(&g_flow_divert_group_lck);

    if (g_flow_divert_groups != NULL && g_active_group_count > 0 &&
        fd_cb->control_group_unit > 0 && fd_cb->control_group_unit < GROUP_COUNT_MAX) {
        control_group = g_flow_divert_groups[fd_cb->control_group_unit];
    }

    if (control_group != NULL) {
        lck_rw_lock_shared(&control_group->lck);
        ctl_unit = htonl(control_group->ctl_unit);
        error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_KEY_UNIT, sizeof(ctl_unit), &ctl_unit);
        if (!error) {
            error = flow_divert_packet_compute_hmac(token, control_group, hmac);
        }
        lck_rw_done(&control_group->lck);
    } else {
        error = ENOPROTOOPT;
    }

    lck_rw_done(&g_flow_divert_group_lck);
    socket_lock(so, 0);

    if (error) {
        goto done;
    }

    error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_HMAC, sizeof(hmac), hmac);
    if (error) {
        goto done;
    }

    if (sopt->sopt_val == USER_ADDR_NULL) {
        /* If the caller passed NULL to getsockopt, just set the size of the token and return */
        sopt->sopt_valsize = mbuf_pkthdr_len(token);
        goto done;
    }

    error = soopt_mcopyout(sopt, token);
    if (error) {
        token = NULL; /* For some reason, soopt_mcopyout() frees the mbuf if it fails */
    }

done:
    if (token != NULL) {
        mbuf_freem(token);
    }

    return error;
}
static errno_t
flow_divert_kctl_connect(kern_ctl_ref kctlref __unused, struct sockaddr_ctl *sac, void **unitinfo)
{
    struct flow_divert_group *new_group = NULL;
    int error = 0;

    if (sac->sc_unit >= GROUP_COUNT_MAX) {
        error = EINVAL;
        goto done;
    }

    MALLOC_ZONE(new_group, struct flow_divert_group *, sizeof(*new_group), M_FLOW_DIVERT_GROUP, M_WAITOK);
    if (new_group == NULL) {
        error = ENOBUFS;
        goto done;
    }

    memset(new_group, 0, sizeof(*new_group));

    lck_rw_init(&new_group->lck, flow_divert_mtx_grp, flow_divert_mtx_attr);
    RB_INIT(&new_group->pcb_tree);
    new_group->ctl_unit = sac->sc_unit;
    MBUFQ_INIT(&new_group->send_queue);
    new_group->signing_id_trie.root = NULL_TRIE_IDX;

    lck_rw_lock_exclusive(&g_flow_divert_group_lck);

    if (g_flow_divert_groups == NULL) {
        MALLOC(g_flow_divert_groups,
            struct flow_divert_group **,
            GROUP_COUNT_MAX * sizeof(struct flow_divert_group *),
            M_TEMP,
            M_WAITOK | M_ZERO);
    }

    if (g_flow_divert_groups == NULL) {
        error = ENOBUFS;
    } else if (g_flow_divert_groups[sac->sc_unit] != NULL) {
        error = EALREADY;
    } else {
        g_flow_divert_groups[sac->sc_unit] = new_group;
        g_active_group_count++;
    }

    lck_rw_done(&g_flow_divert_group_lck);

    *unitinfo = new_group;

done:
    if (error != 0 && new_group != NULL) {
        FREE_ZONE(new_group, sizeof(*new_group), M_FLOW_DIVERT_GROUP);
    }
    return error;
}
static errno_t
flow_divert_kctl_disconnect(kern_ctl_ref kctlref __unused, uint32_t unit, void *unitinfo)
{
    struct flow_divert_group *group = NULL;
    errno_t error = 0;

    if (unit >= GROUP_COUNT_MAX) {
        return EINVAL;
    }

    FDLOG(LOG_INFO, &nil_pcb, "disconnecting group %d", unit);

    lck_rw_lock_exclusive(&g_flow_divert_group_lck);

    if (g_flow_divert_groups == NULL || g_active_group_count == 0) {
        panic("flow divert group %u is disconnecting, but no groups are active (groups = %p, active count = %u", unit,
            g_flow_divert_groups, g_active_group_count);
    }

    group = g_flow_divert_groups[unit];

    if (group != (struct flow_divert_group *)unitinfo) {
        panic("group with unit %d (%p) != unit info (%p)", unit, group, unitinfo);
    }

    g_flow_divert_groups[unit] = NULL;
    g_active_group_count--;

    if (g_active_group_count == 0) {
        FREE(g_flow_divert_groups, M_TEMP);
        g_flow_divert_groups = NULL;
    }

    lck_rw_done(&g_flow_divert_group_lck);

    if (group != NULL) {
        flow_divert_close_all(group);

        lck_rw_lock_exclusive(&group->lck);

        if (group->token_key != NULL) {
            memset(group->token_key, 0, group->token_key_size);
            FREE(group->token_key, M_TEMP);
            group->token_key = NULL;
            group->token_key_size = 0;
        }

        /* Re-set the current trie */
        if (group->signing_id_trie.memory != NULL) {
            FREE(group->signing_id_trie.memory, M_TEMP);
        }
        memset(&group->signing_id_trie, 0, sizeof(group->signing_id_trie));
        group->signing_id_trie.root = NULL_TRIE_IDX;

        lck_rw_done(&group->lck);

        FREE_ZONE(group, sizeof(*group), M_FLOW_DIVERT_GROUP);
    } else {
        error = EINVAL;
    }

    return error;
}
static errno_t
flow_divert_kctl_send(kern_ctl_ref kctlref __unused, uint32_t unit __unused, void *unitinfo, mbuf_t m, int flags __unused)
{
    return flow_divert_input(m, (struct flow_divert_group *)unitinfo);
}
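/*
 * Kernel control "client has read data" callback.  If the enqueue-blocked
 * bit was set because a previous ctl_enqueuembuf failed, retry the packets
 * queued on the group and then give every PCB in the group a chance to send
 * its buffered data.
 */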
static void
flow_divert_kctl_rcvd(kern_ctl_ref kctlref __unused, uint32_t unit __unused, void *unitinfo, int flags __unused)
{
    struct flow_divert_group *group = (struct flow_divert_group *)unitinfo;

    if (!OSTestAndClear(GROUP_BIT_CTL_ENQUEUE_BLOCKED, &group->atomic_bits)) {
        struct flow_divert_pcb *fd_cb;
        SLIST_HEAD(, flow_divert_pcb) tmp_list;

        lck_rw_lock_shared(&g_flow_divert_group_lck);
        lck_rw_lock_exclusive(&group->lck);

        while (!MBUFQ_EMPTY(&group->send_queue)) {
            mbuf_t next_packet;
            FDLOG0(LOG_DEBUG, &nil_pcb, "trying ctl_enqueuembuf again");
            next_packet = MBUFQ_FIRST(&group->send_queue);
            int error = ctl_enqueuembuf(g_flow_divert_kctl_ref, group->ctl_unit, next_packet, CTL_DATA_EOR);
            if (error) {
                FDLOG(LOG_DEBUG, &nil_pcb, "ctl_enqueuembuf returned an error: %d", error);
                OSTestAndSet(GROUP_BIT_CTL_ENQUEUE_BLOCKED, &group->atomic_bits);
                lck_rw_done(&group->lck);
                lck_rw_done(&g_flow_divert_group_lck);
                return;
            }
            MBUFQ_DEQUEUE(&group->send_queue, next_packet);
        }

        SLIST_INIT(&tmp_list);

        RB_FOREACH(fd_cb, fd_pcb_tree, &group->pcb_tree) {
            FDRETAIN(fd_cb);
            SLIST_INSERT_HEAD(&tmp_list, fd_cb, tmp_list_entry);
        }

        lck_rw_done(&group->lck);

        SLIST_FOREACH(fd_cb, &tmp_list, tmp_list_entry) {
            FDLOCK(fd_cb);
            if (fd_cb->so != NULL) {
                socket_lock(fd_cb->so, 0);
                if (fd_cb->group != NULL) {
                    flow_divert_send_buffered_data(fd_cb, FALSE);
                }
                socket_unlock(fd_cb->so, 0);
            }
            FDUNLOCK(fd_cb);
            FDRELEASE(fd_cb);
        }

        lck_rw_done(&g_flow_divert_group_lck);
    }
}
static int
flow_divert_kctl_init(void)
{
    struct kern_ctl_reg ctl_reg;
    int result;

    memset(&ctl_reg, 0, sizeof(ctl_reg));

    strlcpy(ctl_reg.ctl_name, FLOW_DIVERT_CONTROL_NAME, sizeof(ctl_reg.ctl_name));
    ctl_reg.ctl_name[sizeof(ctl_reg.ctl_name) - 1] = '\0';
    ctl_reg.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_EXTENDED;
    ctl_reg.ctl_sendsize = FD_CTL_SENDBUFF_SIZE;
    ctl_reg.ctl_recvsize = FD_CTL_RCVBUFF_SIZE;

    ctl_reg.ctl_connect = flow_divert_kctl_connect;
    ctl_reg.ctl_disconnect = flow_divert_kctl_disconnect;
    ctl_reg.ctl_send = flow_divert_kctl_send;
    ctl_reg.ctl_rcvd = flow_divert_kctl_rcvd;

    result = ctl_register(&ctl_reg, &g_flow_divert_kctl_ref);
    if (result) {
        FDLOG(LOG_ERR, &nil_pcb, "flow_divert_kctl_init - ctl_register failed: %d\n", result);
        return result;
    }

    return 0;
}
void
flow_divert_init(void)
{
    memset(&nil_pcb, 0, sizeof(nil_pcb));
    nil_pcb.log_level = LOG_NOTICE;

    g_tcp_protosw = pffindproto(AF_INET, IPPROTO_TCP, SOCK_STREAM);

    VERIFY(g_tcp_protosw != NULL);

    memcpy(&g_flow_divert_in_protosw, g_tcp_protosw, sizeof(g_flow_divert_in_protosw));
    memcpy(&g_flow_divert_in_usrreqs, g_tcp_protosw->pr_usrreqs, sizeof(g_flow_divert_in_usrreqs));

    g_flow_divert_in_usrreqs.pru_connect = flow_divert_connect_out;
    g_flow_divert_in_usrreqs.pru_connectx = flow_divert_connectx_out;
    g_flow_divert_in_usrreqs.pru_control = flow_divert_in_control;
    g_flow_divert_in_usrreqs.pru_disconnect = flow_divert_close;
    g_flow_divert_in_usrreqs.pru_disconnectx = flow_divert_disconnectx;
    g_flow_divert_in_usrreqs.pru_peeraddr = flow_divert_getpeername;
    g_flow_divert_in_usrreqs.pru_rcvd = flow_divert_rcvd;
    g_flow_divert_in_usrreqs.pru_send = flow_divert_data_out;
    g_flow_divert_in_usrreqs.pru_shutdown = flow_divert_shutdown;
    g_flow_divert_in_usrreqs.pru_sockaddr = flow_divert_getsockaddr;
    g_flow_divert_in_usrreqs.pru_preconnect = flow_divert_preconnect;

    g_flow_divert_in_protosw.pr_usrreqs = &g_flow_divert_in_usrreqs;
    g_flow_divert_in_protosw.pr_ctloutput = flow_divert_ctloutput;

    /*
     * Socket filters shouldn't attach/detach to/from this protosw
     * since pr_protosw is to be used instead, which points to the
     * real protocol; if they do, it is a bug and we should panic.
     */
    g_flow_divert_in_protosw.pr_filter_head.tqh_first =
        (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
    g_flow_divert_in_protosw.pr_filter_head.tqh_last =
        (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;

    g_udp_protosw = pffindproto(AF_INET, IPPROTO_UDP, SOCK_DGRAM);
    VERIFY(g_udp_protosw != NULL);

    memcpy(&g_flow_divert_in_udp_protosw, g_udp_protosw, sizeof(g_flow_divert_in_udp_protosw));
    memcpy(&g_flow_divert_in_udp_usrreqs, g_udp_protosw->pr_usrreqs, sizeof(g_flow_divert_in_udp_usrreqs));

    g_flow_divert_in_udp_usrreqs.pru_connect = flow_divert_connect_out;
    g_flow_divert_in_udp_usrreqs.pru_connectx = flow_divert_connectx_out;
    g_flow_divert_in_udp_usrreqs.pru_control = flow_divert_in_control;
    g_flow_divert_in_udp_usrreqs.pru_disconnect = flow_divert_close;
    g_flow_divert_in_udp_usrreqs.pru_disconnectx = flow_divert_disconnectx;
    g_flow_divert_in_udp_usrreqs.pru_peeraddr = flow_divert_getpeername;
    g_flow_divert_in_udp_usrreqs.pru_rcvd = flow_divert_rcvd;
    g_flow_divert_in_udp_usrreqs.pru_send = flow_divert_data_out;
    g_flow_divert_in_udp_usrreqs.pru_shutdown = flow_divert_shutdown;
    g_flow_divert_in_udp_usrreqs.pru_sockaddr = flow_divert_getsockaddr;
    g_flow_divert_in_udp_usrreqs.pru_sosend_list = pru_sosend_list_notsupp;
    g_flow_divert_in_udp_usrreqs.pru_soreceive_list = pru_soreceive_list_notsupp;
    g_flow_divert_in_udp_usrreqs.pru_preconnect = flow_divert_preconnect;
    g_flow_divert_in_udp_protosw.pr_usrreqs = &g_flow_divert_in_udp_usrreqs;
    g_flow_divert_in_udp_protosw.pr_ctloutput = flow_divert_ctloutput;

    /*
     * Socket filters shouldn't attach/detach to/from this protosw
     * since pr_protosw is to be used instead, which points to the
     * real protocol; if they do, it is a bug and we should panic.
     */
    g_flow_divert_in_udp_protosw.pr_filter_head.tqh_first =
        (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
    g_flow_divert_in_udp_protosw.pr_filter_head.tqh_last =
        (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;

    g_tcp6_protosw = (struct ip6protosw *)pffindproto(AF_INET6, IPPROTO_TCP, SOCK_STREAM);

    VERIFY(g_tcp6_protosw != NULL);

    memcpy(&g_flow_divert_in6_protosw, g_tcp6_protosw, sizeof(g_flow_divert_in6_protosw));
    memcpy(&g_flow_divert_in6_usrreqs, g_tcp6_protosw->pr_usrreqs, sizeof(g_flow_divert_in6_usrreqs));

    g_flow_divert_in6_usrreqs.pru_connect = flow_divert_connect_out;
    g_flow_divert_in6_usrreqs.pru_connectx = flow_divert_connectx6_out;
    g_flow_divert_in6_usrreqs.pru_control = flow_divert_in6_control;
    g_flow_divert_in6_usrreqs.pru_disconnect = flow_divert_close;
    g_flow_divert_in6_usrreqs.pru_disconnectx = flow_divert_disconnectx;
    g_flow_divert_in6_usrreqs.pru_peeraddr = flow_divert_getpeername;
    g_flow_divert_in6_usrreqs.pru_rcvd = flow_divert_rcvd;
    g_flow_divert_in6_usrreqs.pru_send = flow_divert_data_out;
    g_flow_divert_in6_usrreqs.pru_shutdown = flow_divert_shutdown;
    g_flow_divert_in6_usrreqs.pru_sockaddr = flow_divert_getsockaddr;
    g_flow_divert_in6_usrreqs.pru_preconnect = flow_divert_preconnect;

    g_flow_divert_in6_protosw.pr_usrreqs = &g_flow_divert_in6_usrreqs;
    g_flow_divert_in6_protosw.pr_ctloutput = flow_divert_ctloutput;

    /*
     * Socket filters shouldn't attach/detach to/from this protosw
     * since pr_protosw is to be used instead, which points to the
     * real protocol; if they do, it is a bug and we should panic.
     */
    g_flow_divert_in6_protosw.pr_filter_head.tqh_first =
        (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
    g_flow_divert_in6_protosw.pr_filter_head.tqh_last =
        (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;

    g_udp6_protosw = (struct ip6protosw *)pffindproto(AF_INET6, IPPROTO_UDP, SOCK_DGRAM);

    VERIFY(g_udp6_protosw != NULL);

    memcpy(&g_flow_divert_in6_udp_protosw, g_udp6_protosw, sizeof(g_flow_divert_in6_udp_protosw));
    memcpy(&g_flow_divert_in6_udp_usrreqs, g_udp6_protosw->pr_usrreqs, sizeof(g_flow_divert_in6_udp_usrreqs));

    g_flow_divert_in6_udp_usrreqs.pru_connect = flow_divert_connect_out;
    g_flow_divert_in6_udp_usrreqs.pru_connectx = flow_divert_connectx6_out;
    g_flow_divert_in6_udp_usrreqs.pru_control = flow_divert_in6_control;
    g_flow_divert_in6_udp_usrreqs.pru_disconnect = flow_divert_close;
    g_flow_divert_in6_udp_usrreqs.pru_disconnectx = flow_divert_disconnectx;
    g_flow_divert_in6_udp_usrreqs.pru_peeraddr = flow_divert_getpeername;
    g_flow_divert_in6_udp_usrreqs.pru_rcvd = flow_divert_rcvd;
    g_flow_divert_in6_udp_usrreqs.pru_send = flow_divert_data_out;
    g_flow_divert_in6_udp_usrreqs.pru_shutdown = flow_divert_shutdown;
    g_flow_divert_in6_udp_usrreqs.pru_sockaddr = flow_divert_getsockaddr;
    g_flow_divert_in6_udp_usrreqs.pru_sosend_list = pru_sosend_list_notsupp;
    g_flow_divert_in6_udp_usrreqs.pru_soreceive_list = pru_soreceive_list_notsupp;
    g_flow_divert_in6_udp_usrreqs.pru_preconnect = flow_divert_preconnect;

    g_flow_divert_in6_udp_protosw.pr_usrreqs = &g_flow_divert_in6_udp_usrreqs;
    g_flow_divert_in6_udp_protosw.pr_ctloutput = flow_divert_ctloutput;

    /*
     * Socket filters shouldn't attach/detach to/from this protosw
     * since pr_protosw is to be used instead, which points to the
     * real protocol; if they do, it is a bug and we should panic.
     */
    g_flow_divert_in6_udp_protosw.pr_filter_head.tqh_first =
        (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
    g_flow_divert_in6_udp_protosw.pr_filter_head.tqh_last =
        (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;

    flow_divert_grp_attr = lck_grp_attr_alloc_init();
    if (flow_divert_grp_attr == NULL) {
        FDLOG0(LOG_ERR, &nil_pcb, "lck_grp_attr_alloc_init failed");
        g_init_result = ENOMEM;
        goto done;
    }

    flow_divert_mtx_grp = lck_grp_alloc_init(FLOW_DIVERT_CONTROL_NAME, flow_divert_grp_attr);
    if (flow_divert_mtx_grp == NULL) {
        FDLOG0(LOG_ERR, &nil_pcb, "lck_grp_alloc_init failed");
        g_init_result = ENOMEM;
        goto done;
    }

    flow_divert_mtx_attr = lck_attr_alloc_init();
    if (flow_divert_mtx_attr == NULL) {
        FDLOG0(LOG_ERR, &nil_pcb, "lck_attr_alloc_init failed");
        g_init_result = ENOMEM;
        goto done;
    }

    g_init_result = flow_divert_kctl_init();
    if (g_init_result) {
        goto done;
    }

    lck_rw_init(&g_flow_divert_group_lck, flow_divert_mtx_grp, flow_divert_mtx_attr);

done:
    if (g_init_result != 0) {
        if (flow_divert_mtx_attr != NULL) {
            lck_attr_free(flow_divert_mtx_attr);
            flow_divert_mtx_attr = NULL;
        }
        if (flow_divert_mtx_grp != NULL) {
            lck_grp_free(flow_divert_mtx_grp);
            flow_divert_mtx_grp = NULL;
        }
        if (flow_divert_grp_attr != NULL) {
            lck_grp_attr_free(flow_divert_grp_attr);
            flow_divert_grp_attr = NULL;
        }

        if (g_flow_divert_kctl_ref != NULL) {
            ctl_deregister(g_flow_divert_kctl_ref);
            g_flow_divert_kctl_ref = NULL;