/*
- * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <net/route.h>
#include <net/if.h>
#include <net/content_filter.h>
+#include <net/ntstat.h>
+#include <net/multi_layer_pkt_log.h>
-#define tcp_minmssoverload fring
-#define _IP_VHL
+#define tcp_minmssoverload fring
+#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
-#if INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
-#endif
#include <netinet/in_pcb.h>
-#if INET6
#include <netinet6/in6_pcb.h>
-#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/icmp_var.h>
-#if INET6
#include <netinet6/ip6_var.h>
-#endif
#include <netinet/mptcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cache.h>
#include <kern/thread_call.h>
-#if INET6
#include <netinet6/tcp6_var.h>
-#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
+#include <netinet/tcp_log.h>
+
#include <netinet6/ip6protosw.h>
#if IPSEC
#include <netinet6/ipsec.h>
-#if INET6
#include <netinet6/ipsec6.h>
-#endif
#endif /* IPSEC */
#if NECP
#undef tcp_minmssoverload
-#if CONFIG_MACF_NET
-#include <security/mac_framework.h>
-#endif /* MAC_NET */
-
#include <corecrypto/ccaes.h>
#include <libkern/crypto/aes.h>
#include <libkern/crypto/md5.h>
#include <sys/kdebug.h>
#include <mach/sdt.h>
+#include <atm/atm_internal.h>
+#include <pexpert/pexpert.h>
-#include <netinet/lro_ext.h>
-
-#define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
+#define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
static tcp_cc tcp_ccgen;
-extern int tcp_lq_overflow;
extern struct tcptimerlist tcp_timer_list;
extern struct tcptailq tcp_tw_tailq;
SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
- int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
+ int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
-#if INET6
SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt,
- CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
- "Default TCP Maximum Segment Size for IPv6");
-#endif
+ CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
+ "Default TCP Maximum Segment Size for IPv6");
int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int,
struct sysctl_req *);
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR,
- 0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
+ 0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
/* Current count of half-open TFO connections */
-int tcp_tfo_halfcnt = 0;
+int tcp_tfo_halfcnt = 0;
/* Maximum backlog of half-open TFO connections */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog,
- CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
- "Backlog queue for half-open TFO connections");
+ CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
+ "Backlog queue for half-open TFO connections");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
- int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
- "Enable TCP Fastopen (RFC 7413)");
+ int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
+ "Enable TCP Fastopen (RFC 7413)");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED,
- uint32_t, tcp_now_init, 0, "Initial tcp now value");
+ uint32_t, tcp_now_init, 0, "Initial tcp now value");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED,
- uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in micro seconds");
+ uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in microseconds");
/*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculous low MSS like 20 and send hundreds
 * of TCP segments instead of one. The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending. Set to zero to disable MINMSS
 * checking. This setting prevents us from sending too small packets.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
- int, tcp_minmss, TCP_MINMSS, "Minmum TCP Maximum Segment Size");
-int tcp_do_rfc1323 = 1;
-#if (DEVELOPMENT || DEBUG)
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323,
- CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1323, 0,
- "Enable rfc1323 (high performance TCP) extensions");
-#endif /* (DEVELOPMENT || DEBUG) */
-
-// Not used
-static int tcp_do_rfc1644 = 0;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644,
- CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1644, 0,
- "Enable rfc1644 (TTCP) extensions");
-
-SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_tcpdrain, CTLFLAG_RW | CTLFLAG_LOCKED,
- static int, do_tcpdrain, 0,
- "Enable tcp_drain routine for extra help when low on mbufs");
+ int, tcp_minmss, TCP_MINMSS, "Minimum TCP Maximum Segment Size");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
- &tcbinfo.ipi_count, 0, "Number of active PCBs");
+ &tcbinfo.ipi_count, 0, "Number of active PCBs");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tw_pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
- &tcbinfo.ipi_twcount, 0, "Number of pcbs in time-wait state");
+ &tcbinfo.ipi_twcount, 0, "Number of pcbs in time-wait state");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED,
- static int, icmp_may_rst, 1,
- "Certain ICMP unreachable messages may abort connections in SYN_SENT");
+ static int, icmp_may_rst, 1,
+ "Certain ICMP unreachable messages may abort connections in SYN_SENT");
-static int tcp_strict_rfc1948 = 0;
-static int tcp_isn_reseed_interval = 0;
+static int tcp_strict_rfc1948 = 0;
+static int tcp_isn_reseed_interval = 0;
#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW | CTLFLAG_LOCKED,
- &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");
+ &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval,
- CTLFLAG_RW | CTLFLAG_LOCKED,
- &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
#endif /* (DEVELOPMENT || DEBUG) */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
- int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
+ int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW,
- int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
+ int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
- __private_extern__ int , tcp_use_randomport, 0,
- "Randomize TCP port numbers");
+ __private_extern__ int, tcp_use_randomport, 0,
+ "Randomize TCP port numbers");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
- __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
+ __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
-static void tcp_cleartaocache(void);
-static void tcp_notify(struct inpcb *, int);
+#if (DEVELOPMENT || DEBUG)
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
+ CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
+ "Initalize RTT from route cache");
+#else
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
+ CTLFLAG_RD | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
+ "Initalize RTT from route cache");
+#endif /* (DEVELOPMENT || DEBUG) */
-struct zone *sack_hole_zone;
-struct zone *tcp_reass_zone;
-struct zone *tcp_bwmeas_zone;
-struct zone *tcp_rxt_seg_zone;
+static int tso_debug = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tso_debug, 0, "TSO verbosity");
-extern int slowlink_wsize; /* window correction for slow links */
+static void tcp_notify(struct inpcb *, int);
+
+struct zone *sack_hole_zone;
+struct zone *tcp_reass_zone;
+struct zone *tcp_bwmeas_zone;
+struct zone *tcp_rxt_seg_zone;
+
+extern int slowlink_wsize; /* window correction for slow links */
extern int path_mtu_discovery;
static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
-#define TCP_BWMEAS_BURST_MINSIZE 6
-#define TCP_BWMEAS_BURST_MAXSIZE 25
-
-static uint32_t bwmeas_elm_size;
+#define TCP_BWMEAS_BURST_MINSIZE 6
+#define TCP_BWMEAS_BURST_MAXSIZE 25
/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
-#define TCBHASHSIZE CONFIG_TCBHASHSIZE
+#define TCBHASHSIZE CONFIG_TCBHASHSIZE
#endif
-__private_extern__ int tcp_tcbhashsize = TCBHASHSIZE;
+__private_extern__ int tcp_tcbhashsize = TCBHASHSIZE;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
- &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
+ &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
-#define ALIGNMENT 32
-struct inp_tp {
- struct inpcb inp;
- struct tcpcb tcb __attribute__((aligned(ALIGNMENT)));
+#define ALIGNMENT 32
+struct inp_tp {
+ struct inpcb inp;
+ struct tcpcb tcb __attribute__((aligned(ALIGNMENT)));
};
#undef ALIGNMENT
int get_inpcb_str_size(void);
int get_tcp_str_size(void);
+os_log_t tcp_mpkl_log_object = NULL;
+
static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
static lck_attr_t *tcp_uptime_mtx_attr = NULL;
tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size)
{
u_char in[CCAES_BLOCK_SIZE];
-#if INET6
int isipv6 = inp->inp_vflag & INP_IPV6;
-#endif
VERIFY(blk_size == CCAES_BLOCK_SIZE);
bzero(&in[0], CCAES_BLOCK_SIZE);
bzero(&out[0], CCAES_BLOCK_SIZE);
-#if INET6
- if (isipv6)
+ if (isipv6) {
memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
- else
-#endif /* INET6 */
+ } else {
memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
+ }
aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
}
bzero(keystring, sizeof(keystring));
error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
- if (error)
+ if (error) {
goto exit;
+ }
for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
/*
aes_encrypt_key128((u_char *)key, &tfo_ctx);
exit:
- return (error);
+ return error;
}
int
get_inpcb_str_size(void)
{
- return (sizeof(struct inpcb));
+ return sizeof(struct inpcb);
}
int
get_tcp_str_size(void)
{
- return (sizeof(struct tcpcb));
+ return sizeof(struct tcpcb);
}
static int scale_to_powerof2(int size);
* 3. Same value as argument size if it is already a power of two.
*/
static int
-scale_to_powerof2(int size) {
+scale_to_powerof2(int size)
+{
/* Handle special case of size = 0 */
int ret = size ? size : 1;
* its highest set bit at which point
* it is rounded down power of two.
*/
- size = size & (size -1);
+ size = size & (size - 1);
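+ /* e.g. 44 (0b101100) -> 40 -> 32 here; the round-up below then yields 64 */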
}
/* Check for overflow when rounding up */
}
}
- return (ret);
+ return ret;
}
static void
static int tcp_initialized = 0;
vm_size_t str_size;
struct inpcbinfo *pcbinfo;
+ uint32_t logging_config;
- VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);
+ VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
- if (tcp_initialized)
+ if (tcp_initialized) {
return;
+ }
tcp_initialized = 1;
tcp_ccgen = 1;
- tcp_cleartaocache();
-
tcp_keepinit = TCPTV_KEEP_INIT;
tcp_keepidle = TCPTV_KEEP_IDLE;
tcp_keepintvl = TCPTV_KEEPINTVL;
/* expose initial uptime/now via sysctl for utcp to keep time sync */
tcp_now_init = tcp_now;
- tcp_microuptime_init = tcp_uptime.tv_sec * 1000 + tcp_uptime.tv_usec;
+ tcp_microuptime_init =
+ (uint32_t)(tcp_uptime.tv_usec + (tcp_uptime.tv_sec * USEC_PER_SEC));
SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init);
SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init);
tcp_tcbhashsize = 16;
}
printf("WARNING: TCB hash size not a power of 2, "
- "scaled from %d to %d.\n",
- old_hash_size,
- tcp_tcbhashsize);
+ "scaled from %d to %d.\n",
+ old_hash_size,
+ tcp_tcbhashsize);
}
tcbinfo.ipi_hashbase = hashinit(tcp_tcbhashsize, M_PCB,
&tcbinfo.ipi_hashmask);
tcbinfo.ipi_porthashbase = hashinit(tcp_tcbhashsize, M_PCB,
- &tcbinfo.ipi_porthashmask);
- str_size = P2ROUNDUP(sizeof(struct inp_tp), sizeof(u_int64_t));
- tcbinfo.ipi_zone = zinit(str_size, 120000*str_size, 8192, "tcpcb");
- zone_change(tcbinfo.ipi_zone, Z_CALLERACCT, FALSE);
- zone_change(tcbinfo.ipi_zone, Z_EXPAND, TRUE);
+ &tcbinfo.ipi_porthashmask);
+ str_size = (vm_size_t)P2ROUNDUP(sizeof(struct inp_tp), sizeof(u_int64_t));
+ tcbinfo.ipi_zone = zone_create("tcpcb", str_size, ZC_NONE);
tcbinfo.ipi_gc = tcp_gc;
tcbinfo.ipi_timer = tcp_itimer;
in_pcbinfo_attach(&tcbinfo);
- str_size = P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t));
- sack_hole_zone = zinit(str_size, 120000*str_size, 8192,
- "sack_hole zone");
- zone_change(sack_hole_zone, Z_CALLERACCT, FALSE);
- zone_change(sack_hole_zone, Z_EXPAND, TRUE);
-
- str_size = P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t));
- tcp_reass_zone = zinit(str_size, (nmbclusters >> 4) * str_size,
- 0, "tcp_reass_zone");
- if (tcp_reass_zone == NULL) {
- panic("%s: failed allocating tcp_reass_zone", __func__);
- /* NOTREACHED */
- }
- zone_change(tcp_reass_zone, Z_CALLERACCT, FALSE);
- zone_change(tcp_reass_zone, Z_EXPAND, TRUE);
+ str_size = (vm_size_t)P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t));
+ sack_hole_zone = zone_create("sack_hole zone", str_size, ZC_NONE);
- bwmeas_elm_size = P2ROUNDUP(sizeof(struct bwmeas), sizeof(u_int64_t));
- tcp_bwmeas_zone = zinit(bwmeas_elm_size, (100 * bwmeas_elm_size), 0,
- "tcp_bwmeas_zone");
- if (tcp_bwmeas_zone == NULL) {
- panic("%s: failed allocating tcp_bwmeas_zone", __func__);
- /* NOTREACHED */
- }
- zone_change(tcp_bwmeas_zone, Z_CALLERACCT, FALSE);
- zone_change(tcp_bwmeas_zone, Z_EXPAND, TRUE);
-
- str_size = P2ROUNDUP(sizeof(struct tcp_ccstate), sizeof(u_int64_t));
- tcp_cc_zone = zinit(str_size, 20000 * str_size, 0, "tcp_cc_zone");
- zone_change(tcp_cc_zone, Z_CALLERACCT, FALSE);
- zone_change(tcp_cc_zone, Z_EXPAND, TRUE);
-
- str_size = P2ROUNDUP(sizeof(struct tcp_rxt_seg), sizeof(u_int64_t));
- tcp_rxt_seg_zone = zinit(str_size, 10000 * str_size, 0,
- "tcp_rxt_seg_zone");
- zone_change(tcp_rxt_seg_zone, Z_CALLERACCT, FALSE);
- zone_change(tcp_rxt_seg_zone, Z_EXPAND, TRUE);
-
-#if INET6
-#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
-#else /* INET6 */
-#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
-#endif /* INET6 */
+ str_size = (vm_size_t)P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t));
+ tcp_reass_zone = zone_create("tcp_reass_zone", str_size, ZC_NONE);
+
+ str_size = (vm_size_t)P2ROUNDUP(sizeof(struct bwmeas), sizeof(u_int64_t));
+ tcp_bwmeas_zone = zone_create("tcp_bwmeas_zone", str_size, ZC_ZFREE_CLEARMEM);
+
+ str_size = (vm_size_t)P2ROUNDUP(sizeof(struct tcp_ccstate), sizeof(u_int64_t));
+ tcp_cc_zone = zone_create("tcp_cc_zone", str_size, ZC_NONE);
+
+ str_size = (vm_size_t)P2ROUNDUP(sizeof(struct tcp_rxt_seg), sizeof(u_int64_t));
+ tcp_rxt_seg_zone = zone_create("tcp_rxt_seg_zone", str_size, ZC_NONE);
+
+#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
if (max_protohdr < TCP_MINPROTOHDR) {
_max_protohdr = TCP_MINPROTOHDR;
- _max_protohdr = max_protohdr; /* round it up */
+ _max_protohdr = (int)max_protohdr; /* round it up */
}
- if (max_linkhdr + max_protohdr > MCLBYTES)
+ if (max_linkhdr + max_protohdr > MCLBYTES) {
panic("tcp_init");
+ }
#undef TCP_MINPROTOHDR
/* Initialize time wait and timer lists */
if ((tcp_timer_list.mtx = lck_mtx_alloc_init(tcp_timer_list.mtx_grp,
tcp_timer_list.mtx_attr)) == NULL) {
panic("failed to allocate memory for tcp_timer_list.mtx\n");
- };
+ }
tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL);
if (tcp_timer_list.call == NULL) {
panic("failed to allocate call entry 1 in tcp_init\n");
tcp_uptime_lock = lck_spin_alloc_init(tcp_uptime_mtx_grp,
tcp_uptime_mtx_attr);
- /* Initialize TCP LRO data structures */
- tcp_lro_init();
-
/* Initialize TCP Cache */
tcp_cache_init();
+ tcp_mpkl_log_object = MPKL_CREATE_LOGOBJECT("com.apple.xnu.tcp");
+ if (tcp_mpkl_log_object == NULL) {
+ panic("MPKL_CREATE_LOGOBJECT failed");
+ }
+
+ logging_config = atm_get_diagnostic_config();
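+ /*
+  * The top bit of the diagnostic config appears to act as a privacy
+  * switch: when it is set, TCP logging keeps identifying fields out
+  * of the logs.
+  */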
+ if (logging_config & 0x80000000) {
+ tcp_log_privacy = 1;
+ }
+
+ PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags));
+
/*
- * If more than 60 MB of mbuf pool is available, increase the
+ * If more than 4GB of actual memory is available, increase the
* maximum allowed receive and send socket buffer size.
*/
- if (nmbclusters > 30720) {
- #if CONFIG_EMBEDDED
- tcp_autorcvbuf_max = 2 * 1024 * 1024;
- tcp_autosndbuf_max = 2 * 1024 * 1024;
- #else
- tcp_autorcvbuf_max = 1024 * 1024;
- tcp_autosndbuf_max = 1024 * 1024;
- #endif /* CONFIG_EMBEDDED */
+ if (mem_actual >= (1ULL << (GBSHIFT + 2))) {
+ tcp_autorcvbuf_max = 4 * 1024 * 1024;
+ tcp_autosndbuf_max = 4 * 1024 * 1024;
+
SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max);
SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max);
-
- /*
- * Receive buffer max for cellular interfaces supporting
- * Carrier Aggregation is higher
- */
- tcp_autorcvbuf_max_ca = 2 * 1024 * 1024;
}
}
struct inpcb *inp = tp->t_inpcb;
struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
-#if INET6
if ((inp->inp_vflag & INP_IPV6) != 0) {
struct ip6_hdr *ip6;
ip6 = (struct ip6_hdr *)ip_ptr;
ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
- (inp->inp_flow & IPV6_FLOWINFO_MASK);
+ (inp->inp_flow & IPV6_FLOWINFO_MASK);
ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
- (IPV6_VERSION & IPV6_VERSION_MASK);
+ (IPV6_VERSION & IPV6_VERSION_MASK);
ip6->ip6_plen = htons(sizeof(struct tcphdr));
ip6->ip6_nxt = IPPROTO_TCP;
ip6->ip6_hlim = 0;
ip6->ip6_src = inp->in6p_laddr;
ip6->ip6_dst = inp->in6p_faddr;
tcp_hdr->th_sum = in6_pseudo(&inp->in6p_laddr, &inp->in6p_faddr,
- htonl(sizeof (struct tcphdr) + IPPROTO_TCP));
- } else
-#endif
- {
+ htonl(sizeof(struct tcphdr) + IPPROTO_TCP));
+ } else {
struct ip *ip = (struct ip *) ip_ptr;
ip->ip_vhl = IP_VHL_BORING;
struct tcptemp *n;
m = m_get(M_DONTWAIT, MT_HEADER);
- if (m == NULL)
- return (0);
+ if (m == NULL) {
+ return NULL;
+ }
m->m_len = sizeof(struct tcptemp);
n = mtod(m, struct tcptemp *);
tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
- return (n);
+ return n;
}
/*
*/
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
- tcp_seq ack, tcp_seq seq, int flags, struct tcp_respond_args *tra)
+ tcp_seq ack, tcp_seq seq, uint8_t flags, struct tcp_respond_args *tra)
{
- int tlen;
+ uint16_t tlen;
int win = 0;
struct route *ro = 0;
struct route sro;
struct ip *ip;
struct tcphdr *nth;
-#if INET6
struct route_in6 *ro6 = 0;
struct route_in6 sro6;
struct ip6_hdr *ip6;
int isipv6;
-#endif /* INET6 */
struct ifnet *outif;
int sotc = SO_TC_UNSPEC;
+ bool check_qos_marking_again = FALSE;
-#if INET6
isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
ip6 = ipgen;
-#endif /* INET6 */
ip = ipgen;
if (tp) {
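+ /*
+  * Unless a marking-policy override is set on the socket, remember
+  * to ask the IP output layer to re-evaluate the QOS marking policy
+  * and copy its verdict back to the socket once the packet has been
+  * sent (see below).
+  */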
+ check_qos_marking_again = tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE ? FALSE : TRUE;
if (!(flags & TH_RST)) {
win = tcp_sbspace(tp);
- if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale)
+ if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) {
win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
+ }
}
-#if INET6
- if (isipv6)
+ if (isipv6) {
ro6 = &tp->t_inpcb->in6p_route;
- else
-#endif /* INET6 */
- ro = &tp->t_inpcb->inp_route;
+ } else {
+ ro = &tp->t_inpcb->inp_route;
+ }
} else {
-#if INET6
if (isipv6) {
ro6 = &sro6;
bzero(ro6, sizeof(*ro6));
- } else
-#endif /* INET6 */
- {
+ } else {
ro = &sro;
bzero(ro, sizeof(*ro));
}
}
if (m == 0) {
- m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */
- if (m == NULL)
+ m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */
+ if (m == NULL) {
return;
+ }
tlen = 0;
m->m_data += max_linkhdr;
-#if INET6
if (isipv6) {
VERIFY((MHLEN - max_linkhdr) >=
- (sizeof (*ip6) + sizeof (*nth)));
+ (sizeof(*ip6) + sizeof(*nth)));
bcopy((caddr_t)ip6, mtod(m, caddr_t),
sizeof(struct ip6_hdr));
ip6 = mtod(m, struct ip6_hdr *);
nth = (struct tcphdr *)(void *)(ip6 + 1);
- } else
-#endif /* INET6 */
- {
+ } else {
VERIFY((MHLEN - max_linkhdr) >=
- (sizeof (*ip) + sizeof (*nth)));
+ (sizeof(*ip) + sizeof(*nth)));
bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
ip = mtod(m, struct ip *);
nth = (struct tcphdr *)(void *)(ip + 1);
}
bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
#if MPTCP
- if ((tp) && (tp->t_mpflags & TMPF_RESET))
+ if ((tp) && (tp->t_mpflags & TMPF_RESET)) {
flags = (TH_RST | TH_ACK);
- else
+ } else
#endif
flags = TH_ACK;
} else {
m->m_data = (caddr_t)ipgen;
/* m_len is set later */
tlen = 0;
-#define xchg(a, b, type) { type t; t = a; a = b; b = t; }
-#if INET6
+#define xchg(a, b, type) { type t; t = a; a = b; b = t; }
if (isipv6) {
/* Expect 32-bit aligned IP on strict-align platforms */
IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
nth = (struct tcphdr *)(void *)(ip6 + 1);
- } else
-#endif /* INET6 */
- {
+ } else {
/* Expect 32-bit aligned IP on strict-align platforms */
IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
}
-#if INET6
if (isipv6) {
- ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
- tlen));
- tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
- } else
-#endif
- {
- tlen += sizeof (struct tcpiphdr);
+ ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
+ tlen));
+ tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+ } else {
+ tlen += sizeof(struct tcpiphdr);
ip->ip_len = tlen;
- ip->ip_ttl = ip_defttl;
+ ip->ip_ttl = (uint8_t)ip_defttl;
}
m->m_len = tlen;
m->m_pkthdr.len = tlen;
m->m_pkthdr.rcvif = 0;
-#if CONFIG_MACF_NET
- if (tp != NULL && tp->t_inpcb != NULL) {
- /*
- * Packet is associated with a socket, so allow the
- * label of the response to reflect the socket label.
- */
- mac_mbuf_label_associate_inpcb(tp->t_inpcb, m);
- } else {
- /*
- * Packet is not associated with a socket, so possibly
- * update the label in place.
- */
- mac_netinet_tcp_reply(m);
+ if (tra->keep_alive) {
+ m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
}
-#endif
nth->th_seq = htonl(seq);
nth->th_ack = htonl(ack);
nth->th_x2 = 0;
- nth->th_off = sizeof (struct tcphdr) >> 2;
+ nth->th_off = sizeof(struct tcphdr) >> 2;
nth->th_flags = flags;
- if (tp)
+ if (tp) {
nth->th_win = htons((u_short) (win >> tp->rcv_scale));
- else
+ } else {
nth->th_win = htons((u_short)win);
+ }
nth->th_urp = 0;
-#if INET6
if (isipv6) {
nth->th_sum = 0;
nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
- htonl((tlen - sizeof (struct ip6_hdr)) + IPPROTO_TCP));
+ htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP));
m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
ro6 && ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
- } else
-#endif /* INET6 */
- {
+ } else {
nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
+ htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
m->m_pkthdr.csum_flags = CSUM_TCP;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
}
#if TCPDEBUG
- if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
+ if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) {
tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
+ }
#endif
#if NECP
- necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0);
+ necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0, 0);
#endif /* NECP */
#if IPSEC
if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
- ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
+ ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
m_freem(m);
return;
}
svc_flags |= PKT_SCF_IPV6;
}
sotc = tp->t_inpcb->inp_socket->so_traffic_class;
- set_packet_service_class(m, tp->t_inpcb->inp_socket,
- sotc, svc_flags);
+ if ((flags & TH_RST) == 0) {
+ set_packet_service_class(m, tp->t_inpcb->inp_socket,
+ sotc, svc_flags);
+ } else {
+ m_set_service_class(m, MBUF_SC_BK_SYS);
+ }
/* Embed flowhash and flow control flags */
m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
m->m_pkthdr.pkt_flowid = tp->t_inpcb->inp_flowhash;
m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
m->m_pkthdr.pkt_proto = IPPROTO_TCP;
+ m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid;
+ m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid;
+
+ if (flags & TH_RST) {
+ m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt;
+ }
+ } else {
+ if (flags & TH_RST) {
+ m->m_pkthdr.comp_gencnt = TCP_ACK_COMPRESSION_DUMMY;
+ m_set_service_class(m, MBUF_SC_BK_SYS);
+ }
}
-#if INET6
if (isipv6) {
- struct ip6_out_args ip6oa = { tra->ifscope, { 0 },
- IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR, 0,
- SO_TC_UNSPEC, _NET_SERVICE_TYPE_UNSPEC};
-
- if (tra->ifscope != IFSCOPE_NONE)
+ struct ip6_out_args ip6oa;
+ bzero(&ip6oa, sizeof(ip6oa));
+ ip6oa.ip6oa_boundif = tra->ifscope;
+ ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
+ ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
+ ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
+
+ if (tra->ifscope != IFSCOPE_NONE) {
ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
- if (tra->nocell)
+ }
+ if (tra->nocell) {
ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
- if (tra->noexpensive)
+ }
+ if (tra->noexpensive) {
ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
- if (tra->awdl_unrestricted)
+ }
+ if (tra->noconstrained) {
+ ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
+ }
+ if (tra->awdl_unrestricted) {
ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
- if (tra->intcoproc_allowed)
+ }
+ if (tra->intcoproc_allowed) {
ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
+ }
ip6oa.ip6oa_sotc = sotc;
if (tp != NULL) {
- if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED))
+ if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
+ }
+ ip6oa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
+ if (check_qos_marking_again) {
+ ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY;
+ }
ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
}
(void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
NULL, &ip6oa);
+ if (check_qos_marking_again) {
+ struct inpcb *inp = tp->t_inpcb;
+ inp->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount;
+ if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) {
+ inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
+ } else {
+ inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
+ }
+ }
+
if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL &&
(outif = ro6->ro_rt->rt_ifp) !=
tp->t_inpcb->in6p_last_outifp) {
tp->t_inpcb->in6p_last_outifp = outif;
}
- if (ro6 == &sro6)
+ if (ro6 == &sro6) {
ROUTE_RELEASE(ro6);
- } else
-#endif /* INET6 */
- {
- struct ip_out_args ipoa = { tra->ifscope, { 0 },
- IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR, 0,
- SO_TC_UNSPEC, _NET_SERVICE_TYPE_UNSPEC };
-
- if (tra->ifscope != IFSCOPE_NONE)
+ }
+ } else {
+ struct ip_out_args ipoa;
+ bzero(&ipoa, sizeof(ipoa));
+ ipoa.ipoa_boundif = tra->ifscope;
+ ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
+ ipoa.ipoa_sotc = SO_TC_UNSPEC;
+ ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
+
+ if (tra->ifscope != IFSCOPE_NONE) {
ipoa.ipoa_flags |= IPOAF_BOUND_IF;
- if (tra->nocell)
+ }
+ if (tra->nocell) {
ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
- if (tra->noexpensive)
+ }
+ if (tra->noexpensive) {
ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
- if (tra->awdl_unrestricted)
+ }
+ if (tra->noconstrained) {
+ ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
+ }
+ if (tra->awdl_unrestricted) {
ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
+ }
ipoa.ipoa_sotc = sotc;
if (tp != NULL) {
- if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED))
+ if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
+ }
+ if (!(tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
+ ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
+ }
+ ipoa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount;
ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
}
if (ro != &sro) {
*/
(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
+ if (check_qos_marking_again) {
+ struct inpcb *inp = tp->t_inpcb;
+ inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount;
+ if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) {
+ inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
+ } else {
+ inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
+ }
+ }
if (tp != NULL && sro.ro_rt != NULL &&
(outif = sro.ro_rt->rt_ifp) !=
tp->t_inpcb->inp_last_outifp) {
tp->t_inpcb->inp_last_outifp = outif;
-
}
if (ro != &sro) {
/* Synchronize cached PCB route */
struct inp_tp *it;
struct tcpcb *tp;
struct socket *so = inp->inp_socket;
-#if INET6
int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
-#endif /* INET6 */
+ uint32_t random_32;
calculate_tcp_clock();
if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
- it = (struct inp_tp *)(void *)inp;
- tp = &it->tcb;
+ it = (struct inp_tp *)(void *)inp;
+ tp = &it->tcb;
} else {
- tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb;
+ tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb;
}
bzero((char *) tp, sizeof(struct tcpcb));
LIST_INIT(&tp->t_segq);
- tp->t_maxseg = tp->t_maxopd =
-#if INET6
- isipv6 ? tcp_v6mssdflt :
-#endif /* INET6 */
- tcp_mssdflt;
+ tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
- if (tcp_do_rfc1323)
- tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
- if (tcp_do_sack)
- tp->t_flagsext |= TF_SACK_ENABLE;
+ tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP);
+ tp->t_flagsext |= TF_SACK_ENABLE;
TAILQ_INIT(&tp->snd_holes);
SLIST_INIT(&tp->t_rxt_segments);
tp->t_rttmin = tcp_TCPTV_MIN;
tp->t_rxtcur = TCPTV_RTOBASE;
- if (tcp_use_newreno)
+ if (tcp_use_newreno) {
/* use newreno by default */
tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
- else
+ } else {
tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
+ }
tcp_cc_allocate_state(tp);
- if (CC_ALGO(tp)->init != NULL)
+ if (CC_ALGO(tp)->init != NULL) {
CC_ALGO(tp)->init(tp);
+ }
- tp->snd_cwnd = TCP_CC_CWND_INIT_BYTES;
+ tp->snd_cwnd = tcp_initial_cwnd(tp);
tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->t_rcvtime = tcp_now;
tp->tentry.timer_start = tcp_now;
+ tp->rcv_unackwin = tcp_now;
tp->t_persist_timeout = tcp_max_persist_timeout;
tp->t_persist_stop = 0;
tp->t_flagsext |= TF_RCVUNACK_WAITSS;
- tp->t_rexmtthresh = tcprexmtthresh;
+ tp->t_rexmtthresh = (uint8_t)tcprexmtthresh;
+ tp->rfbuf_ts = tcp_now;
+ tp->rfbuf_space = tcp_initial_cwnd(tp);
+ tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
/* Enable bandwidth measurement on this connection */
tp->t_flagsext |= TF_MEASURESNDBW;
if (tp->t_bwmeas == NULL) {
tp->t_bwmeas = tcp_bwmeas_alloc(tp);
- if (tp->t_bwmeas == NULL)
+ if (tp->t_bwmeas == NULL) {
tp->t_flagsext &= ~TF_MEASURESNDBW;
+ }
}
/* Clear time wait tailq entry */
tp->t_twentry.tqe_next = NULL;
tp->t_twentry.tqe_prev = NULL;
+ read_frandom(&random_32, sizeof(random_32));
+ if (__probable(tcp_do_ack_compression)) {
+ tp->t_comp_gencnt = random_32;
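+ /*
+  * TCP_ACK_COMPRESSION_DUMMY is reserved for RSTs generated
+  * without a pcb (see tcp_respond()), so never hand it out as
+  * a real generation count.
+  */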
+ if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) {
+ tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
+ }
+ tp->t_comp_lastinc = tcp_now;
+ }
+
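+ /*
+  * A per-connection timestamp offset (presumably following the
+  * RFC 7323 privacy guidance) keeps the TS clock from being usable
+  * as a host-wide fingerprint across connections.
+  */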
+ if (__probable(tcp_randomize_timestamps)) {
+ tp->t_ts_offset = random_32;
+ }
+
/*
* IPv4 TTL initialization is necessary for an IPv6 socket as well,
* because the socket may be bound to an IPv6 wildcard address,
* which may match an IPv4-mapped IPv6 address.
*/
- inp->inp_ip_ttl = ip_defttl;
+ inp->inp_ip_ttl = (uint8_t)ip_defttl;
inp->inp_ppcb = (caddr_t)tp;
- return (tp); /* XXX */
+ return tp; /* XXX */
}
/*
if (TCPS_HAVERCVDSYN(tp->t_state)) {
DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
- struct tcpcb *, tp, int32_t, TCPS_CLOSED);
+ struct tcpcb *, tp, int32_t, TCPS_CLOSED);
tp->t_state = TCPS_CLOSED;
(void) tcp_output(tp);
tcpstat.tcps_drops++;
- } else
+ } else {
tcpstat.tcps_conndrops++;
- if (errno == ETIMEDOUT && tp->t_softerror)
+ }
+ if (errno == ETIMEDOUT && tp->t_softerror) {
errno = tp->t_softerror;
- so->so_error = errno;
- return (tcp_close(tp));
+ }
+ so->so_error = (u_short)errno;
+
+ TCP_LOG_CONNECTION_SUMMARY(tp);
+
+ return tcp_close(tp);
}
void
u_int32_t rtt = rt->rt_rmx.rmx_rtt;
int isnetlocal = (tp->t_flags & TF_LOCAL);
- if (rtt != 0) {
+ TCP_LOG_RTM_RTT(tp, rt);
+
+ if (rtt != 0 && tcp_init_rtt_from_cache != 0) {
/*
* XXX the lock bit for RTT indicates that the value
* is also a minimum value; this is subject to time.
*/
- if (rt->rt_rmx.rmx_locks & RTV_RTT)
+ if (rt->rt_rmx.rmx_locks & RTV_RTT) {
tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
- else
+ } else {
tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN :
TCPTV_REXMTMIN;
+ }
+
tp->t_srtt =
rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
tcpstat.tcps_usedrtt++;
+
if (rt->rt_rmx.rmx_rttvar) {
tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
(RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
tp->t_rttvar =
tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
}
+
+ /*
+ * The RTO formula in the route metric case is based on:
+ * 4 * srtt + 8 * rttvar
+ * modulo the min, max and slop
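+ *
+ * (t_srtt is kept scaled by TCP_RTT_SCALE (32) and t_rttvar by
+ * TCP_RTTVAR_SCALE (16), so ((t_srtt >> 2) + t_rttvar) >> 1 works
+ * out to 4*srtt + 8*rttvar in unscaled clock ticks.)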
+ */
TCPT_RANGESET(tp->t_rxtcur,
- ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
- tp->t_rttmin, TCPTV_REXMTMAX,
- TCP_ADD_REXMTSLOP(tp));
+ ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
+ tp->t_rttmin, TCPTV_REXMTMAX,
+ TCP_ADD_REXMTSLOP(tp));
}
+
+ TCP_LOG_RTT_INFO(tp);
}
static inline void
{
struct inpcb *inp;
struct socket *so;
- if (tp == NULL || ifs == NULL)
+ if (tp == NULL || ifs == NULL) {
return;
+ }
bzero(ifs, sizeof(*ifs));
inp = tp->t_inpcb;
} else {
ifs->bw_sndbw_max = 0;
}
- if (tp->t_bwmeas!= NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
+ if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max;
} else {
ifs->bw_rcvbw_max = 0;
/* SACK episodes */
stat->sack_episodes += ifs->sack_recovery_episodes;
- if (ifs->connreset)
+ if (ifs->connreset) {
stat->rst_drop++;
+ }
}
static inline void
if (ifs->bw_sndbw_max > 0) {
/* convert from bytes per ms to bits per second */
ifs->bw_sndbw_max *= 8000;
- stat->lim_ul_max_bandwidth = max(stat->lim_ul_max_bandwidth,
+ stat->lim_ul_max_bandwidth = MAX(stat->lim_ul_max_bandwidth,
ifs->bw_sndbw_max);
}
if (ifs->bw_rcvbw_max > 0) {
/* convert from bytes per ms to bits per second */
ifs->bw_rcvbw_max *= 8000;
- stat->lim_dl_max_bandwidth = max(stat->lim_dl_max_bandwidth,
+ stat->lim_dl_max_bandwidth = MAX(stat->lim_dl_max_bandwidth,
ifs->bw_rcvbw_max);
}
if (stat->lim_rtt_min == 0) {
stat->lim_rtt_min = ifs->rttmin;
} else {
- stat->lim_rtt_min = min(stat->lim_rtt_min, ifs->rttmin);
+ stat->lim_rtt_min = MIN(stat->lim_rtt_min, ifs->rttmin);
}
/* connection timeouts */
stat->lim_conn_attempts++;
- if (ifs->conntimeout)
+ if (ifs->conntimeout) {
stat->lim_conn_timeouts++;
+ }
/* bytes sent using background delay-based algorithms */
stat->lim_bk_txpkts += ifs->bk_txpackets;
-
}
/*
{
struct inpcb *inp = tp->t_inpcb;
struct socket *so = inp->inp_socket;
-#if INET6
int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
-#endif /* INET6 */
struct route *ro;
struct rtentry *rt;
int dosavessthresh;
struct ifnet_stats_per_flow ifs;
/* tcp_close was called previously, bail */
- if (inp->inp_ppcb == NULL)
- return (NULL);
+ if (inp->inp_ppcb == NULL) {
+ return NULL;
+ }
+
+ tcp_del_fsw_flow(tp);
tcp_canceltimers(tp);
KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
* with the cleanup.
*/
if ((tp->t_flags & TF_CLOSING) ||
- inp->inp_sndinprog_cnt > 0) {
+ inp->inp_sndinprog_cnt > 0) {
tp->t_flags |= TF_CLOSING;
- return (NULL);
+ return NULL;
}
+ TCP_LOG_CONNECTION_SUMMARY(tp);
+
DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
- struct tcpcb *, tp, int32_t, TCPS_CLOSED);
+ struct tcpcb *, tp, int32_t, TCPS_CLOSED);
-#if INET6
ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
-#else
- ro = &inp->inp_route;
-#endif
rt = ro->ro_rt;
- if (rt != NULL)
+ if (rt != NULL) {
RT_LOCK_SPIN(rt);
+ }
/*
* If we got enough samples through the srtt filter,
*/
if (tp->t_rttupdated >= 16) {
u_int32_t i = 0;
+ bool log_rtt = false;
-#if INET6
if (isipv6) {
struct sockaddr_in6 *sin6;
- if (rt == NULL)
+ if (rt == NULL) {
goto no_valid_rt;
+ }
sin6 = (struct sockaddr_in6 *)(void *)rt_key(rt);
- if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
goto no_valid_rt;
- }
- else
-#endif /* INET6 */
- if (ROUTE_UNUSABLE(ro) ||
+ }
+ } else if (ROUTE_UNUSABLE(ro) ||
SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
DTRACE_TCP4(state__change, void, NULL,
struct inpcb *, inp, struct tcpcb *, tp,
if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
i = tp->t_srtt *
(RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
- if (rt->rt_rmx.rmx_rtt && i)
+ if (rt->rt_rmx.rmx_rtt && i) {
/*
* filter this update to half the old & half
* the new values, converting scale.
*/
rt->rt_rmx.rmx_rtt =
(rt->rt_rmx.rmx_rtt + i) / 2;
- else
+ } else {
rt->rt_rmx.rmx_rtt = i;
+ }
tcpstat.tcps_cachedrtt++;
+ log_rtt = true;
}
if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
i = tp->t_rttvar *
(RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
- if (rt->rt_rmx.rmx_rttvar && i)
+ if (rt->rt_rmx.rmx_rttvar && i) {
rt->rt_rmx.rmx_rttvar =
(rt->rt_rmx.rmx_rttvar + i) / 2;
- else
+ } else {
rt->rt_rmx.rmx_rttvar = i;
+ }
tcpstat.tcps_cachedrttvar++;
+ log_rtt = true;
+ }
+ if (log_rtt) {
+ TCP_LOG_RTM_RTT(tp, rt);
+ TCP_LOG_RTT_INFO(tp);
}
/*
* The old comment here said:
* way to calculate the pipesize, it will have to do.
*/
i = tp->snd_ssthresh;
- if (rt->rt_rmx.rmx_sendpipe != 0)
+ if (rt->rt_rmx.rmx_sendpipe != 0) {
dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
- else
+ } else {
dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
+ }
if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
i != 0 && rt->rt_rmx.rmx_ssthresh != 0) ||
dosavessthresh) {
* packets then to packet data bytes.
*/
i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
- if (i < 2)
+ if (i < 2) {
i = 2;
+ }
i *= (u_int32_t)(tp->t_maxseg +
-#if INET6
- isipv6 ? sizeof (struct ip6_hdr) +
- sizeof (struct tcphdr) :
-#endif /* INET6 */
- sizeof (struct tcpiphdr));
- if (rt->rt_rmx.rmx_ssthresh)
+ isipv6 ? sizeof(struct ip6_hdr) +
+ sizeof(struct tcphdr) :
+ sizeof(struct tcpiphdr));
+ if (rt->rt_rmx.rmx_ssthresh) {
rt->rt_rmx.rmx_ssthresh =
(rt->rt_rmx.rmx_ssthresh + i) / 2;
- else
+ } else {
rt->rt_rmx.rmx_ssthresh = i;
+ }
tcpstat.tcps_cachedssthresh++;
}
}
/*
* Mark route for deletion if no information is cached.
*/
- if (rt != NULL && (so->so_flags & SOF_OVERFLOW) && tcp_lq_overflow) {
+ if (rt != NULL && (so->so_flags & SOF_OVERFLOW)) {
if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
rt->rt_rmx.rmx_rtt == 0) {
rt->rt_flags |= RTF_DELCLONE;
}
no_valid_rt:
- if (rt != NULL)
+ if (rt != NULL) {
RT_UNLOCK(rt);
+ }
/* free the reassembly queue, if any */
(void) tcp_freeq(tp);
}
tcp_rxtseg_clean(tp);
/* Free the packet list */
- if (tp->t_pktlist_head != NULL)
+ if (tp->t_pktlist_head != NULL) {
m_freem_list(tp->t_pktlist_head);
+ }
TCP_PKTLIST_CLEAR(tp);
- if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER)
- inp->inp_saved_ppcb = (caddr_t) tp;
+ if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) {
+ inp->inp_saved_ppcb = (caddr_t) tp;
+ }
tp->t_state = TCPS_CLOSED;
sodisconnectwakeup(so);
/*
- * Clean up any LRO state
+ * Make sure to clear the TCP Keep Alive Offload as it is
+ * ref counted on the interface
*/
- if (tp->t_flagsext & TF_LRO_OFFLOADED) {
- tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr,
- inp->inp_lport, inp->inp_fport);
- tp->t_flagsext &= ~TF_LRO_OFFLOADED;
- }
+ tcp_clear_keep_alive_offload(so);
/*
* If this is a socket that does not want to wakeup the device
*/
if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
inp->inp_state != INPCB_STATE_DEAD &&
- !(inp->inp_flags2 & INP2_TIMEWAIT))
+ !(inp->inp_flags2 & INP2_TIMEWAIT)) {
socket_post_kev_msg_closed(so);
+ }
if (CC_ALGO(tp)->cleanup != NULL) {
CC_ALGO(tp)->cleanup(tp);
tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
}
-#if INET6
- if (SOCK_CHECK_DOM(so, PF_INET6))
+ if (SOCK_CHECK_DOM(so, PF_INET6)) {
in6_pcbdetach(inp);
- else
-#endif /* INET6 */
- in_pcbdetach(inp);
+ } else {
+ in_pcbdetach(inp);
+ }
/*
* Call soisdisconnected after detach because it might unlock the socket
tcpstat.tcps_closed++;
KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
tcpstat.tcps_closed, 0, 0, 0, 0);
- return (NULL);
+ return NULL;
}
int
rv = 1;
}
tp->t_reassqlen = 0;
- return (rv);
+ return rv;
}
-/*
- * Walk the tcpbs, if existing, and flush the reassembly queue,
- * if there is one when do_tcpdrain is enabled
- * Also defunct the extended background idle socket
- * Do it next time if the pcbinfo lock is in use
- */
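+/*
+ * Walk the list of TCP pcbs and defunct the sockets that are in the
+ * extended background idle state.  Skip the whole pass if the pcbinfo
+ * lock is busy; it will be retried on the next drain.
+ */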
void
tcp_drain(void)
{
struct inpcb *inp;
struct tcpcb *tp;
- if (!lck_rw_try_lock_exclusive(tcbinfo.ipi_lock))
+ if (!lck_rw_try_lock_exclusive(tcbinfo.ipi_lock)) {
return;
+ }
LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
- WNT_STOPUSING) {
+ WNT_STOPUSING) {
socket_lock(inp->inp_socket, 1);
if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
- == WNT_STOPUSING) {
+ == WNT_STOPUSING) {
/* lost a race, try the next one */
socket_unlock(inp->inp_socket, 1);
continue;
}
tp = intotcpcb(inp);
- if (do_tcpdrain)
- tcp_freeq(tp);
-
so_drain_extended_bk_idle(inp->inp_socket);
socket_unlock(inp->inp_socket, 1);
}
}
lck_rw_done(tcbinfo.ipi_lock);
-
}
/*
{
struct tcpcb *tp;
- if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD))
+ if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) {
return; /* pcb is gone already */
-
+ }
tp = (struct tcpcb *)inp->inp_ppcb;
VERIFY(tp != NULL);
inp->inp_route.ro_rt = (struct rtentry *)NULL;
}
} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
- tp->t_softerror)
+ tp->t_softerror) {
tcp_drop(tp, error);
- else
+ } else {
tp->t_softerror = error;
-#if 0
- wakeup((caddr_t) &so->so_timeo);
- sorwakeup(so);
- sowwakeup(so);
-#endif
+ }
}
struct bwmeas *
tcp_bwmeas_alloc(struct tcpcb *tp)
{
struct bwmeas *elm;
- elm = zalloc(tcp_bwmeas_zone);
- if (elm == NULL)
- return (elm);
-
- bzero(elm, bwmeas_elm_size);
+ elm = zalloc_flags(tcp_bwmeas_zone, Z_ZERO | Z_WAITOK);
elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
- return (elm);
+ return elm;
}
void
LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
if (inp->inp_gencnt <= gencnt &&
- inp->inp_state != INPCB_STATE_DEAD)
+ inp->inp_state != INPCB_STATE_DEAD) {
inp_list[i++] = inp;
- if (i >= n)
+ }
+ if (i >= n) {
break;
+ }
}
TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) {
inp = tp->t_inpcb;
if (inp->inp_gencnt <= gencnt &&
- inp->inp_state != INPCB_STATE_DEAD)
+ inp->inp_state != INPCB_STATE_DEAD) {
inp_list[i++] = inp;
- if (i >= n)
+ }
+ if (i >= n) {
break;
+ }
}
- return (i);
+ return i;
}
/*
if (req->oldptr == USER_ADDR_NULL) {
n = tcbinfo.ipi_count;
req->oldidx = 2 * (sizeof(xig))
- + (n + n/8) * sizeof(struct xtcpcb);
+ + (n + n / 8) * sizeof(struct xtcpcb);
lck_rw_done(tcbinfo.ipi_lock);
- return (0);
+ return 0;
}
if (req->newptr != USER_ADDR_NULL) {
lck_rw_done(tcbinfo.ipi_lock);
- return (EPERM);
+ return EPERM;
}
/*
error = SYSCTL_OUT(req, &xig, sizeof(xig));
if (error) {
lck_rw_done(tcbinfo.ipi_lock);
- return (error);
+ return error;
}
/*
* We are done if there is no pcb
*/
if (n == 0) {
lck_rw_done(tcbinfo.ipi_lock);
- return (0);
+ return 0;
}
- inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK);
+ inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
if (inp_list == 0) {
lck_rw_done(tcbinfo.ipi_lock);
- return (ENOMEM);
+ return ENOMEM;
}
n = get_tcp_inp_list(inp_list, n, gencnt);
inp = inp_list[i];
- if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
continue;
+ }
socket_lock(inp->inp_socket, 1);
if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
socket_unlock(inp->inp_socket, 1);
} else {
bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp));
}
- if (inp->inp_socket)
+ if (inp->inp_socket) {
sotoxsocket(inp->inp_socket, &xt.xt_socket);
+ }
socket_unlock(inp->inp_socket, 1);
}
FREE(inp_list, M_TEMP);
lck_rw_done(tcbinfo.ipi_lock);
- return (error);
+ return error;
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
- CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
- tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
-#if !CONFIG_EMBEDDED
+#if XNU_TARGET_OS_OSX
static void
tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
if (req->oldptr == USER_ADDR_NULL) {
n = tcbinfo.ipi_count;
req->oldidx = 2 * (sizeof(xig))
- + (n + n/8) * sizeof(struct xtcpcb64);
+ + (n + n / 8) * sizeof(struct xtcpcb64);
lck_rw_done(tcbinfo.ipi_lock);
- return (0);
+ return 0;
}
if (req->newptr != USER_ADDR_NULL) {
lck_rw_done(tcbinfo.ipi_lock);
- return (EPERM);
+ return EPERM;
}
/*
error = SYSCTL_OUT(req, &xig, sizeof(xig));
if (error) {
lck_rw_done(tcbinfo.ipi_lock);
- return (error);
+ return error;
}
/*
* We are done if there is no pcb
*/
if (n == 0) {
lck_rw_done(tcbinfo.ipi_lock);
- return (0);
+ return 0;
}
- inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK);
+ inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
if (inp_list == 0) {
lck_rw_done(tcbinfo.ipi_lock);
- return (ENOMEM);
+ return ENOMEM;
}
n = get_tcp_inp_list(inp_list, n, gencnt);
inp = inp_list[i];
- if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
continue;
+ }
socket_lock(inp->inp_socket, 1);
if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
socket_unlock(inp->inp_socket, 1);
inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
xt.xt_inpcb.inp_ppcb =
(uint64_t)VM_KERNEL_ADDRPERM(inp->inp_ppcb);
- if (inp->inp_ppcb != NULL)
+ if (inp->inp_ppcb != NULL) {
tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb,
&xt);
- if (inp->inp_socket)
+ }
+ if (inp->inp_socket) {
sotoxsocket64(inp->inp_socket,
&xt.xt_inpcb.xi_socket);
+ }
socket_unlock(inp->inp_socket, 1);
error = SYSCTL_OUT(req, &xt, sizeof(xt));
}
if (!error) {
- /*
- * Give the user an updated idea of our state.
- * If the generation differs from what we told
- * her before, she knows that something happened
- * while we were processing this request, and it
- * might be necessary to retry.
- */
- bzero(&xig, sizeof(xig));
- xig.xig_len = sizeof(xig);
- xig.xig_gen = tcbinfo.ipi_gencnt;
- xig.xig_sogen = so_gencnt;
- xig.xig_count = tcbinfo.ipi_count;
- error = SYSCTL_OUT(req, &xig, sizeof(xig));
+ /*
+ * Give the user an updated idea of our state.
+ * If the generation differs from what we told
+ * her before, she knows that something happened
+ * while we were processing this request, and it
+ * might be necessary to retry.
+ */
+ bzero(&xig, sizeof(xig));
+ xig.xig_len = sizeof(xig);
+ xig.xig_gen = tcbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = tcbinfo.ipi_count;
+ error = SYSCTL_OUT(req, &xig, sizeof(xig));
}
FREE(inp_list, M_TEMP);
lck_rw_done(tcbinfo.ipi_lock);
- return (error);
+ return error;
}
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
- CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
- tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
-#endif /* !CONFIG_EMBEDDED */
+#endif /* XNU_TARGET_OS_OSX */
static int
tcp_pcblist_n SYSCTL_HANDLER_ARGS
error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
- return (error);
+ return error;
}
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
- CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
- tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
+
+static int
+tcp_progress_indicators SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+
+ return ntstat_tcp_progress_indicators(req);
+}
+
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress,
+ CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
+ tcp_progress_indicators, "S", "Various items that indicate the current state of progress on the link");
__private_extern__ void
tcp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
bitstr_t *bitfield)
{
- inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
- &tcbinfo);
+ inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
+ &tcbinfo);
}
__private_extern__ uint32_t
tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
{
- return (inpcb_count_opportunistic(ifindex, &tcbinfo, flags));
+ return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
}
__private_extern__ uint32_t
tcp_find_anypcb_byaddr(struct ifaddr *ifa)
{
- return (inpcb_find_anypcb_byaddr(ifa, &tcbinfo));
+ return inpcb_find_anypcb_byaddr(ifa, &tcbinfo);
}
static void
u_short ifscope = IFSCOPE_NONE;
int mtu;
struct sockaddr_in icmpsrc = {
- sizeof (struct sockaddr_in),
- AF_INET, 0, { 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0 } };
+ .sin_len = sizeof(struct sockaddr_in),
+ .sin_family = AF_INET, .sin_port = 0, .sin_addr = { .s_addr = 0 },
+ .sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 }
+ };
struct icmp *icp = NULL;
icp = (struct icmp *)(void *)
* lock bit, indicating that we are no longer doing MTU
* discovery.
*/
- if (ROUTE_UNUSABLE(&(inp->inp_route)) == false)
+ if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) {
rt = inp->inp_route.ro_rt;
+ }
/*
* icmp6_mtudisc_update scopes the routing lookup
* Take the interface scope from cached route or
* the last outgoing interface from inp
*/
- if (rt != NULL)
+ if (rt != NULL) {
ifscope = (rt->rt_ifp != NULL) ?
rt->rt_ifp->if_index : IFSCOPE_NONE;
- else
+ } else {
ifscope = (inp->inp_last_outifp != NULL) ?
inp->inp_last_outifp->if_index : IFSCOPE_NONE;
+ }
if ((rt == NULL) ||
!(rt->rt_flags & RTF_HOST) ||
* < route's MTU. We may want to adopt
* that change.
*/
- if (mtu == 0)
+ if (mtu == 0) {
mtu = ip_next_mtu(rt->rt_rmx.
rmx_mtu, 1);
+ }
#if DEBUG_MTUDISC
printf("MTU for %s reduced to %d\n",
inet_ntop(AF_INET,
&icmpsrc.sin_addr, ipv4str,
- sizeof (ipv4str)), mtu);
+ sizeof(ipv4str)), mtu);
#endif
if (mtu < max(296, (tcp_minmss +
- sizeof (struct tcpiphdr)))) {
+ sizeof(struct tcpiphdr)))) {
rt->rt_rmx.rmx_locks |= RTV_MTU;
} else if (rt->rt_rmx.rmx_mtu > mtu) {
rt->rt_rmx.rmx_mtu = mtu;
void (*notify)(struct inpcb *, int) = tcp_notify;
faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
- if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
+ if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
return;
+ }
- if ((unsigned)cmd >= PRC_NCMDS)
+ if ((unsigned)cmd >= PRC_NCMDS) {
return;
+ }
/* Source quench is deprecated */
- if (cmd == PRC_QUENCH)
- return;
+ if (cmd == PRC_QUENCH) {
+ return;
+ }
- if (cmd == PRC_MSGSIZE)
+ if (cmd == PRC_MSGSIZE) {
notify = tcp_mtudisc;
- else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
- cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
- cmd == PRC_TIMXCEED_INTRANS) && ip)
+ } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
+ cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
+ cmd == PRC_TIMXCEED_INTRANS) && ip) {
notify = tcp_drop_syn_sent;
+ }
/*
* Hostdead is ugly because it goes linearly through all PCBs.
* XXX: We never get this from ICMP, otherwise it makes an
* excellent DoS attack on machines with many connections.
*/
- else if (cmd == PRC_HOSTDEAD)
+ else if (cmd == PRC_HOSTDEAD) {
ip = NULL;
- else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd))
+ } else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
return;
+ }
if (ip == NULL) {
tp = intotcpcb(inp);
if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
- if (cmd == PRC_MSGSIZE)
+ if (cmd == PRC_MSGSIZE) {
tcp_handle_msgsize(ip, inp);
+ }
(*notify)(inp, inetctlerrmap[cmd]);
}
socket_unlock(inp->inp_socket, 1);
}
-#if INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
{
tcp_seq icmp_tcp_seq;
struct in6_addr *dst;
- struct tcphdr *th;
void (*notify)(struct inpcb *, int) = tcp_notify;
struct ip6_hdr *ip6;
struct mbuf *m;
unsigned int mtu;
unsigned int off;
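+ /*
+  * An ICMPv6 error may quote only the leading bytes of the offending
+  * TCP header, so the ports and the sequence number are pulled out
+  * with m_copydata() after explicit length checks rather than by
+  * dereferencing a full struct tcphdr.
+  */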
+ struct tcp_ports {
+ uint16_t th_sport;
+ uint16_t th_dport;
+ } t_ports;
+
if (sa->sa_family != AF_INET6 ||
- sa->sa_len != sizeof(struct sockaddr_in6))
+ sa->sa_len != sizeof(struct sockaddr_in6)) {
return;
+ }
/* Source quench is deprecated */
- if (cmd == PRC_QUENCH)
+ if (cmd == PRC_QUENCH) {
return;
+ }
- if ((unsigned)cmd >= PRC_NCMDS)
+ if ((unsigned)cmd >= PRC_NCMDS) {
return;
+ }
/* if the parameter is from icmp6, decode it. */
if (d != NULL) {
} else {
m = NULL;
ip6 = NULL;
- off = 0; /* fool gcc */
+ off = 0; /* fool gcc */
sa6_src = &sa6_any;
dst = NULL;
}
- if (cmd == PRC_MSGSIZE)
+ if (cmd == PRC_MSGSIZE) {
notify = tcp_mtudisc;
- else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
+ } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
- ip6 != NULL)
+ ip6 != NULL) {
notify = tcp_drop_syn_sent;
+ }
/*
* Hostdead is ugly because it goes linearly through all PCBs.
* XXX: We never get this from ICMP, otherwise it makes an
* excellent DoS attack on machines with many connections.
*/
- else if (cmd == PRC_HOSTDEAD)
+ else if (cmd == PRC_HOSTDEAD) {
ip6 = NULL;
- else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd))
+ } else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
return;
+ }
if (ip6 == NULL) {
return;
}
+ /* Check if we can safely get the ports from the tcp hdr */
if (m == NULL ||
- (m->m_pkthdr.len < (int32_t) (off + offsetof(struct tcphdr, th_seq))))
+ (m->m_pkthdr.len <
+ (int32_t) (off + sizeof(struct tcp_ports)))) {
return;
+ }
+ bzero(&t_ports, sizeof(struct tcp_ports));
+ m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
- th = (struct tcphdr *)(void *)mtodo(m, off);
- icmp_tcp_seq = ntohl(th->th_seq);
+ off += sizeof(struct tcp_ports);
+ if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
+ return;
+ }
+ m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
+ icmp_tcp_seq = ntohl(icmp_tcp_seq);
if (cmd == PRC_MSGSIZE) {
mtu = ntohl(icmp6->icmp6_mtu);
* If no alternative MTU was proposed, or the proposed
* MTU was too small, set to the min.
*/
- if (mtu < IPV6_MMTU)
+ if (mtu < IPV6_MMTU) {
mtu = IPV6_MMTU - 8;
+ }
}
- inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, th->th_dport,
- &ip6->ip6_src, th->th_sport, 0, NULL);
+ inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
+ &ip6->ip6_src, t_ports.th_sport, 0, NULL);
if (inp == NULL ||
inp->inp_socket == NULL) {
* is smaller than the current one.
*/
if (mtu < tp->t_maxseg +
- (sizeof (*th) + sizeof (*ip6)))
+ (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) {
(*notify)(inp, inetctlerrmap[cmd]);
- } else
+ }
+ } else {
(*notify)(inp, inetctlerrmap[cmd]);
+ }
}
}
socket_unlock(inp->inp_socket, 1);
}
-#endif /* INET6 */
/*
*
*/
-#define ISN_BYTES_PER_SECOND 1048576
+#define ISN_BYTES_PER_SECOND 1048576
tcp_seq
tcp_new_isn(struct tcpcb *tp)
tcp_seq new_isn;
struct timeval timenow;
u_char isn_secret[32];
- int isn_last_reseed = 0;
+ long isn_last_reseed = 0;
MD5_CTX isn_ctx;
/* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. */
if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT)) &&
tcp_strict_rfc1948 == 0)
#ifdef __APPLE__
- return (RandomULong());
+ { return RandomULong(); }
#else
- return (arc4random());
+ { return arc4random(); }
#endif
getmicrotime(&timenow);
/* Seed if this is the first use, reseed if requested. */
if ((isn_last_reseed == 0) ||
((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) &&
- (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
- < (u_int)timenow.tv_sec))) {
+ (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval * hz)
+ < (u_int)timenow.tv_sec))) {
#ifdef __APPLE__
read_frandom(&isn_secret, sizeof(isn_secret));
#else
sizeof(u_short));
MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport,
sizeof(u_short));
-#if INET6
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
sizeof(struct in6_addr));
MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
sizeof(struct in6_addr));
- } else
-#endif
- {
+ } else {
MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
sizeof(struct in_addr));
MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
MD5Final((u_char *) &md5_buffer, &isn_ctx);
new_isn = (tcp_seq) md5_buffer[0];
new_isn += timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz);
- return (new_isn);
+ return new_isn;
}
{
struct tcpcb *tp = intotcpcb(inp);
- if (tp && tp->t_state == TCPS_SYN_SENT)
+ if (tp && tp->t_state == TCPS_SYN_SENT) {
tcp_drop(tp, errno);
+ }
}
/*
* This duplicates some code in the tcp_mss() function in tcp_input.c.
*/
void
-tcp_mtudisc(
- struct inpcb *inp,
- __unused int errno
-)
+tcp_mtudisc(struct inpcb *inp, __unused int errno)
{
struct tcpcb *tp = intotcpcb(inp);
struct rtentry *rt;
- struct rmxp_tao *taop;
struct socket *so = inp->inp_socket;
- int offered;
int mss;
u_int32_t mtu;
- u_int32_t protoHdrOverhead = sizeof (struct tcpiphdr);
-#if INET6
+ u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr);
int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
- if (isipv6)
+ /*
+ * Nothing left to send after the socket is defunct or TCP is in
+ * the closed state
+ */
+ if ((so->so_state & SS_DEFUNCT) || (tp != NULL && tp->t_state == TCPS_CLOSED)) {
+ return;
+ }
+
+ if (isipv6) {
protoHdrOverhead = sizeof(struct ip6_hdr) +
sizeof(struct tcphdr);
-#endif /* INET6 */
+ }
- if (tp) {
-#if INET6
- if (isipv6)
+ if (tp != NULL) {
+ if (isipv6) {
rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
- else
-#endif /* INET6 */
+ } else {
rt = tcp_rtlookup(inp, IFSCOPE_NONE);
+ }
if (!rt || !rt->rt_rmx.rmx_mtu) {
tp->t_maxopd = tp->t_maxseg =
-#if INET6
- isipv6 ? tcp_v6mssdflt :
-#endif /* INET6 */
- tcp_mssdflt;
+ isipv6 ? tcp_v6mssdflt :
+ tcp_mssdflt;
/* Route locked during lookup above */
- if (rt != NULL)
+ if (rt != NULL) {
RT_UNLOCK(rt);
+ }
return;
}
- taop = rmx_taop(rt->rt_rmx);
- offered = taop->tao_mssopt;
mtu = rt->rt_rmx.rmx_mtu;
/* Route locked during lookup above */
#endif /* NECP */
mss = mtu - protoHdrOverhead;
- if (offered)
- mss = min(mss, offered);
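+ /*
+ * With the TAO cache gone, clamp against t_maxopd, the current
+ * maximum segment size including options, instead of the MSS the
+ * peer offered in the cached TAO entry.
+ */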
+ if (tp->t_maxopd) {
+ mss = min(mss, tp->t_maxopd);
+ }
/*
* XXX - The above conditional probably violates the TCP
* spec. The problem is that, since we don't know the
* will get recorded and the new parameters should get
* recomputed. For Further Study.
*/
- if (tp->t_maxopd <= mss)
+ if (tp->t_maxopd <= mss) {
return;
+ }
tp->t_maxopd = mss;
- if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
- (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
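+ /*
+ * RFC 1323 timestamps occupy TCPOLEN_TSTAMP_APPA bytes of option
+ * space in every segment, so subtract them from the usable MSS.
+ */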
+ if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
+ (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) {
mss -= TCPOLEN_TSTAMP_APPA;
+ }
#if MPTCP
mss -= mptcp_adj_mss(tp, TRUE);
#endif
- if (so->so_snd.sb_hiwat < mss)
+ if (so->so_snd.sb_hiwat < mss) {
mss = so->so_snd.sb_hiwat;
+ }
tp->t_maxseg = mss;
+ ASSERT(tp->t_maxseg);
+
/*
* Reset the slow-start flight size as it may depend on the
* new MSS
*/
- if (CC_ALGO(tp)->cwnd_init != NULL)
+ if (CC_ALGO(tp)->cwnd_init != NULL) {
CC_ALGO(tp)->cwnd_init(tp);
+ }
tcpstat.tcps_mturesent++;
tp->t_rtttime = 0;
tp->snd_nxt = tp->snd_una;
LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
ro = &inp->inp_route;
- if ((rt = ro->ro_rt) != NULL)
+ if ((rt = ro->ro_rt) != NULL) {
RT_LOCK(rt);
+ }
if (ROUTE_UNUSABLE(ro)) {
if (rt != NULL) {
ro->ro_dst.sa_family = AF_INET;
ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
((struct sockaddr_in *)(void *)&ro->ro_dst)->sin_addr =
- inp->inp_faddr;
+ inp->inp_faddr;
/*
* If the socket was bound to an interface, then
inp->inp_boundifp->if_index : input_ifscope;
rtalloc_scoped(ro, ifscope);
- if ((rt = ro->ro_rt) != NULL)
+ if ((rt = ro->ro_rt) != NULL) {
RT_LOCK(rt);
+ }
}
}
- if (rt != NULL)
+ if (rt != NULL) {
RT_LOCK_ASSERT_HELD(rt);
+ }
/*
* Update MTU discovery determination. Don't do it if:
tp = intotcpcb(inp);
if (!path_mtu_discovery || ((rt != NULL) &&
- (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
+ (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
tp->t_flags &= ~TF_PMTUD;
- else
+ } else {
tp->t_flags |= TF_PMTUD;
+ }
if (rt != NULL && rt->rt_ifp != NULL) {
somultipages(inp->inp_socket,
tcp_set_ecn(tp, rt->rt_ifp);
if (inp->inp_last_outifp == NULL) {
inp->inp_last_outifp = rt->rt_ifp;
-
}
}
/* Note if the peer is local */
if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
- (rt->rt_gateway->sa_family == AF_LINK ||
- rt->rt_ifp->if_flags & IFF_LOOPBACK ||
- in_localaddr(inp->inp_faddr))) {
+ (rt->rt_gateway->sa_family == AF_LINK ||
+ rt->rt_ifp->if_flags & IFF_LOOPBACK ||
+ in_localaddr(inp->inp_faddr))) {
tp->t_flags |= TF_LOCAL;
}
/*
* Caller needs to call RT_UNLOCK(rt).
*/
- return (rt);
+ return rt;
}
-#if INET6
struct rtentry *
tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope)
{
LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
ro6 = &inp->in6p_route;
- if ((rt = ro6->ro_rt) != NULL)
+ if ((rt = ro6->ro_rt) != NULL) {
RT_LOCK(rt);
+ }
if (ROUTE_UNUSABLE(ro6)) {
if (rt != NULL) {
inp->inp_boundifp->if_index : input_ifscope;
rtalloc_scoped((struct route *)ro6, ifscope);
- if ((rt = ro6->ro_rt) != NULL)
+ if ((rt = ro6->ro_rt) != NULL) {
RT_LOCK(rt);
+ }
}
}
- if (rt != NULL)
+ if (rt != NULL) {
RT_LOCK_ASSERT_HELD(rt);
+ }
/*
* Update path MTU Discovery determination
*/
if (!path_mtu_discovery || ((rt != NULL) &&
- (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
+ (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
tp->t_flags &= ~TF_PMTUD;
- else
+ } else {
tp->t_flags |= TF_PMTUD;
+ }
if (rt != NULL && rt->rt_ifp != NULL) {
somultipages(inp->inp_socket,
if (inp->inp_last_outifp == NULL) {
inp->inp_last_outifp = rt->rt_ifp;
}
- }
- /* Note if the peer is local */
- if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
- (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
- IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
- rt->rt_gateway->sa_family == AF_LINK ||
- in6_localaddr(&inp->in6p_faddr))) {
- tp->t_flags |= TF_LOCAL;
+ /* Note if the peer is local */
+ if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
+ (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
+ IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
+ rt->rt_gateway->sa_family == AF_LINK ||
+ in6_localaddr(&inp->in6p_faddr))) {
+ tp->t_flags |= TF_LOCAL;
+ }
}
/*
* Caller needs to call RT_UNLOCK(rt).
*/
- return (rt);
+ return rt;
}
-#endif /* INET6 */
#if IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
struct mbuf *m;
size_t hdrsiz;
struct ip *ip;
-#if INET6
struct ip6_hdr *ip6 = NULL;
-#endif /* INET6 */
struct tcphdr *th;
- if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
- return (0);
- MGETHDR(m, M_DONTWAIT, MT_DATA); /* MAC-OK */
- if (!m)
- return (0);
+ if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) {
+ return 0;
+ }
+ MGETHDR(m, M_DONTWAIT, MT_DATA); /* MAC-OK */
+ if (!m) {
+ return 0;
+ }
-#if INET6
if ((inp->inp_vflag & INP_IPV6) != 0) {
ip6 = mtod(m, struct ip6_hdr *);
th = (struct tcphdr *)(void *)(ip6 + 1);
m->m_pkthdr.len = m->m_len =
- sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+ sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
tcp_fillheaders(tp, ip6, th);
hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
- } else
-#endif /* INET6 */
- {
+ } else {
ip = mtod(m, struct ip *);
th = (struct tcphdr *)(ip + 1);
m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
}
m_free(m);
- return (hdrsiz);
+ return hdrsiz;
}
#endif /* IPSEC */
-/*
- * Return a pointer to the cached information about the remote host.
- * The cached information is stored in the protocol specific part of
- * the route metrics.
- */
-struct rmxp_tao *
-tcp_gettaocache(struct inpcb *inp)
-{
- struct rtentry *rt;
- struct rmxp_tao *taop;
-
-#if INET6
- if ((inp->inp_vflag & INP_IPV6) != 0)
- rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
- else
-#endif /* INET6 */
- rt = tcp_rtlookup(inp, IFSCOPE_NONE);
-
- /* Make sure this is a host route and is up. */
- if (rt == NULL ||
- (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST)) {
- /* Route locked during lookup above */
- if (rt != NULL)
- RT_UNLOCK(rt);
- return (NULL);
- }
-
- taop = rmx_taop(rt->rt_rmx);
- /* Route locked during lookup above */
- RT_UNLOCK(rt);
- return (taop);
-}
-
-/*
- * Clear all the TAO cache entries, called from tcp_init.
- *
- * XXX
- * This routine is just an empty one, because we assume that the routing
- * routing tables are initialized at the same time when TCP, so there is
- * nothing in the cache left over.
- */
-static void
-tcp_cleartaocache(void)
-{
-}
-
int
tcp_lock(struct socket *so, int refcount, void *lr)
{
void *lr_saved;
- if (lr == NULL)
+ if (lr == NULL) {
lr_saved = __builtin_return_address(0);
- else
+ } else {
lr_saved = lr;
+ }
retry:
if (so->so_pcb != NULL) {
if (so->so_flags & SOF_MP_SUBFLOW) {
struct mptcb *mp_tp = tptomptp(sototcpcb(so));
- VERIFY(mp_tp);
-
- mpte_lock_assert_notheld(mp_tp->mpt_mpte);
- mpte_lock(mp_tp->mpt_mpte);
+ struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
+ socket_lock(mp_so, refcount);
/*
* Check if we became non-MPTCP while waiting for the lock.
* If yes, we have to retry to grab the right lock.
*/
if (!(so->so_flags & SOF_MP_SUBFLOW)) {
- mpte_unlock(mp_tp->mpt_mpte);
+ socket_unlock(mp_so, refcount);
goto retry;
}
} else {
goto retry;
}
}
- } else {
+ } else {
panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
so, lr_saved, solockhistory_nr(so));
/* NOTREACHED */
solockhistory_nr(so));
/* NOTREACHED */
}
- if (refcount)
+ if (refcount) {
so->so_usecount++;
+ }
so->lock_lr[so->next_lock_lr] = lr_saved;
- so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
- return (0);
+ so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
+ return 0;
}
int
{
void *lr_saved;
- if (lr == NULL)
+ if (lr == NULL) {
lr_saved = __builtin_return_address(0);
- else
+ } else {
lr_saved = lr;
+ }
#ifdef MORE_TCPLOCK_DEBUG
printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x "
(uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)),
so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
#endif
- if (refcount)
+ if (refcount) {
so->so_usecount--;
+ }
if (so->so_usecount < 0) {
panic("tcp_unlock: so=%p usecount=%x lrh= %s\n",
/* NOTREACHED */
} else {
so->unlock_lr[so->next_unlock_lr] = lr_saved;
- so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
if (so->so_flags & SOF_MP_SUBFLOW) {
struct mptcb *mp_tp = tptomptp(sototcpcb(so));
- VERIFY(mp_tp);
- mpte_lock_assert_held(mp_tp->mpt_mpte);
- mpte_unlock(mp_tp->mpt_mpte);
+ struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
+ socket_lock_assert_owned(mp_so);
+ socket_unlock(mp_so, refcount);
} else {
LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
LCK_MTX_ASSERT_OWNED);
lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
}
}
- return (0);
+ return 0;
}
lck_mtx_t *
{
struct inpcb *inp = sotoinpcb(so);
- if (so->so_pcb) {
- if (so->so_usecount < 0)
+ if (so->so_pcb) {
+ if (so->so_usecount < 0) {
panic("tcp_getlock: so=%p usecount=%x lrh= %s\n",
so, so->so_usecount, solockhistory_nr(so));
+ }
if (so->so_flags & SOF_MP_SUBFLOW) {
struct mptcb *mp_tp = tptomptp(sototcpcb(so));
- return (mpte_getlock(mp_tp->mpt_mpte, flags));
+ struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
+ return mp_so->so_proto->pr_getlock(mp_so, flags);
} else {
- return (&inp->inpcb_mtx);
+ return &inp->inpcb_mtx;
}
} else {
panic("tcp_getlock: so=%p NULL so_pcb %s\n",
so, solockhistory_nr(so));
- return (so->so_proto->pr_domain->dom_mtx);
+ return so->so_proto->pr_domain->dom_mtx;
}
}
u_int32_t rcvbuf = sb->sb_hiwat;
struct socket *so = tp->t_inpcb->inp_socket;
- if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so))
+ if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
return;
- /*
- * If message delivery is enabled, do not count
- * unordered bytes in receive buffer towards hiwat
- */
- if (so->so_flags & SOF_ENABLE_MSGS)
- rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes;
+ }
if (tcp_do_autorcvbuf == 1 &&
- tcp_cansbgrow(sb) &&
- (tp->t_flags & TF_SLOWLINK) == 0 &&
- (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
- (rcvbuf - sb->sb_cc) < rcvbufinc &&
- rcvbuf < tcp_autorcvbuf_max &&
- (sb->sb_idealsize > 0 &&
- sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
+ tcp_cansbgrow(sb) &&
+ (tp->t_flags & TF_SLOWLINK) == 0 &&
+ (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
+ (rcvbuf - sb->sb_cc) < rcvbufinc &&
+ rcvbuf < tcp_autorcvbuf_max &&
+ (sb->sb_idealsize > 0 &&
+ sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
sbreserve(sb,
min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
}
int32_t space;
int32_t pending = 0;
+ if (so->so_flags & SOF_MP_SUBFLOW) {
+ /* We still need to grow TCP's buffer to have a BDP-estimate */
+ tcp_sbrcv_grow_rwin(tp, sb);
+
+ return mptcp_sbspace(tptomptp(tp));
+ }
+
tcp_sbrcv_grow_rwin(tp, sb);
/* hiwat might have changed */
rcvbuf = sb->sb_hiwat;
- /*
- * If message delivery is enabled, do not count
- * unordered bytes in receive buffer towards hiwat mark.
- * This value is used to return correct rwnd that does
- * not reflect the extra unordered bytes added to the
- * receive socket buffer.
- */
- if (so->so_flags & SOF_ENABLE_MSGS)
- rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes;
-
space = ((int32_t) imin((rcvbuf - sb->sb_cc),
- (sb->sb_mbmax - sb->sb_mbcnt)));
- if (space < 0)
+ (sb->sb_mbmax - sb->sb_mbcnt)));
+ if (space < 0) {
space = 0;
+ }
#if CONTENT_FILTER
/* Compensate for data being processed by content filters */
pending = cfil_sock_data_space(sb);
#endif /* CONTENT_FILTER */
- if (pending > space)
+ if (pending > space) {
space = 0;
- else
+ } else {
space -= pending;
+ }
/*
* Avoid increasing window size if the current window
* we could break some apps (see rdar://5409343)
*/
- if (space < tp->t_maxseg)
- return (space);
+ if (space < tp->t_maxseg) {
+ return space;
+ }
/* Clip window size for slower link */
- if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0)
- return (imin(space, slowlink_wsize));
+ if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) {
+ return imin(space, slowlink_wsize);
+ }
- return (space);
+ return space;
}
/*
* Checks TCP Segment Offloading capability for a given connection
void
tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
{
-#if INET6
struct inpcb *inp;
int isipv6;
-#endif /* INET6 */
+ struct ifnet *tunnel_ifp = NULL;
+#define IFNET_TSO_MASK (IFNET_TSO_IPV6 | IFNET_TSO_IPV4)
+
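+ /*
+ * Start with TSO disabled; it is re-enabled below only if the
+ * outgoing interface (and any NECP tunnel) supports it.
+ */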
+ tp->t_flags &= ~TF_TSO;
+
+ if (ifp == NULL) {
+ return;
+ }
+
#if MPTCP
/*
* We can't use TSO if this tcpcb belongs to an MPTCP session.
*/
if (tp->t_mpflags & TMPF_MPTCP_TRUE) {
- tp->t_flags &= ~TF_TSO;
return;
}
#endif
-#if INET6
inp = tp->t_inpcb;
isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
+ /*
+ * We can't use TSO if the TSO capability of the tunnel interface does
+ * not match the capability of the other interfaces known to TCP
+ */
+ if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
+ u_int tunnel_if_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
+
+ if (tunnel_if_index != 0) {
+ ifnet_head_lock_shared();
+ tunnel_ifp = ifindex2ifnet[tunnel_if_index];
+ ifnet_head_done();
+ }
+
+ if (tunnel_ifp == NULL) {
+ return;
+ }
+
+ if ((ifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
+ if (tso_debug > 0) {
+ os_log(OS_LOG_DEFAULT,
+ "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with ifp %s",
+ __func__,
+ ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
+ tunnel_ifp->if_xname, ifp->if_xname);
+ }
+ return;
+ }
+ if (inp->inp_last_outifp != NULL &&
+ (inp->inp_last_outifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
+ if (tso_debug > 0) {
+ os_log(OS_LOG_DEFAULT,
+ "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_last_outifp %s",
+ __func__,
+ ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
+ tunnel_ifp->if_xname, inp->inp_last_outifp->if_xname);
+ }
+ return;
+ }
+ if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp != NULL &&
+ (inp->inp_boundifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) {
+ if (tso_debug > 0) {
+ os_log(OS_LOG_DEFAULT,
+ "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_boundifp %s",
+ __func__,
+ ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport),
+ tunnel_ifp->if_xname, inp->inp_boundifp->if_xname);
+ }
+ return;
+ }
+ }
+
if (isipv6) {
- if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV6)) {
+ if (ifp->if_hwassist & IFNET_TSO_IPV6) {
tp->t_flags |= TF_TSO;
- if (ifp->if_tso_v6_mtu != 0)
+ if (ifp->if_tso_v6_mtu != 0) {
tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
- else
+ } else {
tp->tso_max_segment_size = TCP_MAXWIN;
- } else
- tp->t_flags &= ~TF_TSO;
-
- } else
-#endif /* INET6 */
-
- {
- if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV4)) {
+ }
+ }
+ } else {
+ if (ifp->if_hwassist & IFNET_TSO_IPV4) {
tp->t_flags |= TF_TSO;
- if (ifp->if_tso_v4_mtu != 0)
+ if (ifp->if_tso_v4_mtu != 0) {
tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
- else
+ } else {
tp->tso_max_segment_size = TCP_MAXWIN;
- } else
- tp->t_flags &= ~TF_TSO;
+ }
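+ /*
+ * CLAT46 translates outgoing IPv4 packets to IPv6, growing each
+ * header; reserve that expansion so translated segments still
+ * fit the interface MTU.
+ */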
+ if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
+ tp->tso_max_segment_size -=
+ CLAT46_HDR_EXPANSION_OVERHD;
+ }
+ }
+ }
+
+ if (tso_debug > 1) {
+ os_log(OS_LOG_DEFAULT, "%s: %u > %u TSO %d ifp %s",
+ __func__,
+ ntohs(tp->t_inpcb->inp_lport),
+ ntohs(tp->t_inpcb->inp_fport),
+ (tp->t_flags & TF_TSO) != 0,
+ ifp != NULL ? ifp->if_xname : "<NULL>");
}
}
-#define TIMEVAL_TO_TCPHZ(_tv_) ((_tv_).tv_sec * TCP_RETRANSHZ + \
- (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC)
+#define TIMEVAL_TO_TCPHZ(_tv_) ((uint32_t)((_tv_).tv_sec * TCP_RETRANSHZ + \
+ (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC))
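+/*
+ * Example: assuming TCP_RETRANSHZ is 1000 (millisecond ticks) and
+ * TCP_RETRANSHZ_TO_USEC is 1000, a timeval of {1 s, 2500 us} maps
+ * to 1 * 1000 + 2500 / 1000 = 1002 ticks.
+ */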
/*
* Function to calculate the tcp clock. The tcp clock will get updated
calculate_tcp_clock(void)
{
struct timeval tv = tcp_uptime;
- struct timeval interval = {0, TCP_RETRANSHZ_TO_USEC};
+ struct timeval interval = {.tv_sec = 0, .tv_usec = TCP_RETRANSHZ_TO_USEC};
struct timeval now, hold_now;
uint32_t incr = 0;
* defined by the constant tcp_autorcvbuf_max.
*/
void
-tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so,
- u_int32_t rcvbuf_max)
+tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so)
{
- u_int32_t maxsockbufsize;
- if (!tcp_do_rfc1323) {
- tp->request_r_scale = 0;
- return;
- }
+ uint32_t maxsockbufsize;
- tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale);
+ tp->request_r_scale = MAX((uint8_t)tcp_win_scale, tp->request_r_scale);
maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
- so->so_rcv.sb_hiwat : rcvbuf_max;
+ so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
+ /*
+ * Window scale should not exceed what is needed
+ * to send the max receive window size; adding 1 to TCP_MAXWIN
+ * ensures that.
+ */
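+ /*
+ * For example, a 4 MB receive buffer selects request_r_scale = 6,
+ * since (TCP_MAXWIN + 1) << 6 is exactly 4 MB.
+ */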
while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
- (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize)
+ ((TCP_MAXWIN + 1) << tp->request_r_scale) < maxsockbufsize) {
tp->request_r_scale++;
- tp->request_r_scale = min(tp->request_r_scale, TCP_MAX_WINSHIFT);
-
+ }
+ tp->request_r_scale = MIN(tp->request_r_scale, TCP_MAX_WINSHIFT);
}
int
-tcp_notsent_lowat_check(struct socket *so) {
+tcp_notsent_lowat_check(struct socket *so)
+{
struct inpcb *inp = sotoinpcb(so);
struct tcpcb *tp = NULL;
int notsent = 0;
+
if (inp != NULL) {
tp = intotcpcb(inp);
}
+ if (tp == NULL) {
+ return 0;
+ }
+
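+ /*
+ * snd_nxt - snd_una is the data already in flight; everything
+ * beyond it in the send buffer is still unsent.
+ */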
notsent = so->so_snd.sb_cc -
- (tp->snd_nxt - tp->snd_una);
+ (tp->snd_nxt - tp->snd_una);
/*
* When we send a FIN or SYN, not_sent can be negative.
* get an error from send because cantsendmore will be set.
*/
if (notsent <= tp->t_notsent_lowat) {
- return (1);
+ return 1;
}
/*
* maxseg of data to write.
*/
if ((tp->t_flags & TF_NODELAY) == 0 &&
- notsent > 0 && notsent < tp->t_maxseg) {
- return (1);
+ notsent > 0 && notsent < tp->t_maxseg) {
+ return 1;
}
- return (0);
+ return 0;
}
void
tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
- u_int32_t rxcount = 0;
+ uint16_t rxcount = 0;
- if (SLIST_EMPTY(&tp->t_rxt_segments))
+ if (SLIST_EMPTY(&tp->t_rxt_segments)) {
tp->t_dsack_lastuna = tp->snd_una;
+ }
/*
* First check if there is a segment already existing for this
* sequence space.
*/
SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
- if (SEQ_GT(rxseg->rx_start, start))
+ if (SEQ_GT(rxseg->rx_start, start)) {
break;
+ }
prev = rxseg;
}
next = rxseg;
rxcount = next->rx_count;
}
}
- if (!SEQ_LT(start, end))
+ if (!SEQ_LT(start, end)) {
return;
+ }
rxseg = (struct tcp_rxt_seg *) zalloc(tcp_rxt_seg_zone);
if (rxseg == NULL) {
tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
struct tcp_rxt_seg *rxseg;
- if (SLIST_EMPTY(&tp->t_rxt_segments))
- return (NULL);
+ if (SLIST_EMPTY(&tp->t_rxt_segments)) {
+ return NULL;
+ }
SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
if (SEQ_LEQ(rxseg->rx_start, start) &&
- SEQ_GEQ(rxseg->rx_end, end))
- return (rxseg);
- if (SEQ_GT(rxseg->rx_start, start))
+ SEQ_GEQ(rxseg->rx_end, end)) {
+ return rxseg;
+ }
+ if (SEQ_GT(rxseg->rx_start, start)) {
break;
+ }
+ }
+ return NULL;
+}
+
+void
+tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end)
+{
+ struct tcp_rxt_seg *rxseg;
+ if (SLIST_EMPTY(&tp->t_rxt_segments)) {
+ return;
+ }
+
+ SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+ if (SEQ_GEQ(rxseg->rx_start, start) &&
+ SEQ_LEQ(rxseg->rx_end, end)) {
+ /*
+ * If the segment was retransmitted only once, mark it as
+ * spurious.
+ */
+ if (rxseg->rx_count == 1) {
+ rxseg->rx_flags |= TCP_RXT_SPURIOUS;
+ }
+ }
+
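+ /*
+ * The list is ordered by rx_start (see tcp_rxtseg_insert), so we
+ * can stop once we are past the acknowledged range.
+ */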
+ if (SEQ_GEQ(rxseg->rx_start, end)) {
+ break;
+ }
}
- return (NULL);
+ return;
}
void
boolean_t bad_rexmt;
struct tcp_rxt_seg *rxseg;
- if (SLIST_EMPTY(&tp->t_rxt_segments))
- return (FALSE);
+ if (SLIST_EMPTY(&tp->t_rxt_segments)) {
+ return FALSE;
+ }
/*
* If all of the segments in this window are not cumulatively
* acknowledged, then there can still be undetected packet loss.
* Do not restore congestion window in that case.
*/
- if (SEQ_LT(th_ack, tp->snd_recover))
- return (FALSE);
+ if (SEQ_LT(th_ack, tp->snd_recover)) {
+ return FALSE;
+ }
bad_rexmt = TRUE;
SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
- if (rxseg->rx_count > 1 ||
- !(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
+ if (!(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
bad_rexmt = FALSE;
break;
}
}
- return (bad_rexmt);
+ return bad_rexmt;
}
boolean_t
{
boolean_t dsack_for_tlp = FALSE;
struct tcp_rxt_seg *rxseg;
- if (SLIST_EMPTY(&tp->t_rxt_segments))
- return (FALSE);
+ if (SLIST_EMPTY(&tp->t_rxt_segments)) {
+ return FALSE;
+ }
SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
if (rxseg->rx_count == 1 &&
break;
}
}
- return (dsack_for_tlp);
+ return dsack_for_tlp;
}
u_int32_t
SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
}
- return (total_size);
+ return total_size;
}
void
tcp_get_connectivity_status(struct tcpcb *tp,
- struct tcp_conn_status *connstatus)
+ struct tcp_conn_status *connstatus)
{
- if (tp == NULL || connstatus == NULL)
+ if (tp == NULL || connstatus == NULL) {
return;
+ }
bzero(connstatus, sizeof(*connstatus));
if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
if (TCPS_HAVEESTABLISHED(tp->t_state)) {
connstatus->conn_probe_failed = 1;
}
}
- if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX)
+ if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) {
connstatus->read_probe_failed = 1;
+ }
if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL &&
- (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY))
+ (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) {
connstatus->probe_activated = 1;
+ }
}
boolean_t
tfo_enabled(const struct tcpcb *tp)
{
- return ((tp->t_flagsext & TF_FASTOPEN)? TRUE : FALSE);
+ return (tp->t_flagsext & TF_FASTOPEN) ? TRUE : FALSE;
}
void
MGETHDR(m, M_WAIT, MT_HEADER);
if (m == NULL) {
- return (NULL);
+ return NULL;
}
m->m_pkthdr.pkt_proto = IPPROTO_TCP;
ip6->ip6_hlim = in6_selecthlim(inp, ifp);
ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
- if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src))
+ if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
ip6->ip6_src.s6_addr16[1] = 0;
- if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst))
+ }
+ if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
ip6->ip6_dst.s6_addr16[1] = 0;
+ }
}
th->th_flags = TH_ACK;
win = tcp_sbspace(tp);
- if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale))
- win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
+ if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) {
+ win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
+ }
th->th_win = htons((u_short) (win >> tp->rcv_scale));
if (is_probe) {
sizeof(struct ip6_hdr), sizeof(struct tcphdr));
}
- return (m);
+ return m;
}
void
if (ifp == NULL || frames_array == NULL ||
frames_array_count == 0 ||
frame_index >= frames_array_count ||
- frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE)
+ frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
return;
+ }
/*
* This function is called outside the regular TCP processing
struct mbuf *m = NULL;
struct tcpcb *tp = intotcpcb(inp);
- if (frame_index >= frames_array_count)
+ if (frame_index >= frames_array_count) {
break;
+ }
if (inp->inp_gencnt > gencnt ||
- inp->inp_state == INPCB_STATE_DEAD)
+ inp->inp_state == INPCB_STATE_DEAD) {
continue;
+ }
if ((so = inp->inp_socket) == NULL ||
- (so->so_state & SS_DEFUNCT))
+ (so->so_state & SS_DEFUNCT)) {
continue;
+ }
/*
* check for keepalive offload flag without socket
* lock to avoid a deadlock
continue;
}
if (inp->inp_ppcb == NULL ||
- in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+ in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
continue;
+ }
socket_lock(so, 1);
/* Release the want count */
if (inp->inp_ppcb == NULL ||
frame->ether_type = (inp->inp_vflag & INP_IPV4) ?
IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 :
IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
- frame->interval = tp->t_keepidle > 0 ? tp->t_keepidle :
- tcp_keepidle;
- frame->keep_cnt = TCP_CONN_KEEPCNT(tp);
- frame->keep_retry = TCP_CONN_KEEPINTVL(tp);
+ frame->interval = (uint16_t)(tp->t_keepidle > 0 ? tp->t_keepidle :
+ tcp_keepidle);
+ frame->keep_cnt = (uint8_t)TCP_CONN_KEEPCNT(tp);
+ frame->keep_retry = (uint16_t)TCP_CONN_KEEPINTVL(tp);
+ if (so->so_options & SO_NOWAKEFROMSLEEP) {
+ frame->flags |=
+ IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP;
+ }
frame->local_port = ntohs(inp->inp_lport);
frame->remote_port = ntohs(inp->inp_fport);
frame->local_seq = tp->snd_nxt;
frame->remote_seq = tp->rcv_nxt;
if (inp->inp_vflag & INP_IPV4) {
- frame->length = frame_data_offset +
- sizeof(struct ip) + sizeof(struct tcphdr);
+ ASSERT(frame_data_offset + sizeof(struct ip) + sizeof(struct tcphdr) <= UINT8_MAX);
+ frame->length = (uint8_t)(frame_data_offset +
+ sizeof(struct ip) + sizeof(struct tcphdr));
frame->reply_length = frame->length;
frame->addr_length = sizeof(struct in_addr);
} else {
struct in6_addr *ip6;
- frame->length = frame_data_offset +
- sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+ ASSERT(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= UINT8_MAX);
+ frame->length = (uint8_t)(frame_data_offset +
+ sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
frame->reply_length = frame->length;
frame->addr_length = sizeof(struct in6_addr);
ip6 = (struct in6_addr *)(void *)frame->local_addr;
bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr));
- if (IN6_IS_SCOPE_EMBED(ip6))
+ if (IN6_IS_SCOPE_EMBED(ip6)) {
ip6->s6_addr16[1] = 0;
+ }
ip6 = (struct in6_addr *)(void *)frame->remote_addr;
bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr));
- if (IN6_IS_SCOPE_EMBED(ip6))
+ if (IN6_IS_SCOPE_EMBED(ip6)) {
ip6->s6_addr16[1] = 0;
+ }
}
/*
*used_frames_count = frame_index;
}
+static bool
+inp_matches_kao_frame(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame,
+ struct inpcb *inp)
+{
+ if (inp->inp_ppcb == NULL) {
+ return false;
+ }
+ /* Release the want count */
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ return false;
+ }
+ if (inp->inp_last_outifp == NULL ||
+ inp->inp_last_outifp->if_index != ifp->if_index) {
+ return false;
+ }
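+ /*
+ * Keepalive frames store ports in host byte order (they were
+ * converted with ntohs when the frame was filled), so convert
+ * here as well before comparing.
+ */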
+ if (frame->local_port != ntohs(inp->inp_lport) ||
+ frame->remote_port != ntohs(inp->inp_fport)) {
+ return false;
+ }
+ if (inp->inp_vflag & INP_IPV4) {
+ if (memcmp(&inp->inp_laddr, frame->local_addr,
+ sizeof(struct in_addr)) != 0 ||
+ memcmp(&inp->inp_faddr, frame->remote_addr,
+ sizeof(struct in_addr)) != 0) {
+ return false;
+ }
+ } else if (inp->inp_vflag & INP_IPV6) {
+ if (memcmp(&inp->in6p_laddr, frame->local_addr,
+ sizeof(struct in6_addr)) != 0 ||
+ memcmp(&inp->in6p_faddr, frame->remote_addr,
+ sizeof(struct in6_addr)) != 0) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ return true;
+}
+
+int
+tcp_notify_kao_timeout(ifnet_t ifp,
+ struct ifnet_keepalive_offload_frame *frame)
+{
+ struct inpcb *inp = NULL;
+ struct socket *so = NULL;
+ bool found = false;
+
+ /*
+ * Find the matching socket while walking the PCB list, then
+ * unlock the list before posting an event on that socket
+ */
+ lck_rw_lock_shared(tcbinfo.ipi_lock);
+
+ LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
+ if ((so = inp->inp_socket) == NULL ||
+ (so->so_state & SS_DEFUNCT)) {
+ continue;
+ }
+ if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
+ continue;
+ }
+ if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
+ continue;
+ }
+ if (inp->inp_ppcb == NULL ||
+ in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
+ continue;
+ }
+ socket_lock(so, 1);
+ if (inp_matches_kao_frame(ifp, frame, inp)) {
+ /*
+ * Keep the matching socket locked
+ */
+ found = true;
+ break;
+ }
+ socket_unlock(so, 1);
+ }
+ lck_rw_done(tcbinfo.ipi_lock);
+
+ if (found) {
+ ASSERT(inp != NULL);
+ ASSERT(so != NULL);
+ ASSERT(so == inp->inp_socket);
+ /*
+ * Drop the TCP connection like tcptimers() does
+ */
+ struct tcpcb *tp = inp->inp_ppcb;
+
+ tcpstat.tcps_keepdrops++;
+ soevent(so,
+ (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
+ tp = tcp_drop(tp, ETIMEDOUT);
+
+ tcpstat.tcps_ka_offload_drops++;
+ os_log_info(OS_LOG_DEFAULT, "%s: dropped lport %u fport %u\n",
+ __func__, frame->local_port, frame->remote_port);
+
+ socket_unlock(so, 1);
+ }
+
+ return 0;
+}
+
errno_t
tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so,
u_int32_t notify_id)
{
struct tcp_notify_ack_marker *elm;
- if (so->so_snd.sb_cc == 0)
- return (ENOBUFS);
+ if (so->so_snd.sb_cc == 0) {
+ return ENOBUFS;
+ }
SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
/* Duplicate id is not allowed */
- if (elm->notify_id == notify_id)
- return (EINVAL);
+ if (elm->notify_id == notify_id) {
+ return EINVAL;
+ }
/* Duplicate position is not allowed */
- if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc)
- return (EINVAL);
+ if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) {
+ return EINVAL;
+ }
}
- return (0);
+ return 0;
}
errno_t
struct tcp_notify_ack_marker *nm, *elm = NULL;
struct socket *so = tp->t_inpcb->inp_socket;
- MALLOC(nm, struct tcp_notify_ack_marker *, sizeof (*nm),
+ MALLOC(nm, struct tcp_notify_ack_marker *, sizeof(*nm),
M_TEMP, M_WAIT | M_ZERO);
- if (nm == NULL)
- return (ENOMEM);
+ if (nm == NULL) {
+ return ENOMEM;
+ }
nm->notify_id = notify_id;
nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc;
SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
- if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una))
+ if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) {
break;
+ }
}
if (elm == NULL) {
SLIST_INSERT_AFTER(elm, nm, notify_next);
}
tp->t_notify_ack_count++;
- return (0);
+ return 0;
}
void
tcp_notify_ack_free(struct tcpcb *tp)
{
struct tcp_notify_ack_marker *elm, *next;
- if (SLIST_EMPTY(&tp->t_notify_ack))
+ if (SLIST_EMPTY(&tp->t_notify_ack)) {
return;
+ }
SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker,
struct tcp_notify_ack_complete *retid)
{
struct tcp_notify_ack_marker *elm;
- size_t complete = 0;
+ uint32_t complete = 0;
SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
- if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una))
+ if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
+ ASSERT(complete < UINT32_MAX);
complete++;
- else
+ } else {
break;
+ }
}
retid->notify_pending = tp->t_notify_ack_count - complete;
retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete);
struct tcp_notify_ack_marker *elm, *next;
SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
- if (i >= retid->notify_complete_count)
+ if (i >= retid->notify_complete_count) {
break;
+ }
if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
retid->notify_complete_id[i++] = elm->notify_id;
SLIST_REMOVE(&tp->t_notify_ack, elm,
if (!SLIST_EMPTY(&tp->t_notify_ack)) {
struct tcp_notify_ack_marker *elm;
elm = SLIST_FIRST(&tp->t_notify_ack);
- if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una))
- return (true);
+ if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
+ return true;
+ }
}
}
- return (false);
+ return false;
}
inline int32_t
so->so_snd.sb_cc > 0) {
int32_t unsent, sent;
sent = tp->snd_max - th_ack;
- if (tp->t_flags & TF_SENTFIN)
+ if (tp->t_flags & TF_SENTFIN) {
sent--;
+ }
unsent = so->so_snd.sb_cc - sent;
- return (unsent);
+ return unsent;
}
- return (0);
+ return 0;
}
#define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \
if (_ipv4_) { \
- ifp->if_ipv4_stat->_stat_++; \
+ ifp->if_ipv4_stat->_stat_++; \
} else { \
- ifp->if_ipv6_stat->_stat_++; \
+ ifp->if_ipv6_stat->_stat_++; \
} \
}
#define FLOW_ECN_ENABLED(_flags_) \
((_flags_ & (TE_ECN_ON)) == (TE_ECN_ON))
-void tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
+void
+tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
struct ifnet *ifp)
{
- if (ifp == NULL || !IF_FULLY_ATTACHED(ifp))
+ if (ifp == NULL || !IF_FULLY_ATTACHED(ifp)) {
return;
+ }
ifnet_lock_shared(ifp);
if (ifs->ecn_flags & TE_SETUPSENT) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
}
}
- if (ifs->ecn_fallback_synloss)
+ if (ifs->ecn_fallback_synloss) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
- if (ifs->ecn_fallback_droprst)
+ }
+ if (ifs->ecn_fallback_droprst) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
- if (ifs->ecn_fallback_droprxmt)
+ }
+ if (ifs->ecn_fallback_droprxmt) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
- if (ifs->ecn_fallback_ce)
+ }
+ if (ifs->ecn_fallback_ce) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
- if (ifs->ecn_fallback_reorder)
+ }
+ if (ifs->ecn_fallback_reorder) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
- if (ifs->ecn_recv_ce > 0)
+ }
+ if (ifs->ecn_recv_ce > 0) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
- if (ifs->ecn_recv_ece > 0)
+ }
+ if (ifs->ecn_recv_ece > 0) {
IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
+ }
tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);
ifnet_lock_done(ifp);
}
+