X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/39236c6e673c41db228275375ab7fdb0f837b292..490019cf9519204c5fb36b2fba54ceb983bb6b72:/bsd/netinet/tcp_subr.c

diff --git a/bsd/netinet/tcp_subr.c b/bsd/netinet/tcp_subr.c
index c9a7a6bb7..88d18875a 100644
--- a/bsd/netinet/tcp_subr.c
+++ b/bsd/netinet/tcp_subr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -90,6 +90,7 @@
 #include
 #include
+#include
 #define tcp_minmssoverload fring
 #define _IP_VHL
@@ -116,6 +117,7 @@
 #include
 #include
 #include
+#include
 #include
 #if INET6
@@ -134,12 +136,18 @@
 #endif
 #endif /*IPSEC*/
+#if NECP
+#include
+#endif /* NECP */
+
 #undef tcp_minmssoverload
 #if CONFIG_MACF_NET
 #include
 #endif /* MAC_NET */
+#include
+#include
 #include
 #include
 #include
@@ -150,10 +158,6 @@
 extern int tcp_lq_overflow;
-/* temporary: for testing */
-#if IPSEC
-extern int ipsec_bypass;
-#endif
 extern struct tcptimerlist tcp_timer_list;
 extern struct tcptailq tcp_tw_tailq;
@@ -170,6 +174,28 @@ SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
 extern int tcp_do_autorcvbuf;
+int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int ,
+    struct sysctl_req *);
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key,
+    CTLTYPE_STRING | CTLFLAG_WR,
+    0 , 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
+
+/* Current count of half-open TFO connections */
+int tcp_tfo_halfcnt = 0;
+
+/* Maximum of half-open TFO connection backlog */
+int tcp_tfo_backlog = 10;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, fastopen_backlog, CTLFLAG_RW | CTLFLAG_LOCKED,
+    &tcp_tfo_backlog, 0, "Backlog queue for half-open TFO connections");
+
+int tcp_fastopen = TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
+    &tcp_fastopen, 0, "Enable TCP Fastopen (RFC 7413)");
+
+int tcp_tfo_fallback_min = 10;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, fastopen_fallback_min, CTLFLAG_RW | CTLFLAG_LOCKED,
+    &tcp_tfo_fallback_min, 0, "Mininum number of trials without TFO when in fallback mode");
+
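For reference, the new net.inet.tcp.fastopen_key sysctl declared above is write-only and takes the key as a hex string of exactly TCP_FASTOPEN_KEYLEN * 2 characters (the handler added later in this diff rejects any other length). A minimal user-space sketch of setting it with sysctlbyname(3), assuming a 16-byte key and root privileges; the example key value is made up:

/* Illustrative only: write a 128-bit TCP Fastopen key as 32 hex characters. */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    /* 16 arbitrary bytes, encoded as exactly 32 hex characters */
    char keystring[] = "00112233445566778899aabbccddeeff";

    /* newlen is the string length without the terminating NUL, matching
     * the (sizeof(keystring) - 1) check in the kernel handler. */
    if (sysctlbyname("net.inet.tcp.fastopen_key", NULL, NULL,
        keystring, strlen(keystring)) == -1) {
        perror("sysctlbyname");
        return 1;
    }
    return 0;
}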
 /*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculous low MSS like 20 and send hundreds
@@ -181,10 +207,12 @@ extern int tcp_do_autorcvbuf;
 int tcp_minmss = TCP_MINMSS;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_minmss , 0, "Minmum TCP Maximum Segment Size");
-
-static int tcp_do_rfc1323 = 1;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW | CTLFLAG_LOCKED,
-    &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
+int tcp_do_rfc1323 = 1;
+#if (DEVELOPMENT || DEBUG)
+SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1323 , 0,
+    "Enable rfc1323 (high performance TCP) extensions");
+#endif /* (DEVELOPMENT || DEBUG) */
 
 // Not used
 static int tcp_do_rfc1644 = 0;
@@ -207,15 +235,16 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED, &
     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
 
 static int tcp_strict_rfc1948 = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW | CTLFLAG_LOCKED,
+static int tcp_isn_reseed_interval = 0;
+#if (DEVELOPMENT || DEBUG)
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948,
+    CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");
-static int tcp_isn_reseed_interval = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW | CTLFLAG_LOCKED,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval,
+    CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
-static int tcp_background_io_enabled = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_enabled, CTLFLAG_RW | CTLFLAG_LOCKED,
-    &tcp_background_io_enabled, 0, "Background IO Enabled");
+#endif /* (DEVELOPMENT || DEBUG) */
 
 int tcp_TCPTV_MIN = 100; /* 100ms minimum RTT */
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
@@ -229,31 +258,18 @@ __private_extern__ int tcp_use_randomport = 0;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_use_randomport, 0, "Randomize TCP port numbers");
 
-extern struct tcp_cc_algo tcp_cc_newreno;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno_sockets, CTLFLAG_RD | CTLFLAG_LOCKED,
-    &tcp_cc_newreno.num_sockets, 0, "Number of sockets using newreno");
-
-extern struct tcp_cc_algo tcp_cc_ledbat;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_sockets, CTLFLAG_RD | CTLFLAG_LOCKED,
-    &tcp_cc_ledbat.num_sockets, 0, "Number of sockets using background transport");
-
 __private_extern__ int tcp_win_scale = 3;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, win_scale_factor,
+    CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_win_scale, 0, "Window scaling factor");
 
 static void tcp_cleartaocache(void);
 static void tcp_notify(struct inpcb *, int);
-static void tcp_cc_init(void);
 
 struct zone *sack_hole_zone;
 struct zone *tcp_reass_zone;
 struct zone *tcp_bwmeas_zone;
-#if 0
-static unsigned int tcp_mptcp_dsnm_sz;
-struct zone *tcp_mptcp_dsnm_zone;
-#endif
-/* The array containing pointers to currently implemented TCP CC algorithms */
-struct tcp_cc_algo* tcp_cc_algo_list[TCP_CC_ALGO_COUNT];
+struct zone *tcp_rxt_seg_zone;
 
 extern int slowlink_wsize; /* window correction for slow links */
 extern int path_mtu_discovery;
@@ -306,13 +322,76 @@ static lck_grp_t *tcp_uptime_mtx_grp = NULL; /* mutex group definition */
 static lck_grp_attr_t *tcp_uptime_mtx_grp_attr = NULL; /* mutex group attributes */
 int tcp_notsent_lowat_check(struct socket *so);
+static aes_encrypt_ctx tfo_ctx; /* Crypto-context for TFO */
+
+void
+tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size)
+{
+    u_char in[CCAES_BLOCK_SIZE];
+#if INET6
+    int isipv6 = inp->inp_vflag & INP_IPV6;
+#endif
+
+    VERIFY(blk_size == CCAES_BLOCK_SIZE);
+
+    bzero(&in[0], CCAES_BLOCK_SIZE);
+    bzero(&out[0], CCAES_BLOCK_SIZE);
+
+#if INET6
+    if (isipv6)
+        memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
+    else
+#endif /* INET6 */
+        memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
+
+    aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
+}
+
+__private_extern__ int
+tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1,
+    __unused int arg2, struct sysctl_req *req)
+{
+    int error = 0;
+    /* TFO-key is expressed as a string in hex format (+1 to account for \0 char) */
+    char keystring[TCP_FASTOPEN_KEYLEN * 2 + 1];
+    u_int32_t key[TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)];
+    int i;
+
+    /* -1, because newlen is len without the terminating \0 character */
+    if (req->newlen != (sizeof(keystring) - 1)) {
+        error = EINVAL;
+        goto exit;
+    }
+
+    /* sysctl_io_string copies keystring into the oldptr of the sysctl_req.
+     * Make sure everything is zero, to avoid putting garbage in there or
+     * leaking the stack.
+     */
+    bzero(keystring, sizeof(keystring));
+
+    error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
+    if (error)
+        goto exit;
+
+    for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
+        /* We jump over the keystring in 8-character (4 byte in hex) steps */
+        if (sscanf(&keystring[i * 8], "%8x", &key[i]) != 1) {
+            error = EINVAL;
+            goto exit;
+        }
+    }
+
+    aes_encrypt_key128((u_char *)key, &tfo_ctx);
+
+exit:
+    return (error);
+}
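Conceptually, tcp_tfo_gen_cookie() above derives the Fast Open cookie by running a single AES-128 block over the zero-padded peer address with the configured key (aes_encrypt_cbc with a NULL IV over one block is equivalent to ECB of that block). A hedged user-space sketch of the same single-block operation using CommonCrypto, not intended to reproduce the kernel's cookie byte-for-byte (key parsing and address layout details differ); the key and peer address here are placeholders:

#include <CommonCrypto/CommonCryptor.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    unsigned char key[kCCKeySizeAES128] = { 0 };   /* configured TFO key */
    unsigned char in[kCCBlockSizeAES128] = { 0 };  /* peer address, zero padded */
    unsigned char out[kCCBlockSizeAES128];
    size_t moved = 0;

    /* An IPv4 peer such as 192.0.2.1 occupies the first four bytes. */
    const unsigned char peer[4] = { 192, 0, 2, 1 };
    memcpy(in, peer, sizeof(peer));

    /* One AES block; ECB on a single block equals CBC with an all-zero IV. */
    if (CCCrypt(kCCEncrypt, kCCAlgorithmAES128, kCCOptionECBMode,
        key, sizeof(key), NULL, in, sizeof(in),
        out, sizeof(out), &moved) != kCCSuccess)
        return 1;

    printf("derived %zu cookie bytes\n", moved);
    return 0;
}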
 int get_inpcb_str_size(void)
 {
     return sizeof(struct inpcb);
 }
-
 int get_tcp_str_size(void)
 {
     return sizeof(struct tcpcb);
@@ -320,16 +399,51 @@ int get_tcp_str_size(void)
 int tcp_freeq(struct tcpcb *tp);
+static int scale_to_powerof2(int size);
+
 /*
- * Initialize TCP congestion control algorithms.
- */
+ * This helper routine returns one of the following scaled value of size:
+ * 1. Rounded down power of two value of size if the size value passed as
+ *    argument is not a power of two and the rounded up value overflows.
+ * OR
+ * 2. Rounded up power of two value of size if the size value passed as
+ *    argument is not a power of two and the rounded up value does not overflow
+ * OR
+ * 3. Same value as argument size if it is already a power of two.
+ */
+static int scale_to_powerof2(int size) {
+    /* Handle special case of size = 0 */
+    int ret = size ? size : 1;
+
+    if (!powerof2(ret)) {
+        while(!powerof2(size)) {
+            /*
+             * Clear out least significant
+             * set bit till size is left with
+             * its highest set bit at which point
+             * it is rounded down power of two.
+             */
+            size = size & (size -1);
+        }
-
+        /* Check for overflow when rounding up */
+        if (0 == (size << 1)) {
+            ret = size;
+        } else {
+            ret = size << 1;
+        }
+    }
+
+    return ret;
+}
+
-void
-tcp_cc_init(void)
+static void
+tcp_tfo_init()
 {
-    bzero(&tcp_cc_algo_list, sizeof(tcp_cc_algo_list));
-    tcp_cc_algo_list[TCP_CC_ALGO_NEWRENO_INDEX] = &tcp_cc_newreno;
-    tcp_cc_algo_list[TCP_CC_ALGO_BACKGROUND_INDEX] = &tcp_cc_ledbat;
+    u_char key[TCP_FASTOPEN_KEYLEN];
+
+    read_random(key, sizeof(key));
+    aes_encrypt_key128(key, &tfo_ctx);
 }
 /*
@@ -363,6 +477,8 @@ tcp_init(struct protosw *pp, struct domain *dp)
     read_random(&tcp_now, sizeof(tcp_now));
     tcp_now = tcp_now & 0x3fffffff; /* Starts tcp internal clock at a random value */
 
+    tcp_tfo_init();
+
     LIST_INIT(&tcb);
     tcbinfo.ipi_listhead = &tcb;
@@ -384,10 +500,24 @@
         /* NOTREACHED */
     }
 
+    if (tcp_tcbhashsize == 0) {
+        /* Set to default */
+        tcp_tcbhashsize = 512;
+    }
+
     if (!powerof2(tcp_tcbhashsize)) {
-        printf("WARNING: TCB hash size not a power of 2\n");
-        tcp_tcbhashsize = 512; /* safe default */
+        int old_hash_size = tcp_tcbhashsize;
+        tcp_tcbhashsize = scale_to_powerof2(tcp_tcbhashsize);
+        /* Lower limit of 16 */
+        if (tcp_tcbhashsize < 16) {
+            tcp_tcbhashsize = 16;
+        }
+        printf("WARNING: TCB hash size not a power of 2, "
+            "scaled from %d to %d.\n",
+            old_hash_size,
+            tcp_tcbhashsize);
     }
+
     tcbinfo.ipi_hashbase = hashinit(tcp_tcbhashsize, M_PCB, &tcbinfo.ipi_hashmask);
     tcbinfo.ipi_porthashbase = hashinit(tcp_tcbhashsize, M_PCB,
         &tcbinfo.ipi_porthashmask);
@@ -397,6 +527,7 @@ zone_change(tcbinfo.ipi_zone, Z_EXPAND, TRUE);
 
     tcbinfo.ipi_gc = tcp_gc;
+    tcbinfo.ipi_timer = tcp_itimer;
     in_pcbinfo_attach(&tcbinfo);
 
     str_size = P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t));
@@ -404,9 +535,8 @@
     zone_change(sack_hole_zone, Z_CALLERACCT, FALSE);
     zone_change(sack_hole_zone, Z_EXPAND, TRUE);
 
-    tcp_reass_maxseg = nmbclusters / 16;
     str_size = P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t));
-    tcp_reass_zone = zinit(str_size, (tcp_reass_maxseg + 1) * str_size,
+    tcp_reass_zone = zinit(str_size, (nmbclusters >> 4) * str_size,
         0, "tcp_reass_zone");
     if (tcp_reass_zone == NULL) {
         panic("%s: failed allocating tcp_reass_zone", __func__);
@@ -424,6 +554,17 @@
     zone_change(tcp_bwmeas_zone, Z_CALLERACCT, FALSE);
     zone_change(tcp_bwmeas_zone, Z_EXPAND, TRUE);
 
+    str_size = P2ROUNDUP(sizeof(struct tcp_ccstate), sizeof(u_int64_t));
+    tcp_cc_zone = zinit(str_size, 20000 * str_size, 0, "tcp_cc_zone");
+    zone_change(tcp_cc_zone, Z_CALLERACCT, FALSE);
+    zone_change(tcp_cc_zone, Z_EXPAND, TRUE);
+
+    str_size = P2ROUNDUP(sizeof(struct tcp_rxt_seg), sizeof(u_int64_t));
+    tcp_rxt_seg_zone = zinit(str_size, 10000 * str_size, 0,
+        "tcp_rxt_seg_zone");
+    zone_change(tcp_rxt_seg_zone, Z_CALLERACCT, FALSE);
+    zone_change(tcp_rxt_seg_zone, Z_EXPAND, TRUE);
+
 #if INET6
 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
 #else /* INET6 */
@@ -451,8 +592,6 @@
     if ((tcp_timer_list.mtx = lck_mtx_alloc_init(tcp_timer_list.mtx_grp,
         tcp_timer_list.mtx_attr)) == NULL) {
         panic("failed to allocate memory for tcp_timer_list.mtx\n");
     };
-    tcp_timer_list.fast_quantum = TCP_FASTTIMER_QUANTUM;
-    tcp_timer_list.slow_quantum = TCP_SLOWTIMER_QUANTUM;
     if ((tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL)) == NULL) {
         panic("failed to allocate call entry 1 in tcp_init\n");
     }
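The tcp_tcbhashsize handling above now uses the new scale_to_powerof2() helper instead of silently falling back to 512. A small user-space re-implementation (illustrative only, mirroring the kernel helper) shows the rounding behaviour:

#include <stdio.h>

/* Same idea as the kernel helper: round to a power of two, preferring the
 * rounded-up value unless the left shift overflows to zero. */
static int
scale_to_powerof2(int size)
{
    int ret = size ? size : 1;

    if ((ret & (ret - 1)) != 0) {          /* not already a power of two */
        while ((size & (size - 1)) != 0)
            size &= size - 1;              /* round down */
        ret = (size << 1) == 0 ? size : size << 1;
    }
    return ret;
}

int
main(void)
{
    int samples[] = { 0, 500, 512, 600, 4096 };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%d -> %d\n", samples[i], scale_to_powerof2(samples[i]));
    /* prints: 0 -> 1, 500 -> 512, 512 -> 512, 600 -> 1024, 4096 -> 4096 */
    return 0;
}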
@@ -465,11 +604,20 @@
     tcp_uptime_mtx_attr = lck_attr_alloc_init();
     tcp_uptime_lock = lck_spin_alloc_init(tcp_uptime_mtx_grp,
         tcp_uptime_mtx_attr);
 
-    /* Initialize TCP congestion control algorithms list */
-    tcp_cc_init();
-
     /* Initialize TCP LRO data structures */
     tcp_lro_init();
+
+    /* Initialize TCP Cache */
+    tcp_cache_init();
+
+    /*
+     * If more than 60 MB of mbuf pool is available, increase the
+     * maximum allowed receive and send socket buffer size.
+     */
+    if (nmbclusters > 30720) {
+        tcp_autorcvbuf_max = 1024 * 1024;
+        tcp_autosndbuf_max = 1024 * 1024;
+    }
 }
 
 /*
@@ -568,19 +716,10 @@ tcp_maketemplate(tp)
  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
  */
 void
-tcp_respond(
-    struct tcpcb *tp,
-    void *ipgen,
-    register struct tcphdr *th,
-    register struct mbuf *m,
-    tcp_seq ack,
-    tcp_seq seq,
-    int flags,
-    unsigned int ifscope,
-    unsigned int nocell
-    )
+tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
+    tcp_seq ack, tcp_seq seq, int flags, struct tcp_respond_args *tra)
 {
-    register int tlen;
+    int tlen;
     int win = 0;
     struct route *ro = 0;
     struct route sro;
@@ -751,8 +890,14 @@
     if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
         tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
 #endif
+
+#if NECP
+    necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0);
+#endif /* NECP */
+
 #if IPSEC
-    if (ipsec_bypass == 0 && ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
+    if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
+        ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
         m_freem(m);
         return;
     }
@@ -780,13 +925,17 @@
 #if INET6
     if (isipv6) {
-        struct ip6_out_args ip6oa = { ifscope, { 0 },
+        struct ip6_out_args ip6oa = { tra->ifscope, { 0 },
             IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR, 0 };
 
-        if (ifscope != IFSCOPE_NONE)
+        if (tra->ifscope != IFSCOPE_NONE)
             ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
-        if (nocell)
+        if (tra->nocell)
             ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
+        if (tra->noexpensive)
+            ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
+        if (tra->awdl_unrestricted)
+            ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
 
         (void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
             NULL, &ip6oa);
@@ -801,13 +950,17 @@
     } else
 #endif /* INET6 */
     {
-        struct ip_out_args ipoa = { ifscope, { 0 },
+        struct ip_out_args ipoa = { tra->ifscope, { 0 },
             IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR, 0 };
 
-        if (ifscope != IFSCOPE_NONE)
+        if (tra->ifscope != IFSCOPE_NONE)
            ipoa.ipoa_flags |= IPOAF_BOUND_IF;
-        if (nocell)
+        if (tra->nocell)
            ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
+        if (tra->noexpensive)
+            ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
+        if (tra->awdl_unrestricted)
+            ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
 
         if (ro != &sro) {
             /* Copy the cached route and take an extra reference */
@@ -851,7 +1004,7 @@ tcp_newtcpcb(inp)
 
     calculate_tcp_clock();
 
-    if (!so->cached_in_sock_layer) {
+    if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
         it = (struct inp_tp *)(void *)inp;
         tp = &it->tcb;
     } else {
@@ -872,6 +1025,7 @@
         tp->t_flagsext |= TF_SACK_ENABLE;
 
     TAILQ_INIT(&tp->snd_holes);
+    SLIST_INIT(&tp->t_rxt_segments);
     tp->t_inpcb = inp;  /* XXX */
     /*
      * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
@@ -883,15 +1037,18 @@
     tp->t_rttmin = tcp_TCPTV_MIN;
     tp->t_rxtcur = TCPTV_RTOBASE;
 
-    /* Initialize congestion control algorithm for this connection
-     * to newreno by default
-     */
-    tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
-    if (CC_ALGO(tp)->init != NULL) {
+    if (tcp_use_newreno)
+        /* use newreno by default */
+        tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
+    else
+        tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
+
+    tcp_cc_allocate_state(tp);
+
+    if (CC_ALGO(tp)->init != NULL)
         CC_ALGO(tp)->init(tp);
-    }
 
-    tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+    tp->snd_cwnd = TCP_CC_CWND_INIT_BYTES;
     tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
     tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
     tp->t_rcvtime = tcp_now;
@@ -977,6 +1134,51 @@ tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
     }
 }
 
+static inline void
+tcp_update_ecn_perf_stats(struct tcpcb *tp,
+    struct if_tcp_ecn_perf_stat *stat)
+{
+    u_int64_t curval, oldval;
+    struct inpcb *inp = tp->t_inpcb;
+    stat->total_txpkts += inp->inp_stat->txpackets;
+    stat->total_rxpkts += inp->inp_stat->rxpackets;
+    stat->total_rxmitpkts += tp->t_stat.rxmitpkts;
+    stat->total_oopkts += tp->t_rcvoopack;
+    stat->total_reorderpkts += (tp->t_reordered_pkts + tp->t_pawsdrop +
+        tp->t_dsack_sent + tp->t_dsack_recvd);
+
+    /* Average RTT */
+    curval = (tp->t_srtt >> TCP_RTT_SHIFT);
+    if (curval > 0 && tp->t_rttupdated >= 16) {
+        if (stat->rtt_avg == 0) {
+            stat->rtt_avg = curval;
+        } else {
+            oldval = stat->rtt_avg;
+            stat->rtt_avg =
+                ((oldval << 4) - oldval + curval) >> 4;
+        }
+    }
+
+    /* RTT variance */
+    curval = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
+    if (curval > 0 && tp->t_rttupdated >= 16) {
+        if (stat->rtt_var == 0) {
+            stat->rtt_var = curval;
+        } else {
+            oldval = stat->rtt_var;
+            stat->rtt_var =
+                ((oldval << 4) - oldval + curval) >> 4;
+        }
+    }
+
+    /* Total number of SACK recovery episodes */
+    stat->sack_episodes += tp->t_sack_recovery_episode;
+
+    if (inp->inp_socket->so_error == ECONNRESET)
+        stat->rst_drop++;
+    return;
+}
+
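The RTT averaging used in tcp_update_ecn_perf_stats() above, avg = ((avg << 4) - avg + cur) >> 4, is an exponentially weighted moving average with a 1/16 gain, i.e. roughly avg += (cur - avg) / 16. A tiny stand-alone sketch of the arithmetic for illustration:

#include <stdio.h>
#include <stdint.h>

/* Shift form of the 1/16-gain EWMA used in the diff */
static uint64_t
ewma_shift(uint64_t avg, uint64_t cur)
{
    return ((avg << 4) - avg + cur) >> 4;
}

int
main(void)
{
    uint64_t avg = 100;                 /* e.g. smoothed RTT in ms */
    uint64_t samples[] = { 100, 200, 60, 100 };

    for (unsigned i = 0; i < 4; i++) {
        avg = ewma_shift(avg, samples[i]);
        printf("sample %llu -> avg %llu\n",
            (unsigned long long)samples[i], (unsigned long long)avg);
    }
    /* Each step moves avg about 1/16 of the way toward the new sample. */
    return 0;
}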
 /*
  * Close a TCP control block:
  *	discard all space held by the tcp
  *	discard internet protocol block
  *	wake up any sleepers
  */
@@ -997,7 +1199,7 @@ tcp_close(tp)
     int dosavessthresh;
 
     /* tcp_close was called previously, bail */
-    if ( inp->inp_ppcb == NULL)
+    if (inp->inp_ppcb == NULL)
         return(NULL);
 
     tcp_canceltimers(tp);
@@ -1024,10 +1226,6 @@ tcp_close(tp)
     DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
         struct tcpcb *, tp, int32_t, TCPS_CLOSED);
 
-    if (CC_ALGO(tp)->cleanup != NULL) {
-        CC_ALGO(tp)->cleanup(tp);
-    }
-
 #if INET6
     ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
 #else
@@ -1065,13 +1263,10 @@
 #endif /* INET6 */
 
     if (ROUTE_UNUSABLE(ro) ||
         SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
-        if (tp->t_state >= TCPS_CLOSE_WAIT) {
-            DTRACE_TCP4(state__change,
-                void, NULL, struct inpcb *, inp,
-                struct tcpcb *, tp, int32_t,
-                TCPS_CLOSING);
-            tp->t_state = TCPS_CLOSING;
-        }
+        DTRACE_TCP4(state__change, void, NULL,
+            struct inpcb *, inp, struct tcpcb *, tp,
+            int32_t, TCPS_CLOSED);
+        tp->t_state = TCPS_CLOSED;
         goto no_valid_rt;
     }
@@ -1167,11 +1362,96 @@ no_valid_rt:
     /* free the reassembly queue, if any */
     (void) tcp_freeq(tp);
 
+    /* Collect ECN related statistics */
+    if (tp->ecn_flags & TE_SETUPSENT) {
+        if (tp->ecn_flags & TE_CLIENT_SETUP) {
+            INP_INC_IFNET_STAT(inp, ecn_client_setup);
+            if (TCP_ECN_ENABLED(tp)) {
+                INP_INC_IFNET_STAT(inp,
+                    ecn_client_success);
+            } else if (tp->ecn_flags & TE_LOST_SYN) {
+                INP_INC_IFNET_STAT(inp, ecn_syn_lost);
+            } else {
+                INP_INC_IFNET_STAT(inp,
+                    ecn_peer_nosupport);
+            }
+        } else {
+            INP_INC_IFNET_STAT(inp, ecn_server_setup);
+            if (TCP_ECN_ENABLED(tp)) {
+                INP_INC_IFNET_STAT(inp,
+                    ecn_server_success);
+            } else if (tp->ecn_flags & TE_LOST_SYNACK) {
+                INP_INC_IFNET_STAT(inp,
+                    ecn_synack_lost);
+            } else {
+                INP_INC_IFNET_STAT(inp,
+                    ecn_peer_nosupport);
+            }
+        }
+    } else {
+        INP_INC_IFNET_STAT(inp, ecn_off_conn);
+    }
+    if (TCP_ECN_ENABLED(tp)) {
+        if (tp->ecn_flags & TE_RECV_ECN_CE) {
+            tcpstat.tcps_ecn_conn_recv_ce++;
+            INP_INC_IFNET_STAT(inp, ecn_conn_recv_ce);
+        }
+        if (tp->ecn_flags & TE_RECV_ECN_ECE) {
+            tcpstat.tcps_ecn_conn_recv_ece++;
+            INP_INC_IFNET_STAT(inp, ecn_conn_recv_ece);
+        }
+        if (tp->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
+            if (tp->t_stat.txretransmitbytes > 0 ||
+                tp->t_stat.rxoutoforderbytes > 0) {
+                tcpstat.tcps_ecn_conn_pl_ce++;
+                INP_INC_IFNET_STAT(inp, ecn_conn_plce);
+            } else {
+                tcpstat.tcps_ecn_conn_nopl_ce++;
+                INP_INC_IFNET_STAT(inp, ecn_conn_noplce);
+            }
+        } else {
+            if (tp->t_stat.txretransmitbytes > 0 ||
+                tp->t_stat.rxoutoforderbytes > 0) {
+                tcpstat.tcps_ecn_conn_plnoce++;
+                INP_INC_IFNET_STAT(inp, ecn_conn_plnoce);
+            }
+        }
+    }
+
+    /* Aggregate performance stats */
+    if (inp->inp_last_outifp != NULL && !(tp->t_flags & TF_LOCAL)) {
+        struct ifnet *ifp = inp->inp_last_outifp;
+        ifnet_lock_shared(ifp);
+        if ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) ==
+            IFRF_ATTACHED) {
+            if (inp->inp_vflag & INP_IPV6) {
+                ifp->if_ipv6_stat->timestamp = net_uptime();
+                if (TCP_ECN_ENABLED(tp)) {
+                    tcp_update_ecn_perf_stats(tp,
+                        &ifp->if_ipv6_stat->ecn_on);
+                } else {
+                    tcp_update_ecn_perf_stats(tp,
+                        &ifp->if_ipv6_stat->ecn_off);
+                }
+            } else {
+                ifp->if_ipv4_stat->timestamp = net_uptime();
+                if (TCP_ECN_ENABLED(tp)) {
+                    tcp_update_ecn_perf_stats(tp,
+                        &ifp->if_ipv4_stat->ecn_on);
+                } else {
+                    tcp_update_ecn_perf_stats(tp,
+                        &ifp->if_ipv4_stat->ecn_off);
+                }
            }
+        }
+        ifnet_lock_done(ifp);
+    }
+
     tcp_free_sackholes(tp);
     if (tp->t_bwmeas != NULL) {
         tcp_bwmeas_free(tp);
     }
-
+    tcp_rxtseg_clean(tp);
     /* Free the packet list */
     if (tp->t_pktlist_head != NULL)
         m_freem_list(tp->t_pktlist_head);
@@ -1179,12 +1459,19 @@
 
 #if MPTCP
     /* Clear MPTCP state */
+    if ((so->so_flags & SOF_MPTCP_TRUE) ||
+        (so->so_flags & SOF_MP_SUBFLOW)) {
+        soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_DELETEOK));
+    }
     tp->t_mpflags = 0;
+    tp->t_mptcb = NULL;
 #endif /* MPTCP */
 
-    if (so->cached_in_sock_layer)
+    if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER)
         inp->inp_saved_ppcb = (caddr_t) tp;
 
+    tp->t_state = TCPS_CLOSED;
+
     /* Issue a wakeup before detach so that we don't miss
      * a wakeup
      */
@@ -1194,12 +1481,41 @@ no_valid_rt:
      * Clean up any LRO state
      */
     if (tp->t_flagsext & TF_LRO_OFFLOADED) {
-        tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr,
-            inp->inp_lport,
-            inp->inp_fport);
+        tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr,
+            inp->inp_lport, inp->inp_fport);
         tp->t_flagsext &= ~TF_LRO_OFFLOADED;
     }
-    tp->t_state = TCPS_CLOSED;
+
+    /*
+     * If this is a socket that does not want to wakeup the device
+     * for its traffic, the application might need to know that the
+     * socket is closed, send a notification.
+     */
+    if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
+        inp->inp_state != INPCB_STATE_DEAD &&
+        !(inp->inp_flags2 & INP2_TIMEWAIT))
+        socket_post_kev_msg_closed(so);
+
+    if (CC_ALGO(tp)->cleanup != NULL) {
+        CC_ALGO(tp)->cleanup(tp);
+    }
+
+    if (tp->t_ccstate != NULL) {
+        zfree(tcp_cc_zone, tp->t_ccstate);
+        tp->t_ccstate = NULL;
+    }
+    tp->tcp_cc_index = TCP_CC_ALGO_NONE;
+
+    /* Can happen if we close the socket before receiving the third ACK */
+    if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
+        OSDecrementAtomic(&tcp_tfo_halfcnt);
+
+        /* Panic if something has gone terribly wrong. */
+        VERIFY(tcp_tfo_halfcnt >= 0);
+
+        tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
+    }
+
 #if INET6
     if (SOCK_CHECK_DOM(so, PF_INET6))
         in6_pcbdetach(inp);
@@ -1210,7 +1526,8 @@
     /* Call soisdisconnected after detach because it might unlock the socket */
     soisdisconnected(so);
     tcpstat.tcps_closed++;
-    KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed,0,0,0,0);
+    KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
+        tcpstat.tcps_closed, 0, 0, 0, 0);
     return(NULL);
 }
 
@@ -1226,45 +1543,50 @@ tcp_freeq(tp)
         LIST_REMOVE(q, tqe_q);
         m_freem(q->tqe_m);
         zfree(tcp_reass_zone, q);
-        tcp_reass_qsize--;
         rv = 1;
     }
+    tp->t_reassqlen = 0;
     return (rv);
 }
 
+
+/*
+ * Walk the tcpcbs, if existing, and flush the reassembly queue,
+ * if there is one, when do_tcpdrain is enabled.
+ * Also defunct the extended background idle socket.
+ * Do it next time if the pcbinfo lock is in use.
+ */
 void
 tcp_drain()
 {
-    if (do_tcpdrain)
-    {
-        struct inpcb *inp;
-        struct tcpcb *tp;
-        /*
-         * Walk the tcpbs, if existing, and flush the reassembly queue,
-         * if there is one...
-         * Do it next time if the pcbinfo lock is in use
-         */
-        if (!lck_rw_try_lock_exclusive(tcbinfo.ipi_lock))
-            return;
+    struct inpcb *inp;
+    struct tcpcb *tp;
 
-        LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
-            if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
-                WNT_STOPUSING) {
-                tcp_lock(inp->inp_socket, 1, 0);
-                if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
-                    == WNT_STOPUSING) {
-                    /* lost a race, try the next one */
-                    tcp_unlock(inp->inp_socket, 1, 0);
-                    continue;
-                }
-                tp = intotcpcb(inp);
-                tcp_freeq(tp);
+    if (!lck_rw_try_lock_exclusive(tcbinfo.ipi_lock))
+        return;
+
+    LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
+        if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
+            WNT_STOPUSING) {
+            tcp_lock(inp->inp_socket, 1, 0);
+            if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
+                == WNT_STOPUSING) {
+                /* lost a race, try the next one */
                 tcp_unlock(inp->inp_socket, 1, 0);
-            }
-        }
-        lck_rw_done(tcbinfo.ipi_lock);
+                continue;
+            }
+            tp = intotcpcb(inp);
+
+            if (do_tcpdrain)
+                tcp_freeq(tp);
+
+            so_drain_extended_bk_idle(inp->inp_socket);
+
+            tcp_unlock(inp->inp_socket, 1, 0);
+        }
     }
+    lck_rw_done(tcbinfo.ipi_lock);
+
 }
 
 /*
@@ -1341,16 +1663,16 @@ tcp_bwmeas_free(struct tcpcb* tp)
 static void
 tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
 {
-    int i;
-
-    otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
+    otp->t_segq = (uint32_t)VM_KERNEL_ADDRPERM(tp->t_segq.lh_first);
     otp->t_dupacks = tp->t_dupacks;
-    for (i = 0; i < TCPT_NTIMERS_EXT; i++)
-        otp->t_timer[i] = tp->t_timer[i];
-    otp->t_inpcb = (_TCPCB_PTR(struct inpcb *))(uintptr_t)tp->t_inpcb;
+    otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
+    otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
+    otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
+    otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
+    otp->t_inpcb = (_TCPCB_PTR(struct inpcb *))VM_KERNEL_ADDRPERM(tp->t_inpcb);
     otp->t_state = tp->t_state;
     otp->t_flags = tp->t_flags;
-    otp->t_force = tp->t_force;
+    otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
     otp->snd_una = tp->snd_una;
     otp->snd_max = tp->snd_max;
     otp->snd_nxt = tp->snd_nxt;
@@ -1389,8 +1711,8 @@ tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
     otp->ts_recent = tp->ts_recent;
     otp->ts_recent_age = tp->ts_recent_age;
     otp->last_ack_sent = tp->last_ack_sent;
-    otp->cc_send = tp->cc_send;
-    otp->cc_recv = tp->cc_recv;
+    otp->cc_send = 0;
+    otp->cc_recv = 0;
     otp->snd_recover = tp->snd_recover;
    otp->snd_cwnd_prev = tp->snd_cwnd_prev;
    otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
@@ -1517,22 +1839,23 @@ tcp_pcblist SYSCTL_HANDLER_ARGS
     return error;
 }
 
-SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
+    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
     tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
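The pcblist family of sysctls above (and pcblist64/pcblist_n further down) exports the active connection table; this revision also declares their CTLTYPE_STRUCT type explicitly. A minimal user-space sketch of the usual two-step fetch of net.inet.tcp.pcblist (parsing of the variable-length xinpgen/xtcpcb records is omitted):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    size_t len = 0;

    /* First call reports the required buffer size. */
    if (sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0) == -1) {
        perror("sysctlbyname(size)");
        return 1;
    }

    char *buf = malloc(len);
    if (buf == NULL)
        return 1;

    /* Second call fills the buffer with the connection records. */
    if (sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0) == -1) {
        perror("sysctlbyname(fetch)");
        free(buf);
        return 1;
    }

    printf("pcblist: %zu bytes of xtcpcb records\n", len);
    free(buf);
    return 0;
}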
 static void
 tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
 {
-    int i;
-
-    otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
+    otp->t_segq = (uint32_t)VM_KERNEL_ADDRPERM(tp->t_segq.lh_first);
     otp->t_dupacks = tp->t_dupacks;
-    for (i = 0; i < TCPT_NTIMERS_EXT; i++)
-        otp->t_timer[i] = tp->t_timer[i];
+    otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
+    otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
+    otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
+    otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
     otp->t_state = tp->t_state;
     otp->t_flags = tp->t_flags;
-    otp->t_force = tp->t_force;
+    otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
     otp->snd_una = tp->snd_una;
     otp->snd_max = tp->snd_max;
     otp->snd_nxt = tp->snd_nxt;
@@ -1571,8 +1894,8 @@ tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
     otp->ts_recent = tp->ts_recent;
     otp->ts_recent_age = tp->ts_recent_age;
     otp->last_ack_sent = tp->last_ack_sent;
-    otp->cc_send = tp->cc_send;
-    otp->cc_recv = tp->cc_recv;
+    otp->cc_send = 0;
+    otp->cc_recv = 0;
     otp->snd_recover = tp->snd_recover;
     otp->snd_cwnd_prev = tp->snd_cwnd_prev;
     otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
@@ -1664,7 +1987,7 @@ tcp_pcblist64 SYSCTL_HANDLER_ARGS
             bzero(&xt, sizeof(xt));
             xt.xt_len = sizeof xt;
             inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
-            xt.xt_inpcb.inp_ppcb = (u_int64_t)(uintptr_t)inp->inp_ppcb;
+            xt.xt_inpcb.inp_ppcb = (uint64_t)VM_KERNEL_ADDRPERM(inp->inp_ppcb);
             if (inp->inp_ppcb != NULL)
                 tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb, &xt);
             if (inp->inp_socket)
@@ -1692,7 +2015,8 @@ tcp_pcblist64 SYSCTL_HANDLER_ARGS
     return error;
 }
 
-SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
+    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
     tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
 
 
@@ -1708,15 +2032,17 @@ tcp_pcblist_n SYSCTL_HANDLER_ARGS
 }
 
 
-SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
+    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
     tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
 
 
 __private_extern__ void
-tcp_get_ports_used(uint32_t ifindex, int protocol, uint32_t wildcardok,
+tcp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
     bitstr_t *bitfield)
 {
-    inpcb_get_ports_used(ifindex, protocol, wildcardok, bitfield, &tcbinfo);
+    inpcb_get_ports_used(ifindex, protocol, flags,
+        bitfield, &tcbinfo);
 }
 
 __private_extern__ uint32_t
@@ -1749,6 +2075,9 @@ tcp_ctlinput(cmd, sa, vip)
     if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
         return;
 
+    if ((unsigned)cmd >= PRC_NCMDS)
+        return;
+
     if (cmd == PRC_MSGSIZE)
         notify = tcp_mtudisc;
     else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
@@ -1762,7 +2091,7 @@
     /* Source quench is deprecated */
     else if (cmd == PRC_QUENCH)
         return;
-    else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
+    else if (inetctlerrmap[cmd] == 0)
         return;
     if (ip) {
         struct tcphdr th;
@@ -1867,10 +2196,12 @@ tcp6_ctlinput(cmd, sa, d)
         sa->sa_len != sizeof(struct sockaddr_in6))
         return;
 
+    if ((unsigned)cmd >= PRC_NCMDS)
+        return;
+
     if (cmd == PRC_MSGSIZE)
         notify = tcp_mtudisc;
-    else if (!PRC_IS_REDIRECT(cmd) &&
-        ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
+    else if (!PRC_IS_REDIRECT(cmd) && (inet6ctlerrmap[cmd] == 0))
         return;
     /* Source quench is deprecated */
     else if (cmd == PRC_QUENCH)
@@ -2053,6 +2384,7 @@ tcp_mtudisc(
     struct socket *so = inp->inp_socket;
     int offered;
     int mss;
+    u_int32_t mtu;
 #if INET6
     int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
 #endif /* INET6 */
@@ -2078,7 +2410,17 @@
         }
         taop = rmx_taop(rt->rt_rmx);
         offered = taop->tao_mssopt;
-        mss = rt->rt_rmx.rmx_mtu -
+        mtu = rt->rt_rmx.rmx_mtu;
+
+        /* Route locked during lookup above */
+        RT_UNLOCK(rt);
+
+#if NECP
+        // Adjust MTU if necessary.
+        mtu = necp_socket_get_effective_mtu(inp, mtu);
+#endif /* NECP */
+
+        mss = mtu -
 #if INET6
             (isipv6 ?
                 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
@@ -2089,9 +2431,6 @@
                 sizeof(struct tcpiphdr);
 #endif /* INET6 */
             ;
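For illustration of the arithmetic above: tcp_mtudisc() now starts from the route MTU, lets NECP lower it if a policy requires, and then subtracts the fixed header sizes, so a common 1500-byte Ethernet MTU yields a 1460-byte MSS for IPv4 (20-byte IP + 20-byte TCP header) and 1440 bytes for IPv6 (40-byte ip6_hdr). A minimal sketch, using literal header sizes rather than the kernel structures:

#include <stdio.h>

#define IP4_HDR_LEN  20   /* sizeof(struct ip), no options */
#define IP6_HDR_LEN  40   /* sizeof(struct ip6_hdr) */
#define TCP_HDR_LEN  20   /* sizeof(struct tcphdr), no options */

int
main(void)
{
    unsigned mtu = 1500;

    printf("IPv4 MSS: %u\n", mtu - IP4_HDR_LEN - TCP_HDR_LEN);  /* 1460 */
    printf("IPv6 MSS: %u\n", mtu - IP6_HDR_LEN - TCP_HDR_LEN);  /* 1440 */
    return 0;
}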
-        /* Route locked during lookup above */
-        RT_UNLOCK(rt);
-
         if (offered)
             mss = min(mss, offered);
         /*
@@ -2223,10 +2562,13 @@ tcp_rtlookup(inp, input_ifscope)
             somultipages(inp->inp_socket,
                 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
             tcp_set_tso(tp, rt->rt_ifp);
+            soif2kcl(inp->inp_socket,
+                (rt->rt_ifp->if_eflags & IFEF_2KCL));
+            tcp_set_ecn(tp, rt->rt_ifp);
         }
 
         /* Note if the peer is local */
-        if (rt != NULL &&
+        if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
             (rt->rt_gateway->sa_family == AF_LINK ||
             rt->rt_ifp->if_flags & IFF_LOOPBACK ||
             in_localaddr(inp->inp_faddr))) {
@@ -2327,10 +2669,13 @@ tcp_rtlookup6(inp, input_ifscope)
             somultipages(inp->inp_socket,
                 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
             tcp_set_tso(tp, rt->rt_ifp);
+            soif2kcl(inp->inp_socket,
+                (rt->rt_ifp->if_eflags & IFEF_2KCL));
+            tcp_set_ecn(tp, rt->rt_ifp);
         }
 
         /* Note if the peer is local */
-        if (rt != NULL &&
+        if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
             (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
             IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
             rt->rt_gateway->sa_family == AF_LINK ||
@@ -2524,15 +2869,17 @@ tcp_getlock(
     }
 }
 
-/* Determine if we can grow the recieve socket buffer to avoid sending
+/*
+ * Determine if we can grow the receive socket buffer to avoid sending
  * a zero window update to the peer. We allow even socket buffers that
  * have fixed size (set by the application) to grow if the resource
  * constraints are met. They will also be trimmed after the application
  * reads data.
  */
 static void
-tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb) {
-    u_int32_t rcvbufinc = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
+tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
+{
+    u_int32_t rcvbufinc = tp->t_maxseg << 4;
     u_int32_t rcvbuf = sb->sb_hiwat;
     struct socket *so = tp->t_inpcb->inp_socket;
 
@@ -2546,9 +2893,13 @@ tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb) {
     if (tcp_do_autorcvbuf == 1 &&
         tcp_cansbgrow(sb) &&
         (tp->t_flags & TF_SLOWLINK) == 0 &&
+        (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
         (rcvbuf - sb->sb_cc) < rcvbufinc &&
-        (rcvbuf < tcp_autorcvbuf_max)) {
-        sbreserve(sb, (sb->sb_hiwat + rcvbufinc));
+        rcvbuf < tcp_autorcvbuf_max &&
+        (sb->sb_idealsize > 0 &&
+        sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
+        sbreserve(sb,
+            min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
     }
 }
 
@@ -2559,6 +2910,7 @@ tcp_sbspace(struct tcpcb *tp)
     u_int32_t rcvbuf = sb->sb_hiwat;
     int32_t space;
     struct socket *so = tp->t_inpcb->inp_socket;
+    int32_t pending = 0;
 
     /*
      * If message delivery is enabled, do not count
@@ -2577,6 +2929,15 @@
     if (space < 0)
         space = 0;
 
+#if CONTENT_FILTER
+    /* Compensate for data being processed by content filters */
+    pending = cfil_sock_data_space(sb);
+#endif /* CONTENT_FILTER */
+    if (pending > space)
+        space = 0;
+    else
+        space -= pending;
+
     /* Avoid increasing window size if the current window
      * is already very low, we could be in "persist" mode and
      * we could break some apps (see rdar://5409343)
@@ -2593,11 +2954,16 @@
     return space;
 }
 /*
- * Checks TCP Segment Offloading capability for a given connection and interface pair.
+ * Checks TCP Segment Offloading capability for a given connection
+ * and interface pair.
  */
 void
 tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
 {
+#if INET6
+    struct inpcb *inp;
+    int isipv6;
+#endif /* INET6 */
 #if MPTCP
     /*
      * We can't use TSO if this tcpcb belongs to an MPTCP session.
      */
@@ -2608,8 +2974,8 @@ tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
     }
 #endif
 #if INET6
-    struct inpcb *inp = tp->t_inpcb;
-    int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
+    inp = tp->t_inpcb;
+    isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
 
     if (isipv6) {
         if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV6)) {
@@ -2668,10 +3034,10 @@ calculate_tcp_clock()
         /* time to update the clock */
         lck_spin_lock(tcp_uptime_lock);
         if (timevalcmp(&tcp_uptime, &now, >=)) {
-            /* clock got updated while we were waiting for the lock */
+            /* clock got updated while waiting for the lock */
             lck_spin_unlock(tcp_uptime_lock);
             return;
-        }
+        }
 
         microuptime(&now);
         hold_now = now;
@@ -2697,6 +3063,10 @@
 void
 tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so) {
     u_int32_t maxsockbufsize;
+    if (!tcp_do_rfc1323) {
+        tp->request_r_scale = 0;
+        return;
+    }
 
     tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale);
     maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
@@ -2741,5 +3111,197 @@ tcp_notsent_lowat_check(struct socket *so) {
     return(0);
 }
 
+void
+tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end) {
+    struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
+    u_int32_t rxcount = 0;
+
+    if (SLIST_EMPTY(&tp->t_rxt_segments))
+        tp->t_dsack_lastuna = tp->snd_una;
+    /*
+     * First check if there is a segment already existing for this
+     * sequence space.
+     */
+
+    SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+        if (SEQ_GT(rxseg->rx_start, start))
+            break;
+        prev = rxseg;
+    }
+    next = rxseg;
+
+    /* check if prev seg is for this sequence */
+    if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
+        SEQ_GEQ(prev->rx_end, end)) {
+        prev->rx_count++;
+        return;
+    }
+
+    /*
+     * There are a couple of possibilities at this point.
+     * 1. prev overlaps with the beginning of this sequence
+     * 2. next overlaps with the end of this sequence
+     * 3. there is no overlap.
+     */
+    if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
+        if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
+            start = prev->rx_end + 1;
+            prev->rx_count++;
+        } else {
+            prev->rx_end = (start - 1);
+            rxcount = prev->rx_count;
+        }
+    }
+
+    if (next != NULL && SEQ_LT(next->rx_start, end)) {
+        if (SEQ_LEQ(next->rx_end, end)) {
+            end = next->rx_start - 1;
+            next->rx_count++;
+        } else {
+            next->rx_start = end + 1;
+            rxcount = next->rx_count;
+        }
+    }
+    if (!SEQ_LT(start, end))
+        return;
+
+    rxseg = (struct tcp_rxt_seg *) zalloc(tcp_rxt_seg_zone);
+    if (rxseg == NULL) {
+        return;
+    }
+    bzero(rxseg, sizeof(*rxseg));
+    rxseg->rx_start = start;
+    rxseg->rx_end = end;
+    rxseg->rx_count = rxcount + 1;
+
+    if (prev != NULL) {
+        SLIST_INSERT_AFTER(prev, rxseg, rx_link);
+    } else {
+        SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
+    }
+    return;
+}
+
+struct tcp_rxt_seg *
+tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
+{
+    struct tcp_rxt_seg *rxseg;
+    if (SLIST_EMPTY(&tp->t_rxt_segments))
+        return (NULL);
+
+    SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+        if (SEQ_LEQ(rxseg->rx_start, start) &&
+            SEQ_GEQ(rxseg->rx_end, end))
+            return (rxseg);
+        if (SEQ_GT(rxseg->rx_start, start))
+            break;
+    }
+    return (NULL);
+}
+
+void
+tcp_rxtseg_clean(struct tcpcb *tp)
+{
+    struct tcp_rxt_seg *rxseg, *next;
+
+    SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
+        SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
+            tcp_rxt_seg, rx_link);
+        zfree(tcp_rxt_seg_zone, rxseg);
+    }
+    tp->t_dsack_lastuna = tp->snd_max;
+}
+
+boolean_t
+tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
+{
+    boolean_t bad_rexmt;
+    struct tcp_rxt_seg *rxseg;
+
+    if (SLIST_EMPTY(&tp->t_rxt_segments))
+        return (FALSE);
+
+    /*
+     * If all of the segments in this window are not cumulatively
+     * acknowledged, then there can still be undetected packet loss.
+     * Do not restore congestion window in that case.
+     */
+    if (SEQ_LT(th_ack, tp->snd_recover))
+        return (FALSE);
+
+    bad_rexmt = TRUE;
+    SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+        if (rxseg->rx_count > 1 ||
+            !(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
+            bad_rexmt = FALSE;
+            break;
+        }
+    }
+    return (bad_rexmt);
+}
+
+boolean_t
+tcp_rxtseg_dsack_for_tlp(struct tcpcb *tp)
+{
+    boolean_t dsack_for_tlp = FALSE;
+    struct tcp_rxt_seg *rxseg;
+    if (SLIST_EMPTY(&tp->t_rxt_segments))
+        return (FALSE);
+
+    SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+        if (rxseg->rx_count == 1 &&
+            SLIST_NEXT(rxseg,rx_link) == NULL &&
+            (rxseg->rx_flags & TCP_RXT_DSACK_FOR_TLP)) {
+            dsack_for_tlp = TRUE;
+            break;
+        }
+    }
+    return (dsack_for_tlp);
+}
+
+u_int32_t
+tcp_rxtseg_total_size(struct tcpcb *tp) {
+    struct tcp_rxt_seg *rxseg;
+    u_int32_t total_size = 0;
+
+    SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+        total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
+    }
+    return (total_size);
+}
+
+void
+tcp_get_connectivity_status(struct tcpcb *tp,
+    struct tcp_conn_status *connstatus)
+{
+    if (tp == NULL || connstatus == NULL)
+        return;
+    bzero(connstatus, sizeof(*connstatus));
+    if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
+        if (TCPS_HAVEESTABLISHED(tp->t_state)) {
+            connstatus->write_probe_failed = 1;
+        } else {
+            connstatus->conn_probe_failed = 1;
+        }
+    }
+    if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX)
+        connstatus->read_probe_failed = 1;
+    if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL
+        && (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY))
+        connstatus->probe_activated = 1;
+    return;
+}
+
+boolean_t
+tfo_enabled(const struct tcpcb *tp)
+{
+    return !!(tp->t_flagsext & TF_FASTOPEN);
+}
+
+void
+tcp_disable_tfo(struct tcpcb *tp)
+{
+    tp->t_flagsext &= ~TF_FASTOPEN;
+}
-/* DSEP Review Done pl-20051213-v02 @3253,@3391,@3400 */
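The tcp_rxtseg_* helpers above compare retransmitted sequence ranges with the SEQ_LT/SEQ_GT/SEQ_LEQ/SEQ_GEQ macros from bsd/netinet/tcp_seq.h, which use signed 32-bit serial-number arithmetic so ordering survives sequence-number wraparound. A small stand-alone sketch of that convention:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t tcp_seq;

/* Essentially the definitions used in bsd/netinet/tcp_seq.h */
#define SEQ_LT(a, b)    ((int32_t)((a) - (b)) < 0)
#define SEQ_GT(a, b)    ((int32_t)((a) - (b)) > 0)

int
main(void)
{
    tcp_seq near_wrap = 0xfffffff0u;
    tcp_seq wrapped   = 0x00000010u;    /* 0x20 bytes later, after wraparound */

    /* Plain unsigned comparison misorders the two values ... */
    printf("unsigned <: %d\n", wrapped < near_wrap);          /* 1 */
    /* ... while serial arithmetic keeps the newer sequence "greater". */
    printf("SEQ_GT:     %d\n", SEQ_GT(wrapped, near_wrap));   /* 1 */
    printf("SEQ_LT:     %d\n", SEQ_LT(near_wrap, wrapped));   /* 1 */
    return 0;
}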