/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/syslog.h>
+#include <sys/mcache.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
+#include <netinet/tcp_cc.h>
+#include <kern/thread_call.h>
+
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <libkern/crypto/md5.h>
#include <sys/kdebug.h>
+#include <mach/sdt.h>
+
+#include <netinet/lro_ext.h>
#define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
#endif
int tcp_mssdflt = TCP_MSS;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
#if INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
- CTLFLAG_RW, &tcp_v6mssdflt , 0,
+ CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_v6mssdflt , 0,
"Default TCP Maximum Segment Size for IPv6");
#endif
+extern int tcp_do_autorcvbuf;
+
/*
* Minimum MSS we accept and use. This prevents DoS attacks where
* we are forced to a ridiculously low MSS like 20 and send hundreds
* checking. This setting prevents us from sending too small packets.
*/
int tcp_minmss = TCP_MINMSS;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_minmss , 0, "Minimum TCP Maximum Segment Size");
/*
#else
__private_extern__ int tcp_minmssoverload = 0;
#endif
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_minmssoverload , 0, "Number of TCP Segments per Second allowed to "
"be under the MINMSS Size");
static int tcp_do_rfc1323 = 1;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
+/* Not used */
static int tcp_do_rfc1644 = 0;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");
static int do_tcpdrain = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW | CTLFLAG_LOCKED, &do_tcpdrain, 0,
"Enable tcp_drain routine for extra help when low on mbufs");
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
&tcbinfo.ipi_count, 0, "Number of active PCBs");
static int icmp_may_rst = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp_may_rst, 0,
"Certain ICMP unreachable messages may abort connections in SYN_SENT");
static int tcp_strict_rfc1948 = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");
static int tcp_isn_reseed_interval = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
static int tcp_background_io_enabled = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_enabled, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_enabled, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_background_io_enabled, 0, "Background IO Enabled");
-int tcp_TCPTV_MIN = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_min, CTLFLAG_RW,
+int tcp_TCPTV_MIN = 100; /* 100ms minimum RTT */
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_TCPTV_MIN, 0, "min rtt value allowed");
+int tcp_rexmt_slop = TCPTV_REXMTSLOP;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmt_slop, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_rexmt_slop, 0, "Slop added to retransmit timeout");
+
__private_extern__ int tcp_use_randomport = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, randomize_ports, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_use_randomport, 0, "Randomize TCP port numbers");
+extern struct tcp_cc_algo tcp_cc_newreno;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno_sockets, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &tcp_cc_newreno.num_sockets, 0, "Number of sockets using newreno");
+
+extern struct tcp_cc_algo tcp_cc_ledbat;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_sockets, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &tcp_cc_ledbat.num_sockets, 0, "Number of sockets using background transport");
+
+__private_extern__ int tcp_win_scale = 3;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_win_scale, 0, "Window scaling factor");
+
static void tcp_cleartaocache(void);
static void tcp_notify(struct inpcb *, int);
+static void tcp_cc_init(void);
+
struct zone *sack_hole_zone;
+struct zone *tcp_reass_zone;
+struct zone *tcp_bwmeas_zone;
+
+/* The array containing pointers to currently implemented TCP CC algorithms */
+struct tcp_cc_algo* tcp_cc_algo_list[TCP_CC_ALGO_COUNT];
-extern unsigned int total_mb_cnt;
-extern unsigned int total_cl_cnt;
-extern int sbspace_factor;
-extern int tcp_sockthreshold;
extern int slowlink_wsize; /* window correction for slow links */
extern int path_mtu_discovery;
+extern u_int32_t tcp_autorcvbuf_max;
+extern u_int32_t tcp_autorcvbuf_inc_shift;
+static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
+
+#define TCP_BWMEAS_BURST_MINSIZE 6
+#define TCP_BWMEAS_BURST_MAXSIZE 25
+
+static uint32_t bwmeas_elm_size;
/*
* Target size of TCP PCB hash tables. Must be a power of two.
#endif
__private_extern__ int tcp_tcbhashsize = TCBHASHSIZE;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
&tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
/*
* to be changed, eventually, for greater efficiency).
*/
#define ALIGNMENT 32
-#define ALIGNM1 (ALIGNMENT - 1)
struct inp_tp {
- union {
- struct inpcb inp;
- char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
- } inp_tp_u;
- struct tcpcb tcb;
+ struct inpcb inp;
+ struct tcpcb tcb __attribute__((aligned(ALIGNMENT)));
};
#undef ALIGNMENT
-#undef ALIGNM1
extern struct inpcbhead time_wait_slots[];
-extern u_int32_t *delack_bitmask;
+extern struct tcptimerlist tcp_timer_list;
int get_inpcb_str_size(void);
int get_tcp_str_size(void);
static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
+static lck_attr_t *tcp_uptime_mtx_attr = NULL; /* mutex attributes */
+static lck_grp_t *tcp_uptime_mtx_grp = NULL; /* mutex group definition */
+static lck_grp_attr_t *tcp_uptime_mtx_grp_attr = NULL; /* mutex group attributes */
+int tcp_notsent_lowat_check(struct socket *so);
+
+
int get_inpcb_str_size(void)
{
return sizeof(struct inpcb);
int tcp_freeq(struct tcpcb *tp);
+/*
+ * Initialize TCP congestion control algorithms.
+ */
+
+void
+tcp_cc_init(void)
+{
+ bzero(&tcp_cc_algo_list, sizeof(tcp_cc_algo_list));
+ tcp_cc_algo_list[TCP_CC_ALGO_NEWRENO_INDEX] = &tcp_cc_newreno;
+ tcp_cc_algo_list[TCP_CC_ALGO_BACKGROUND_INDEX] = &tcp_cc_ledbat;
+}
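+
+/*
+ * Dispatch sketch (illustrative): CC_ALGO(tp) is assumed to expand to
+ * tcp_cc_algo_list[tp->tcp_cc_index], so the per-connection callbacks
+ * registered above are invoked as, e.g.:
+ *
+ *	if (CC_ALGO(tp)->cwnd_init != NULL)
+ *		CC_ALGO(tp)->cwnd_init(tp);
+ */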
/*
* Tcp initialization
tcp_keepintvl = TCPTV_KEEPINTVL;
tcp_maxpersistidle = TCPTV_KEEP_IDLE;
tcp_msl = TCPTV_MSL;
- read_random(&tcp_now, sizeof(tcp_now));
- tcp_now = tcp_now & 0x3fffffff; /* Starts tcp internal 100ms clock at a random value */
+ microuptime(&tcp_uptime);
+ read_random(&tcp_now, sizeof(tcp_now));
+ tcp_now = tcp_now & 0x3fffffff; /* Starts tcp internal clock at a random value */
LIST_INIT(&tcb);
tcbinfo.listhead = &tcb;
tcbinfo.hashbase = hashinit(tcp_tcbhashsize, M_PCB, &tcbinfo.hashmask);
tcbinfo.porthashbase = hashinit(tcp_tcbhashsize, M_PCB,
&tcbinfo.porthashmask);
- str_size = (vm_size_t) sizeof(struct inp_tp);
+ str_size = P2ROUNDUP(sizeof(struct inp_tp), sizeof(u_int64_t));
tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
+ zone_change(tcbinfo.ipi_zone, Z_CALLERACCT, FALSE);
+ zone_change(tcbinfo.ipi_zone, Z_EXPAND, TRUE);
+
+ str_size = P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t));
sack_hole_zone = zinit(str_size, 120000*str_size, 8192, "sack_hole zone");
+ zone_change(sack_hole_zone, Z_CALLERACCT, FALSE);
+ zone_change(sack_hole_zone, Z_EXPAND, TRUE);
+
tcp_reass_maxseg = nmbclusters / 16;
+ str_size = P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t));
+ tcp_reass_zone = zinit(str_size, (tcp_reass_maxseg + 1) * str_size,
+ 0, "tcp_reass_zone");
+ if (tcp_reass_zone == NULL) {
+ panic("%s: failed allocating tcp_reass_zone", __func__);
+ /* NOTREACHED */
+ }
+ zone_change(tcp_reass_zone, Z_CALLERACCT, FALSE);
+ zone_change(tcp_reass_zone, Z_EXPAND, TRUE);
+
+ bwmeas_elm_size = P2ROUNDUP(sizeof(struct bwmeas), sizeof(u_int64_t));
+ tcp_bwmeas_zone = zinit(bwmeas_elm_size, (100 * bwmeas_elm_size), 0, "tcp_bwmeas_zone");
+ if (tcp_bwmeas_zone == NULL) {
+ panic("%s: failed allocating tcp_bwmeas_zone", __func__);
+ /* NOTREACHED */
+ }
+ zone_change(tcp_bwmeas_zone, Z_CALLERACCT, FALSE);
+ zone_change(tcp_bwmeas_zone, Z_EXPAND, TRUE);
#if INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
- if (max_protohdr < TCP_MINPROTOHDR)
- max_protohdr = TCP_MINPROTOHDR;
- if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
+ if (max_protohdr < TCP_MINPROTOHDR) {
+ _max_protohdr = TCP_MINPROTOHDR;
+ _max_protohdr = max_protohdr; /* round it up */
+ }
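+	/*
+	 * Note: this assumes max_protohdr is a macro that evaluates to
+	 * _max_protohdr rounded up (e.g. to a 4-byte boundary), so the
+	 * second assignment above stores the rounded value back.
+	 */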
+ if (max_linkhdr + max_protohdr > MHLEN)
panic("tcp_init");
#undef TCP_MINPROTOHDR
- /*
+ /*
* allocate lock group attribute and group for tcp pcb mutexes
*/
- pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
+ pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
pcbinfo->mtx_grp = lck_grp_alloc_init("tcppcb", pcbinfo->mtx_grp_attr);
/*
return; /* pretty much dead if this fails... */
}
- delack_bitmask = _MALLOC((4 * tcp_tcbhashsize)/32, M_PCB, M_WAITOK);
- if (delack_bitmask == 0)
- panic("Delack Memory");
-
- for (i=0; i < (tcbinfo.hashsize / 32); i++)
- delack_bitmask[i] = 0;
-
for (i=0; i < N_TIME_WAIT_SLOTS; i++) {
LIST_INIT(&time_wait_slots[i]);
}
- timeout(tcp_fasttimo, NULL, hz/TCP_RETRANSHZ);
+ bzero(&tcp_timer_list, sizeof(tcp_timer_list));
+ LIST_INIT(&tcp_timer_list.lhead);
+ /*
+ * allocate lock group attribute, group and attribute for the tcp timer list
+ */
+ tcp_timer_list.mtx_grp_attr = lck_grp_attr_alloc_init();
+ tcp_timer_list.mtx_grp = lck_grp_alloc_init("tcptimerlist", tcp_timer_list.mtx_grp_attr);
+ tcp_timer_list.mtx_attr = lck_attr_alloc_init();
+ if ((tcp_timer_list.mtx = lck_mtx_alloc_init(tcp_timer_list.mtx_grp, tcp_timer_list.mtx_attr)) == NULL) {
+ panic("failed to allocate memory for tcp_timer_list.mtx\n");
+	}
+ tcp_timer_list.fast_quantum = TCP_FASTTIMER_QUANTUM;
+ tcp_timer_list.slow_quantum = TCP_SLOWTIMER_QUANTUM;
+ if ((tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL)) == NULL) {
+ panic("failed to allocate call entry 1 in tcp_init\n");
+ }
+
+ /*
+ * allocate lock group attribute, group and attribute for tcp_uptime_lock
+ */
+ tcp_uptime_mtx_grp_attr = lck_grp_attr_alloc_init();
+ tcp_uptime_mtx_grp = lck_grp_alloc_init("tcpuptime", tcp_uptime_mtx_grp_attr);
+ tcp_uptime_mtx_attr = lck_attr_alloc_init();
+ tcp_uptime_lock = lck_spin_alloc_init(tcp_uptime_mtx_grp, tcp_uptime_mtx_attr);
+
+ /* Initialize TCP congestion control algorithms list */
+ tcp_cc_init();
+
+ /* Initialize TCP LRO data structures */
+ tcp_lro_init();
}
/*
ip6->ip6_plen = sizeof(struct tcphdr);
ip6->ip6_src = inp->in6p_laddr;
ip6->ip6_dst = inp->in6p_faddr;
- tcp_hdr->th_sum = 0;
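+	/* Precompute the IPv6 pseudo-header checksum and leave it in
+	 * th_sum, so a later checksum pass (hardware offload or
+	 * in6_cksum) only has to add in the TCP segment itself.
+	 */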
+ tcp_hdr->th_sum = in6_cksum_phdr(&inp->in6p_laddr,
+ &inp->in6p_faddr, htonl(sizeof(struct tcphdr)),
+ htonl(IPPROTO_TCP));
} else
#endif
{
tcp_seq ack,
tcp_seq seq,
int flags,
- unsigned int ifscope
+ unsigned int ifscope,
+ unsigned int nocell
)
{
register int tlen;
struct ip6_hdr *ip6;
int isipv6;
#endif /* INET6 */
+ struct ifnet *outif;
#if INET6
isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
bcopy((caddr_t)ip6, mtod(m, caddr_t),
sizeof(struct ip6_hdr));
ip6 = mtod(m, struct ip6_hdr *);
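+		/*
+		 * The (void *) casts added below silence cast-alignment
+		 * warnings; the headers are known to be suitably aligned
+		 * at this point.
+		 */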
- nth = (struct tcphdr *)(ip6 + 1);
+ nth = (struct tcphdr *)(void *)(ip6 + 1);
} else
#endif /* INET6 */
{
bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
ip = mtod(m, struct ip *);
- nth = (struct tcphdr *)(ip + 1);
+ nth = (struct tcphdr *)(void *)(ip + 1);
}
bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
flags = TH_ACK;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#if INET6
if (isipv6) {
+ /* Expect 32-bit aligned IP on strict-align platforms */
+ IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
- nth = (struct tcphdr *)(ip6 + 1);
+ nth = (struct tcphdr *)(void *)(ip6 + 1);
} else
#endif /* INET6 */
{
+ /* Expect 32-bit aligned IP on strict-align platforms */
+ IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
- nth = (struct tcphdr *)(ip + 1);
+ nth = (struct tcphdr *)(void *)(ip + 1);
}
if (th != nth) {
/*
#if INET6
if (isipv6) {
nth->th_sum = 0;
- nth->th_sum = in6_cksum(m, IPPROTO_TCP,
- sizeof(struct ip6_hdr),
- tlen - sizeof(struct ip6_hdr));
+ nth->th_sum = in6_cksum_phdr(&ip6->ip6_src,
+ &ip6->ip6_dst, htons((u_short)(tlen - sizeof(struct ip6_hdr))),
+ htonl(IPPROTO_TCP));
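+		/* Record that only the pseudo-header sum (stored at
+		 * th_sum) has been computed; csum_data holds its offset
+		 * so the remainder of the checksum can be finished later.
+		 */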
+ m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
ro6 && ro6->ro_rt ?
ro6->ro_rt->rt_ifp :
return;
}
#endif
-#if PKT_PRIORITY
- if (tp != NULL && soisbackground(tp->t_inpcb->inp_socket))
- m_prio_background(m);
-#endif /* PKT_PRIORITY */
+
+ if (tp != NULL) {
+ u_int32_t svc_flags = 0;
+ if (isipv6) {
+ svc_flags |= PKT_SCF_IPV6;
+ }
+ set_packet_service_class(m, tp->t_inpcb->inp_socket,
+ MBUF_SC_UNSPEC, svc_flags);
+
+ /* Embed flowhash and flow control flags */
+ m->m_pkthdr.m_flowhash = tp->t_inpcb->inp_flowhash;
+ m->m_pkthdr.m_fhflags |=
+ (PF_TAG_TCP | PF_TAG_FLOWHASH | PF_TAG_FLOWADV);
+ }
+
#if INET6
if (isipv6) {
- (void)ip6_output(m, NULL, ro6, 0, NULL, NULL, 0);
- if (ro6 == &sro6 && ro6->ro_rt) {
- rtfree(ro6->ro_rt);
- ro6->ro_rt = NULL;
+ struct ip6_out_args ip6oa = { ifscope, { 0 },
+ IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR };
+
+ if (ifscope != IFSCOPE_NONE)
+ ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
+ if (nocell)
+ ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
+
+ (void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
+ NULL, &ip6oa);
+ if (ro6->ro_rt != NULL) {
+ if (ro6 == &sro6) {
+ rtfree(ro6->ro_rt);
+ ro6->ro_rt = NULL;
+ } else if ((outif = ro6->ro_rt->rt_ifp) !=
+ tp->t_inpcb->in6p_last_outifp) {
+ tp->t_inpcb->in6p_last_outifp = outif;
+ }
}
} else
#endif /* INET6 */
{
- struct ip_out_args ipoa = { ifscope };
+ struct ip_out_args ipoa = { ifscope, { 0 },
+ IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR };
+
+ if (ifscope != IFSCOPE_NONE)
+ ipoa.ipoa_flags |= IPOAF_BOUND_IF;
+ if (nocell)
+ ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
if (ro != &sro) {
/* Copy the cached route and take an extra reference */
(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
if (ro != &sro) {
+ if (sro.ro_rt != NULL &&
+ (outif = sro.ro_rt->rt_ifp) !=
+ tp->t_inpcb->inp_last_outifp)
+ tp->t_inpcb->inp_last_outifp = outif;
/* Synchronize cached PCB route */
inp_route_copyin(tp->t_inpcb, &sro);
} else if (sro.ro_rt != NULL) {
int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
+ calculate_tcp_clock();
+
if (so->cached_in_sock_layer == 0) {
- it = (struct inp_tp *)inp;
+ it = (struct inp_tp *)(void *)inp;
tp = &it->tcb;
}
else
- tp = (struct tcpcb *) inp->inp_saved_ppcb;
-
+ tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb;
+
bzero((char *) tp, sizeof(struct tcpcb));
LIST_INIT(&tp->t_segq);
tp->t_maxseg = tp->t_maxopd =
tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
tp->t_rttmin = tcp_TCPTV_MIN;
tp->t_rxtcur = TCPTV_RTOBASE;
+
+ /* Initialize congestion control algorithm for this connection
+ * to newreno by default
+ */
+ tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
+ if (CC_ALGO(tp)->init != NULL) {
+ CC_ALGO(tp)->init(tp);
+ }
+
tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
- tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
- tp->t_rcvtime = 0;
- tp->t_bw_rtttime = 0;
+ tp->t_rcvtime = tcp_now;
+ tp->tentry.timer_start = tcp_now;
+ tp->t_persist_timeout = tcp_max_persist_timeout;
+ tp->t_persist_stop = 0;
+ tp->t_flagsext |= TF_RCVUNACK_WAITSS;
/*
* IPv4 TTL initialization is necessary for an IPv6 socket as well,
* because the socket may be bound to an IPv6 wildcard address,
int errno;
{
struct socket *so = tp->t_inpcb->inp_socket;
-
+#if CONFIG_DTRACE
+ struct inpcb *inp = tp->t_inpcb;
+#endif
+
if (TCPS_HAVERCVDSYN(tp->t_state)) {
+ DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
+ struct tcpcb *, tp, int32_t, TCPS_CLOSED);
tp->t_state = TCPS_CLOSED;
(void) tcp_output(tp);
tcpstat.tcps_drops++;
return (tcp_close(tp));
}
+void
+tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
+{
+ u_int32_t rtt = rt->rt_rmx.rmx_rtt;
+ int isnetlocal = (tp->t_flags & TF_LOCAL);
+
+ if (rtt != 0) {
+ /*
+ * XXX the lock bit for RTT indicates that the value
+ * is also a minimum value; this is subject to time.
+ */
+ if (rt->rt_rmx.rmx_locks & RTV_RTT)
+ tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
+ else
+ tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN : TCPTV_REXMTMIN;
+ tp->t_srtt = rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
+ tcpstat.tcps_usedrtt++;
+ if (rt->rt_rmx.rmx_rttvar) {
+ tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
+ (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
+ tcpstat.tcps_usedrttvar++;
+ } else {
+ /* default variation is +- 1 rtt */
+ tp->t_rttvar =
+ tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
+ }
+ TCPT_RANGESET(tp->t_rxtcur,
+ ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
+ tp->t_rttmin, TCPTV_REXMTMAX,
+ TCP_ADD_REXMTSLOP(tp));
+ }
+}
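+
+/*
+ * Unit sketch (illustrative): rmx_rtt is in units of 1/RTM_RTTUNIT
+ * seconds (microseconds when RTM_RTTUNIT is 1000000). Assuming
+ * TCP_RETRANSHZ is 1000 (1 ms ticks) and TCP_RTT_SCALE is 32, a cached
+ * rmx_rtt of 50000 (50 ms) yields a t_srtt of roughly 50 * 32 = 1600,
+ * i.e. 50 ms expressed in scaled 1 ms ticks.
+ */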
+
/*
* Close a TCP control block:
* discard all space held by the tcp
int dosavessthresh;
if (inp->inp_ppcb == NULL) /* tcp_close was called previously, bail */
- return NULL;
-
- /* Clear the timers before we delete the PCB. */
- {
- int i;
- for (i = 0; i < TCPT_NTIMERS; i++) {
- tp->t_timer[i] = 0;
- }
- }
+ return(NULL);
+ tcp_canceltimers(tp);
KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp,0,0,0,0);
- switch (tp->t_state)
- {
- case TCPS_ESTABLISHED:
- case TCPS_FIN_WAIT_1:
- case TCPS_CLOSING:
- case TCPS_CLOSE_WAIT:
- case TCPS_LAST_ACK:
- break;
- }
/*
* If another thread for this tcp is currently in ip (indicated by
* point both flags should be cleared and we can proceed further
* with the cleanup.
*/
- if (tp->t_flags & (TF_CLOSING|TF_SENDINPROG)) {
+ if ((tp->t_flags & TF_CLOSING) ||
+ inp->inp_sndinprog_cnt > 0) {
tp->t_flags |= TF_CLOSING;
return (NULL);
}
+ if (CC_ALGO(tp)->cleanup != NULL) {
+ CC_ALGO(tp)->cleanup(tp);
+ }
+
#if INET6
rt = isipv6 ? inp->in6p_route.ro_rt : inp->inp_route.ro_rt;
#else
if (rt == NULL)
goto no_valid_rt;
- sin6 = (struct sockaddr_in6 *)rt_key(rt);
+ sin6 = (struct sockaddr_in6 *)(void *)rt_key(rt);
if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
goto no_valid_rt;
}
else
#endif /* INET6 */
if (rt == NULL || !(rt->rt_flags & RTF_UP) ||
- ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr ==
+ ((struct sockaddr_in *)(void *)rt_key(rt))->sin_addr.s_addr ==
INADDR_ANY || rt->generation_id != route_generation) {
- if (tp->t_state >= TCPS_CLOSE_WAIT)
+ if (tp->t_state >= TCPS_CLOSE_WAIT) {
+ DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
+ struct tcpcb *, tp, int32_t, TCPS_CLOSING);
tp->t_state = TCPS_CLOSING;
+ }
goto no_valid_rt;
}
(void) tcp_freeq(tp);
tcp_free_sackholes(tp);
+ if (tp->t_bwmeas != NULL) {
+ tcp_bwmeas_free(tp);
+ }
/* Free the packet list */
if (tp->t_pktlist_head != NULL)
if (so->cached_in_sock_layer)
inp->inp_saved_ppcb = (caddr_t) tp;
#endif
+	/* Issue a wakeup before detach so that a thread blocked on
+	 * the socket does not miss the disconnect event
+	 */
+ sodisconnectwakeup(so);
+
+ /*
+ * Clean up any LRO state
+ */
+ if (tp->t_flagsext & TF_LRO_OFFLOADED) {
+ tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr,
+ inp->inp_lport,
+ inp->inp_fport);
+ tp->t_flagsext &= ~TF_LRO_OFFLOADED;
+ }
- soisdisconnected(so);
#if INET6
if (INP_CHECK_SOCKAF(so, AF_INET6))
in6_pcbdetach(inp);
else
#endif /* INET6 */
in_pcbdetach(inp);
+
+ /* Call soisdisconnected after detach because it might unlock the socket */
+ soisdisconnected(so);
tcpstat.tcps_closed++;
KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed,0,0,0,0);
- return ((struct tcpcb *)0);
+ return(NULL);
}
int
while((q = LIST_FIRST(&tp->t_segq)) != NULL) {
LIST_REMOVE(q, tqe_q);
m_freem(q->tqe_m);
- FREE(q, M_TSEGQ);
+ zfree(tcp_reass_zone, q);
tcp_reass_qsize--;
rv = 1;
}
!= NULL) {
LIST_REMOVE(te, tqe_q);
m_freem(te->tqe_m);
- FREE(te, M_TSEGQ);
+ zfree(tcp_reass_zone, te);
tcp_reass_qsize--;
}
}
#endif
}
+struct bwmeas*
+tcp_bwmeas_alloc(struct tcpcb *tp)
+{
+ struct bwmeas *elm;
+ elm = zalloc(tcp_bwmeas_zone);
+ if (elm == NULL)
+ return(elm);
+
+ bzero(elm, bwmeas_elm_size);
+ elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
+ elm->bw_maxsizepkts = TCP_BWMEAS_BURST_MAXSIZE;
+ elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
+ elm->bw_maxsize = elm->bw_maxsizepkts * tp->t_maxseg;
+ return(elm);
+}
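+
+/*
+ * For example, with a t_maxseg of 1448 bytes the measurement burst is
+ * bounded to between 6 * 1448 = 8688 and 25 * 1448 = 36200 bytes.
+ */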
+
+void
+tcp_bwmeas_free(struct tcpcb* tp)
+{
+ zfree(tcp_bwmeas_zone, tp->t_bwmeas);
+ tp->t_bwmeas = NULL;
+ tp->t_flagsext &= ~(TF_MEASURESNDBW);
+}
+
/*
* tcpcb_to_otcpcb copies specific bits of a tcpcb to a otcpcb format.
* The otcpcb data structure is passed to user space and must not change.
otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
otp->t_dupacks = tp->t_dupacks;
- for (i = 0; i < TCPT_NTIMERS; i++)
+ for (i = 0; i < TCPT_NTIMERS_EXT; i++)
otp->t_timer[i] = tp->t_timer[i];
otp->t_inpcb = (_TCPCB_PTR(struct inpcb *))(uintptr_t)tp->t_inpcb;
otp->t_state = tp->t_state;
inpcb_to_compat(inp, &xt.xt_inp);
inp_ppcb = inp->inp_ppcb;
if (inp_ppcb != NULL) {
- tcpcb_to_otcpcb((struct tcpcb *)inp_ppcb,
+ tcpcb_to_otcpcb((struct tcpcb *)(void *)inp_ppcb,
&xt.xt_tp);
} else {
bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
return error;
}
-SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
#if !CONFIG_EMBEDDED
otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
otp->t_dupacks = tp->t_dupacks;
- for (i = 0; i < TCPT_NTIMERS; i++)
+ for (i = 0; i < TCPT_NTIMERS_EXT; i++)
otp->t_timer[i] = tp->t_timer[i];
otp->t_state = tp->t_state;
otp->t_flags = tp->t_flags;
for (i = 0; i < n; i++) {
inp = inp_list[i];
if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
- struct xtcpcb64 xt;
-
- bzero(&xt, sizeof(xt));
- xt.xt_len = sizeof xt;
- inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
- xt.xt_inpcb.inp_ppcb = (u_int64_t)(uintptr_t)inp->inp_ppcb;
- if (inp->inp_ppcb != NULL)
- tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb, &xt);
- if (inp->inp_socket)
- sotoxsocket64(inp->inp_socket, &xt.xt_inpcb.xi_socket);
- error = SYSCTL_OUT(req, &xt, sizeof xt);
+ struct xtcpcb64 xt;
+
+ bzero(&xt, sizeof(xt));
+ xt.xt_len = sizeof xt;
+ inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
+ xt.xt_inpcb.inp_ppcb = (u_int64_t)(uintptr_t)inp->inp_ppcb;
+ if (inp->inp_ppcb != NULL)
+ tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb, &xt);
+ if (inp->inp_socket)
+ sotoxsocket64(inp->inp_socket, &xt.xt_inpcb.xi_socket);
+ error = SYSCTL_OUT(req, &xt, sizeof xt);
}
}
if (!error) {
- /*
- * Give the user an updated idea of our state.
- * If the generation differs from what we told
- * her before, she knows that something happened
- * while we were processing this request, and it
- * might be necessary to retry.
- */
- bzero(&xig, sizeof(xig));
- xig.xig_len = sizeof xig;
- xig.xig_gen = tcbinfo.ipi_gencnt;
- xig.xig_sogen = so_gencnt;
- xig.xig_count = tcbinfo.ipi_count;
- error = SYSCTL_OUT(req, &xig, sizeof xig);
+ /*
+ * Give the user an updated idea of our state.
+ * If the generation differs from what we told
+ * her before, she knows that something happened
+ * while we were processing this request, and it
+ * might be necessary to retry.
+ */
+ bzero(&xig, sizeof(xig));
+ xig.xig_len = sizeof xig;
+ xig.xig_gen = tcbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = tcbinfo.ipi_count;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
}
FREE(inp_list, M_TEMP);
lck_rw_done(tcbinfo.mtx);
return error;
}
-SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64, CTLFLAG_RD, 0, 0,
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
#endif /* !CONFIG_EMBEDDED */
+static int
+tcp_pcblist_n SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error = 0;
+
+ error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
+
+ return error;
+}
+
+
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
+
+
+__private_extern__ void
+tcp_get_ports_used(unsigned int ifindex, uint8_t *bitfield)
+{
+ inpcb_get_ports_used(ifindex, bitfield, &tcbinfo);
+}
+
+__private_extern__ uint32_t
+tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
+{
+ return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
+}
+
void
tcp_ctlinput(cmd, sa, vip)
int cmd;
{
tcp_seq icmp_tcp_seq;
struct ip *ip = vip;
- struct tcphdr *th;
struct in_addr faddr;
struct inpcb *inp;
struct tcpcb *tp;
-
- void (*notify)(struct inpcb *, int) = tcp_notify;
- struct icmp *icp;
+ void (*notify)(struct inpcb *, int) = tcp_notify;
- faddr = ((struct sockaddr_in *)sa)->sin_addr;
+ faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
return;
else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
return;
if (ip) {
- icp = (struct icmp *)((caddr_t)ip
- - offsetof(struct icmp, icmp_ip));
- th = (struct tcphdr *)((caddr_t)ip
- + (IP_VHL_HL(ip->ip_vhl) << 2));
- inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
- ip->ip_src, th->th_sport, 0, NULL);
+ struct tcphdr th;
+ struct icmp *icp;
+
+ icp = (struct icmp *)(void *)
+ ((caddr_t)ip - offsetof(struct icmp, icmp_ip));
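+		/*
+		 * Copy the TCP header into a local struct: it sits past
+		 * the variable-length IP header inside the ICMP payload
+		 * and may not be 32-bit aligned, so dereferencing it in
+		 * place is unsafe on strict-alignment platforms.
+		 */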
+ bcopy(((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2)),
+ &th, sizeof (th));
+ inp = in_pcblookup_hash(&tcbinfo, faddr, th.th_dport,
+ ip->ip_src, th.th_sport, 0, NULL);
if (inp != NULL && inp->inp_socket != NULL) {
tcp_lock(inp->inp_socket, 1, 0);
if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
tcp_unlock(inp->inp_socket, 1, 0);
return;
}
- icmp_tcp_seq = htonl(th->th_seq);
+ icmp_tcp_seq = htonl(th.th_seq);
tp = intotcpcb(inp);
if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
in6_pcbnotify(&tcbinfo, sa, th.th_dport,
(struct sockaddr *)ip6cp->ip6c_src,
- th.th_sport, cmd, notify);
+ th.th_sport, cmd, NULL, notify);
} else {
in6_pcbnotify(&tcbinfo, sa, 0,
- (struct sockaddr *)(size_t)sa6_src, 0, cmd, notify);
+ (struct sockaddr *)(size_t)sa6_src, 0, cmd, NULL, notify);
}
}
#endif /* INET6 */
if (tp) {
#if INET6
if (isipv6)
- rt = tcp_rtlookup6(inp);
+ rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
else
#endif /* INET6 */
rt = tcp_rtlookup(inp, IFSCOPE_NONE);
tp->t_maxseg = mss;
+ /*
+	 * Reset the slow-start flight size, as it may depend on the new MSS
+ */
+ if (CC_ALGO(tp)->cwnd_init != NULL)
+ CC_ALGO(tp)->cwnd_init(tp);
tcpstat.tcps_mturesent++;
tp->t_rtttime = 0;
tp->snd_nxt = tp->snd_una;
ro->ro_dst.sa_family = AF_INET;
ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
- ((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
+ ((struct sockaddr_in *)(void *)&ro->ro_dst)->sin_addr =
inp->inp_faddr;
/*
* input_ifscope is IFSCOPE_NONE).
*/
ifscope = (inp->inp_flags & INP_BOUND_IF) ?
- inp->inp_boundif : input_ifscope;
+ inp->inp_boundifp->if_index : input_ifscope;
if (rt != NULL)
RT_UNLOCK(rt);
- rtalloc_scoped_ign(ro, 0, ifscope);
+ rtalloc_scoped(ro, ifscope);
if ((rt = ro->ro_rt) != NULL)
RT_LOCK(rt);
}
#if INET6
struct rtentry *
-tcp_rtlookup6(inp)
+tcp_rtlookup6(inp, input_ifscope)
struct inpcb *inp;
+ unsigned int input_ifscope;
{
struct route_in6 *ro6;
struct rtentry *rt;
/* No route yet, so try to acquire one */
if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
struct sockaddr_in6 *dst6;
+ unsigned int ifscope;
dst6 = (struct sockaddr_in6 *)&ro6->ro_dst;
dst6->sin6_family = AF_INET6;
dst6->sin6_len = sizeof(*dst6);
dst6->sin6_addr = inp->in6p_faddr;
+
+ /*
+ * If the socket was bound to an interface, then
+ * the bound-to-interface takes precedence over
+ * the inbound interface passed in by the caller
+ * (if we get here as part of the output path then
+ * input_ifscope is IFSCOPE_NONE).
+ */
+ ifscope = (inp->inp_flags & INP_BOUND_IF) ?
+ inp->inp_boundifp->if_index : input_ifscope;
+
if (rt != NULL)
RT_UNLOCK(rt);
- rtalloc_ign((struct route *)ro6, 0);
+ rtalloc_scoped((struct route *)ro6, ifscope);
if ((rt = ro6->ro_rt) != NULL)
RT_LOCK(rt);
}
#if INET6
if ((inp->inp_vflag & INP_IPV6) != 0) {
ip6 = mtod(m, struct ip6_hdr *);
- th = (struct tcphdr *)(ip6 + 1);
+ th = (struct tcphdr *)(void *)(ip6 + 1);
m->m_pkthdr.len = m->m_len =
sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
tcp_fillheaders(tp, ip6, th);
#if INET6
if ((inp->inp_vflag & INP_IPV6) != 0)
- rt = tcp_rtlookup6(inp);
+ rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
else
#endif /* INET6 */
rt = tcp_rtlookup(inp, IFSCOPE_NONE);
lr_saved = lr;
if (so->so_pcb != NULL) {
- lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
+ lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
} else {
panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
so, lr_saved, solockhistory_nr(so));
#ifdef MORE_TCPLOCK_DEBUG
printf("tcp_unlock: so=%p sopcb=%p lock=%p ref=%x lr=%p\n",
- so, so->so_pcb, ((struct inpcb *)so->so_pcb)->inpcb_mtx,
+ so, so->so_pcb, &((struct inpcb *)so->so_pcb)->inpcb_mtx,
so->so_usecount, lr_saved);
#endif
if (refcount)
so, so->so_usecount, lr_saved, solockhistory_nr(so));
/* NOTREACHED */
} else {
- lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx,
+ lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
LCK_MTX_ASSERT_OWNED);
so->unlock_lr[so->next_unlock_lr] = lr_saved;
so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
- lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
+ lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
}
return (0);
}
if (so->so_usecount < 0)
panic("tcp_getlock: so=%p usecount=%x lrh= %s\n",
so, so->so_usecount, solockhistory_nr(so));
- return(inp->inpcb_mtx);
+ return(&inp->inpcb_mtx);
}
else {
panic("tcp_getlock: so=%p NULL so_pcb %s\n",
}
}
+/* Determine if we can grow the receive socket buffer to avoid sending
+ * a zero window update to the peer. We allow even socket buffers that
+ * have fixed size (set by the application) to grow if the resource
+ * constraints are met. They will also be trimmed after the application
+ * reads data.
+ */
+static void
+tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
+{
+ u_int32_t rcvbufinc = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
+ if (tcp_do_autorcvbuf == 1 &&
+ tcp_cansbgrow(sb) &&
+ (tp->t_flags & TF_SLOWLINK) == 0 &&
+ (sb->sb_hiwat - sb->sb_cc) < rcvbufinc &&
+ (sb->sb_hiwat < tcp_autorcvbuf_max)) {
+ sbreserve(sb, (sb->sb_hiwat + rcvbufinc));
+ }
+}
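+
+/*
+ * For example (values illustrative): with a t_maxseg of 1448 and an
+ * increment shift of 4, rcvbufinc is 1448 << 4 = 23168 bytes; a buffer
+ * with less free space than that is grown by 23168 bytes, until
+ * sb_hiwat reaches tcp_autorcvbuf_max.
+ */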
+
int32_t
tcp_sbspace(struct tcpcb *tp)
{
struct sockbuf *sb = &tp->t_inpcb->inp_socket->so_rcv;
- int32_t space, newspace;
+ int32_t space;
+
+ tcp_sbrcv_grow_rwin(tp, sb);
space = ((int32_t) imin((sb->sb_hiwat - sb->sb_cc),
(sb->sb_mbmax - sb->sb_mbcnt)));
if (space < 0)
space = 0;
-#if TRAFFIC_MGT
- if (tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_REGULATE) {
- if (tcp_background_io_enabled &&
- tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_SUPPRESSED) {
- tp->t_flags |= TF_RXWIN0SENT;
- return 0; /* Triggers TCP window closing by responding there is no space */
- }
- }
-#endif /* TRAFFIC_MGT */
-
- /* Avoid inscreasing window size if the current window
+ /* Avoid increasing window size if the current window
* is already very low, we could be in "persist" mode and
* we could break some apps (see rdar://5409343)
*/
if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0 )
return imin(space, slowlink_wsize);
- /*
- * Check for ressources constraints before over-ajusting the amount of space we can
- * advertise in the TCP window size updates.
- */
-
- if (sbspace_factor && (tp->t_inpcb->inp_pcbinfo->ipi_count < tcp_sockthreshold) &&
- (total_mb_cnt / 8) < (mbstat.m_clusters / sbspace_factor)) {
- if (space < (int32_t)(sb->sb_maxused - sb->sb_cc)) {/* make sure we don't constrain the window if we have enough ressources */
- space = (int32_t) imax((sb->sb_maxused - sb->sb_cc), tp->rcv_maxbyps);
- }
- newspace = (int32_t) imax(((int32_t)sb->sb_maxused - sb->sb_cc), (int32_t)tp->rcv_maxbyps);
-
- if (newspace > space)
- space = newspace;
- }
return space;
}
/*
int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
if (isipv6) {
- /*
- * Radar 6921834: Disable TSO IPv6 because there is no support
- * for TSO & HW checksum in ip6_output yet
- */
-#if 0
if (ifp && ifp->if_hwassist & IFNET_TSO_IPV6) {
tp->t_flags |= TF_TSO;
if (ifp->if_tso_v6_mtu != 0)
} else
tp->t_flags &= ~TF_TSO;
-#endif
} else
#endif /* INET6 */
tp->t_flags &= ~TF_TSO;
}
}
+
+#define TIMEVAL_TO_TCPHZ(_tv_) ((_tv_).tv_sec * TCP_RETRANSHZ + (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC)
+
+/* Calculate the tcp clock. The clock is updated at the boundaries of
+ * the tcp layer. This is done in three places:
+ * 1. Right before processing an input tcp packet
+ * 2. Whenever a connection wants to access the network using tcp_usrreqs
+ * 3. When a tcp timer fires or before the tcp slow timeout
+ */
+
+void
+calculate_tcp_clock()
+{
+ struct timeval tv = tcp_uptime;
+ struct timeval interval = {0, TCP_RETRANSHZ_TO_USEC};
+ struct timeval now, hold_now;
+ uint32_t incr = 0;
+
+ timevaladd(&tv, &interval);
+ microuptime(&now);
+ if (timevalcmp(&now, &tv, >)) {
+ /* time to update the clock */
+ lck_spin_lock(tcp_uptime_lock);
+ if (timevalcmp(&tcp_uptime, &now, >=)) {
+ /* clock got updated while we were waiting for the lock */
+ lck_spin_unlock(tcp_uptime_lock);
+ return;
+ }
+
+ microuptime(&now);
+ hold_now = now;
+ tv = tcp_uptime;
+ timevalsub(&now, &tv);
+
+ incr = TIMEVAL_TO_TCPHZ(now);
+ if (incr > 0) {
+ tcp_uptime = hold_now;
+ tcp_now += incr;
+ }
+
+ lck_spin_unlock(tcp_uptime_lock);
+ }
+ return;
+}
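+
+/*
+ * For example, assuming TCP_RETRANSHZ is 1000 (1 ms ticks, i.e.
+ * TCP_RETRANSHZ_TO_USEC is 1000): if microuptime() has advanced 2500 us
+ * past tcp_uptime, TIMEVAL_TO_TCPHZ yields 0 * 1000 + 2500 / 1000 = 2,
+ * so tcp_now moves forward by two ticks.
+ */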
+
+/* Compute receive window scaling that we are going to request
+ * for this connection based on sb_hiwat. Try to leave some
+ * room to potentially increase the window size up to a maximum
+ * defined by the constant tcp_autorcvbuf_max.
+ */
+void
+tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so)
+{
+ u_int32_t maxsockbufsize;
+
+ tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale);
+ maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
+ so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
+
+ while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
+ (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize)
+ tp->request_r_scale++;
+ tp->request_r_scale = min(tp->request_r_scale, TCP_MAX_WINSHIFT);
+
+}
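+
+/*
+ * For example, if maxsockbufsize were 1 MB (1048576 bytes), the loop
+ * would settle on a request_r_scale of 5: 65535 << 4 is still below
+ * 1048576 while 65535 << 5 is not.
+ */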
+
+int
+tcp_notsent_lowat_check(struct socket *so)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = NULL;
+ int notsent = 0;
+ if (inp != NULL) {
+ tp = intotcpcb(inp);
+ }
+
+ notsent = so->so_snd.sb_cc -
+ (tp->snd_nxt - tp->snd_una);
+
+	/* When we send a FIN or SYN, notsent can be negative.
+	 * In that case, we still need to send a write event to the
+	 * process if it is waiting. In the FIN case, it will
+	 * get an error from send because cantsendmore will be set.
+	 */
+ if (notsent <= tp->t_notsent_lowat) {
+ return(1);
+ }
+
+	/* When Nagle's algorithm is not disabled, it is better
+	 * to keep waking the client until it has at least one
+	 * maxseg of data to write.
+	 */
+ if ((tp->t_flags & TF_NODELAY) == 0 &&
+ notsent > 0 && notsent < tp->t_maxseg) {
+ return(1);
+ }
+ return(0);
+}
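+
+/*
+ * For example (numbers illustrative): with t_notsent_lowat set to 8192,
+ * an sb_cc of 20000 and 15000 bytes in flight (snd_nxt - snd_una),
+ * notsent is 5000 <= 8192, so the socket is reported writable.
+ */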
+
+
/* DSEP Review Done pl-20051213-v02 @3253,@3391,@3400 */