X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..6601e61aa18bf4f09af135ff61fc7f4771d23b06:/bsd/netinet/tcp_timer.c

diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c
index 3819a60cf..effeffde5 100644
--- a/bsd/netinet/tcp_timer.c
+++ b/bsd/netinet/tcp_timer.c
@@ -52,20 +52,19 @@
  * SUCH DAMAGE.
  *
  *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
+ * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
  */
 
-#if ISFB31
-#include "opt_compat.h"
-#include "opt_tcpdebug.h"
-#endif
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 #include 		/* before tcp_seq.h, for tcp_random18() */
@@ -73,9 +72,10 @@
 #include 
 #include 
-#include 
-#include 
 #include 
+#if INET6
+#include 
+#endif
 #include 
 #include 
 #include 
@@ -91,50 +91,86 @@
 #define DBG_FNC_TCP_FAST	NETDBG_CODE(DBG_NETTCP, (5 << 8))
 #define DBG_FNC_TCP_SLOW	NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1)
 
+/*
+ * NOTE - WARNING
+ *
+ *
+ *
+ *
+ */
+static int
+sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
+{
+	int error, s, tt;
+
+	tt = *(int *)oidp->oid_arg1;
+	s = tt * 1000 / hz;
+
+	error = sysctl_handle_int(oidp, &s, 0, req);
+	if (error || !req->newptr)
+		return (error);
+
+	tt = s * hz / 1000;
+	if (tt < 1)
+		return (EINVAL);
+
+	*(int *)oidp->oid_arg1 = tt;
+	return (0);
+}
+
+int	tcp_keepinit;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
+    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
 
-int	tcp_keepinit = TCPTV_KEEP_INIT;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
-	CTLFLAG_RW, &tcp_keepinit , 0, "");
+int	tcp_keepidle;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
+    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
 
-int	tcp_keepidle = TCPTV_KEEP_IDLE;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
-	CTLFLAG_RW, &tcp_keepidle , 0, "");
+int	tcp_keepintvl;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
+    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
 
-static int	tcp_keepintvl = TCPTV_KEEPINTVL;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
-	CTLFLAG_RW, &tcp_keepintvl , 0, "");
+int	tcp_delacktime;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
+    CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
+    "Time before a delayed ACK is sent");
+
+int	tcp_msl;
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
+    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
 
 static int	always_keepalive = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive,
-	CTLFLAG_RW, &always_keepalive , 0, "");
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
+    &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");
 
 static int	tcp_keepcnt = TCPTV_KEEPCNT;		/* max idle probes */
-static int	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
+int	tcp_maxpersistidle;
 					/* max idle time in persist */
 int	tcp_maxidle;
-
-
 struct	inpcbhead	time_wait_slots[N_TIME_WAIT_SLOTS];
 int		cur_tw_slot = 0;
 
 u_long		*delack_bitmask;
 
-u_long  current_active_connections = 0;
-u_long  last_active_conn_count = 0;
 
-void	add_to_time_wait(tp)
+void	add_to_time_wait_locked(tp)
 	struct tcpcb	*tp;
 {
 	int		tw_slot;
 
+	/* pcb list should be locked when we get here */
+#if 0
+	lck_mtx_assert(tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+#endif
+
 	LIST_REMOVE(tp->t_inpcb, inp_list);
 
 	if (tp->t_timer[TCPT_2MSL] == 0)
	    tp->t_timer[TCPT_2MSL] = 1;
 
-	tp->t_idle += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);
+	tp->t_rcvtime += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);
 	tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
 	if (tw_slot >= N_TIME_WAIT_SLOTS)
	    tw_slot -= N_TIME_WAIT_SLOTS;
@@ -142,6 +178,19 @@ void add_to_time_wait(tp)
 	LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
 }
 
+void	add_to_time_wait(tp)
+	struct tcpcb	*tp;
+{
+	struct inpcbinfo *pcbinfo = &tcbinfo;
+
+	if (!lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
+		tcp_unlock(tp->t_inpcb->inp_socket, 0, 0);
+		lck_rw_lock_exclusive(pcbinfo->mtx);
+		tcp_lock(tp->t_inpcb->inp_socket, 0, 0);
+	}
+	add_to_time_wait_locked(tp);
+	lck_rw_done(pcbinfo->mtx);
+}
@@ -152,66 +201,46 @@ void add_to_time_wait(tp)
 void	tcp_fasttimo()
 {
-	register struct inpcb *inp;
+	struct inpcb *inp, *inpnxt;
 	register struct tcpcb *tp;
 
-	register u_long i,j;
-	register u_long temp_mask;
-	register u_long elem_base = 0;
-	struct inpcbhead *head;
-	int s = splnet();
+	struct inpcbinfo *pcbinfo = &tcbinfo;
 
-	static
-	int delack_checked = 0;
+	int delack_checked = 0, delack_done = 0;
 
 	KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0,0,0,0,0);
 
-	if (!tcp_delack_enabled)
+	if (tcp_delack_enabled == 0)
 		return;
 
-	if ((current_active_connections > DELACK_BITMASK_THRESH) &&
-	    (last_active_conn_count > DELACK_BITMASK_THRESH)) {
-		for (i=0; i < (tcbinfo.hashsize / 32); i++) {
-			if (delack_bitmask[i]) {
-				temp_mask = 1;
-				for (j=0; j < 32; j++) {
-					if (temp_mask & delack_bitmask[i]) {
-						head = &tcbinfo.hashbase[elem_base + j];
-						for (inp=head->lh_first; inp != 0; inp = inp->inp_hash.le_next) {
-							delack_checked++;
-							if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) {
-								tp->t_flags &= ~TF_DELACK;
-								tp->t_flags |= TF_ACKNOW;
-								tcpstat.tcps_delack++;
-								(void) tcp_output(tp);
-							}
-						}
-					}
-					temp_mask <<= 1;
-				}
-				delack_bitmask[i] = 0;
-			}
-			elem_base += 32;
-		}
-	}
-	else
-	{
-		for (inp = tcb.lh_first; inp != NULL; inp = inp->inp_list.le_next) {
-			if ((tp = (struct tcpcb *)inp->inp_ppcb) &&
-			    (tp->t_flags & TF_DELACK)) {
-				tp->t_flags &= ~TF_DELACK;
-				tp->t_flags |= TF_ACKNOW;
-				tcpstat.tcps_delack++;
-				(void) tcp_output(tp);
-			}
-		}
-	}
+	lck_rw_lock_shared(pcbinfo->mtx);
 
-	last_active_conn_count = current_active_connections;
-	KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked,tcpstat.tcps_delack,0,0,0);
-	splx(s);
+	/* Walk the list of valid tcpcbs and send ACKS on the ones with DELACK bit set */
+	for (inp = tcb.lh_first; inp != NULL; inp = inpnxt) {
+		inpnxt = inp->inp_list.le_next;
+		/* NOTE: it's OK to check the tp because the pcb can't be removed while we hold pcbinfo->mtx) */
+		if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) {
+			if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+				continue;
+			tcp_lock(inp->inp_socket, 1, 0);
+			if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+				tcp_unlock(inp->inp_socket, 1, 0);
+				continue;
+			}
+			if (tp->t_flags & TF_DELACK) {
+				delack_done++;
+				tp->t_flags &= ~TF_DELACK;
+				tp->t_flags |= TF_ACKNOW;
+				tcpstat.tcps_delack++;
+				(void) tcp_output(tp);
+			}
+			tcp_unlock(inp->inp_socket, 1, 0);
+		}
+	}
+	KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked, delack_done, tcpstat.tcps_delack,0,0);
+	lck_rw_done(pcbinfo->mtx);
 }
 
 /*
@@ -222,35 +251,50 @@ tcp_fasttimo()
 void
 tcp_slowtimo()
 {
-	register struct inpcb *ip, *ipnxt;
-	register struct tcpcb *tp;
-	register int i;
-	int s;
+	struct inpcb *inp, *inpnxt;
+	struct tcpcb *tp;
+	struct socket *so;
+	int i;
 #if TCPDEBUG
 	int ostate;
 #endif
 #if KDEBUG
 	static int tws_checked;
 #endif
+	struct inpcbinfo *pcbinfo = &tcbinfo;
 
 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0,0,0,0,0);
 
-	s = splnet();
 	tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
 
-	ip = tcb.lh_first;
-	if (ip == NULL) {
-		splx(s);
-		return;
-	}
+	lck_rw_lock_shared(pcbinfo->mtx);
+
 	/*
 	 * Search through tcb's and update active timers.
 	 */
-	for (; ip != NULL; ip = ipnxt) {
-		ipnxt = ip->inp_list.le_next;
-		tp = intotcpcb(ip);
-		if (tp == 0 || tp->t_state == TCPS_LISTEN)
+	for (inp = tcb.lh_first; inp != NULL; inp = inpnxt) {
+		inpnxt = inp->inp_list.le_next;
+
+		so = inp->inp_socket;
+
+		if (so == &tcbinfo.nat_dummy_socket)
+			continue;
+
+		if (in_pcb_checkstate(inp, WNT_ACQUIRE,0) == WNT_STOPUSING)
 			continue;
+
+		tcp_lock(so, 1, 0);
+
+		if ((in_pcb_checkstate(inp, WNT_RELEASE,1) == WNT_STOPUSING) && so->so_usecount == 1) {
+			tcp_unlock(so, 1, 0);
+			continue;
+		}
+		tp = intotcpcb(inp);
+		if (tp == 0 || tp->t_state == TCPS_LISTEN) {
+			tcp_unlock(so, 1, 0);
+			continue;
+		}
+
 		for (i = 0; i < TCPT_NTIMERS; i++) {
 			if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
 #if TCPDEBUG
@@ -269,12 +313,12 @@ tcp_slowtimo()
 #endif
 			}
 		}
-		tp->t_idle++;
-		tp->t_duration++;
-		if (tp->t_rtt)
-			tp->t_rtt++;
+		tp->t_rcvtime++;
+		tp->t_starttime++;
+		if (tp->t_rtttime)
+			tp->t_rtttime++;
 tpgone:
-		;
+		tcp_unlock(so, 1, 0);
 	}
 
 #if KDEBUG
@@ -286,34 +330,106 @@ tpgone:
 	 * Process the items in the current time-wait slot
 	 */
 
-	for (ip = time_wait_slots[cur_tw_slot].lh_first; ip; ip = ipnxt)
+	for (inp = time_wait_slots[cur_tw_slot].lh_first; inp; inp = inpnxt)
 	{
+		inpnxt = inp->inp_list.le_next;
 #if KDEBUG
 		tws_checked++;
 #endif
-		ipnxt = ip->inp_list.le_next;
-		tp = intotcpcb(ip);
+
+		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+			continue;
+
+		tcp_lock(inp->inp_socket, 1, 0);
+
+		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)
+			goto twunlock;
+
+		tp = intotcpcb(inp);
+		if (tp == NULL) { /* tp already closed, remove from list */
+#if TEMPDEBUG
+			printf("tcp_slowtimo: tp is null in time-wait slot!\n");
+#endif
+			goto twunlock;
+		}
 		if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
		    tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
-		    tp->t_idle += N_TIME_WAIT_SLOTS;
+		    tp->t_rcvtime += N_TIME_WAIT_SLOTS;
 		}
 		else
		    tp->t_timer[TCPT_2MSL] = 0;
 
-		if (tp->t_timer[TCPT_2MSL] == 0)
-		    tp = tcp_timers(tp, TCPT_2MSL);
+		if (tp->t_timer[TCPT_2MSL] == 0)
+		    tp = tcp_timers(tp, TCPT_2MSL); /* tp can be returned null if tcp_close is called */
+twunlock:
+		tcp_unlock(inp->inp_socket, 1, 0);
 	}
 
-	if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
-		cur_tw_slot = 0;
+	if (lck_rw_lock_shared_to_exclusive(pcbinfo->mtx) != 0)
+		lck_rw_lock_exclusive(pcbinfo->mtx);	/* Upgrade failed, lost lock no take it again exclusive */
+
-	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;		/* increment iss */
-#if TCP_COMPAT_42
-	if ((int)tcp_iss < 0)
-		tcp_iss = TCP_ISSINCR;			/* XXX */
+	for (inp = tcb.lh_first; inp != NULL; inp = inpnxt) {
+		inpnxt = inp->inp_list.le_next;
+		/* Ignore nat/SharedIP dummy pcbs */
+		if (inp->inp_socket == &tcbinfo.nat_dummy_socket)
+			continue;
+
+		if (inp->inp_wantcnt != WNT_STOPUSING)
+			continue;
+
+		so = inp->inp_socket;
+		if (!lck_mtx_try_lock(inp->inpcb_mtx)) {/* skip if in use */
+#if TEMPDEBUG
+			printf("tcp_slowtimo so=%x STOPUSING but locked...\n", so);
 #endif
-	tcp_now++;					/* for timestamps */
-	splx(s);
+			continue;
+		}
+
+		if (so->so_usecount == 0)
+			in_pcbdispose(inp);
+		else {
+			tp = intotcpcb(inp);
+			/* Check for embryonic socket stuck on listener queue (4023660) */
+			if ((so->so_usecount == 1) && (tp->t_state == TCPS_CLOSED) &&
+			    (so->so_head != NULL) && (so->so_state & SS_INCOMP)) {
+				so->so_usecount--;
+				in_pcbdispose(inp);
+			} else
+				lck_mtx_unlock(inp->inpcb_mtx);
+		}
+	}
+
+	/* Now cleanup the time wait ones */
+	for (inp = time_wait_slots[cur_tw_slot].lh_first; inp; inp = inpnxt)
+	{
+		inpnxt = inp->inp_list.le_next;
+
+		if (inp->inp_wantcnt != WNT_STOPUSING)
+			continue;
+
+		so = inp->inp_socket;
+		if (!lck_mtx_try_lock(inp->inpcb_mtx)) /* skip if in use */
+			continue;
+		if (so->so_usecount == 0)
+			in_pcbdispose(inp);
+		else {
+			tp = intotcpcb(inp);
+			/* Check for embryonic socket stuck on listener queue (4023660) */
+			if ((so->so_usecount == 1) && (tp->t_state == TCPS_CLOSED) &&
+			    (so->so_head != NULL) && (so->so_state & SS_INCOMP)) {
+				so->so_usecount--;
+				in_pcbdispose(inp);
+			} else
+				lck_mtx_unlock(inp->inpcb_mtx);
+		}
+	}
+
+	tcp_now++;
+	if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
+		cur_tw_slot = 0;
+
+	lck_rw_done(pcbinfo->mtx);
 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot,0,0,0);
 }
 
@@ -330,6 +446,9 @@ tcp_canceltimers(tp)
 		tp->t_timer[i] = 0;
 }
 
+int	tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
+    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
+
 int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
     { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
 
@@ -345,10 +464,18 @@
 {
 	register int rexmt;
 	struct socket *so_tmp;
+	struct tcptemp *t_template;
+
+#if TCPDEBUG
+	int ostate;
+#endif
+
 #if INET6
 	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
 #endif /* INET6 */
 
+	so_tmp = tp->t_inpcb->inp_socket;
+
 	switch (timer) {
 
 	/*
@@ -358,13 +485,16 @@
 	 * control block.  Otherwise, check again in a bit.
 	 */
 	case TCPT_2MSL:
+		tcp_free_sackholes(tp);
 		if (tp->t_state != TCPS_TIME_WAIT &&
-		    tp->t_idle <= tcp_maxidle) {
-			tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
-			add_to_time_wait(tp);
+		    tp->t_rcvtime <= tcp_maxidle) {
+			tp->t_timer[TCPT_2MSL] = (unsigned long)tcp_keepintvl;
+			add_to_time_wait_locked(tp);
 		}
-		else
+		else {
 			tp = tcp_close(tp);
+			return(tp);
+		}
 		break;
 
 	/*
@@ -373,20 +503,53 @@
 	 * to a longer retransmit interval and retransmit one segment.
 	 */
 	case TCPT_REXMT:
+		tcp_free_sackholes(tp);
 		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
 			tp->t_rxtshift = TCP_MAXRXTSHIFT;
 			tcpstat.tcps_timeoutdrop++;
-			so_tmp = tp->t_inpcb->inp_socket;
 			tp = tcp_drop(tp, tp->t_softerror ? tp->t_softerror : ETIMEDOUT);
 			postevent(so_tmp, 0, EV_TIMEOUT);
 			break;
 		}
+
+		if (tp->t_rxtshift == 1) {
+			/*
+			 * first retransmit; record ssthresh and cwnd so they can
+			 * be recovered if this turns out to be a "bad" retransmit.
+			 * A retransmit is considered "bad" if an ACK for this
+			 * segment is received within RTT/2 interval; the assumption
+			 * here is that the ACK was already in flight.  See
+			 * "On Estimating End-to-End Network Path Properties" by
+			 * Allman and Paxson for more details.
+			 */
+			tp->snd_cwnd_prev = tp->snd_cwnd;
+			tp->snd_ssthresh_prev = tp->snd_ssthresh;
+			tp->snd_recover_prev = tp->snd_recover;
+			if (IN_FASTRECOVERY(tp))
+				tp->t_flags |= TF_WASFRECOVERY;
+			else
+				tp->t_flags &= ~TF_WASFRECOVERY;
+			tp->t_badrxtwin = tcp_now + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
+		}
 		tcpstat.tcps_rexmttimeo++;
-		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
+		if (tp->t_state == TCPS_SYN_SENT)
+			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
+		else
+			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
 		TCPT_RANGESET(tp->t_rxtcur, rexmt,
-			tp->t_rttmin, TCPTV_REXMTMAX);
+		    tp->t_rttmin, TCPTV_REXMTMAX);
 		tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
+
+		/*
+		 * Disable rfc1323 and rfc1644 if we havn't got any response to
+		 * our third SYN to work-around some broken terminal servers
+		 * (most of which have hopefully been retired) that have bad VJ
+		 * header compression code which trashes TCP segments containing
+		 * unknown-to-them TCP options.
+		 */
+		if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
+			tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
 		/*
 		 * If losing, let the lower level know and try for
 		 * a better route.  Also, if we backed off this far,
@@ -406,6 +569,11 @@ tcp_timers(tp, timer)
 			tp->t_srtt = 0;
 		}
 		tp->snd_nxt = tp->snd_una;
+		/*
+		 * Note:  We overload snd_recover to function also as the
+		 * snd_last variable described in RFC 2582
+		 */
+		tp->snd_recover = tp->snd_max;
 		/*
 		 * Force a segment to be sent.
 		 */
@@ -413,7 +581,7 @@
 		/*
 		 * If timing a segment in this window, stop the timer.
 		 */
-		tp->t_rtt = 0;
+		tp->t_rtttime = 0;
 		/*
 		 * Close the congestion window down to one segment
 		 * (we'll open it by one segment for each ack we get).
@@ -446,6 +614,7 @@
 		tp->snd_ssthresh = win * tp->t_maxseg;
 		tp->t_dupacks = 0;
 		}
+		EXIT_FASTRECOVERY(tp);
 		(void) tcp_output(tp);
 		break;
 
@@ -463,8 +632,8 @@
 		 * backoff that we would use if retransmitting.
 		 */
 		if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
-		    (tp->t_idle >= tcp_maxpersistidle ||
-		    tp->t_idle >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
+		    (tp->t_rcvtime >= tcp_maxpersistidle ||
+		    tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
 			tcpstat.tcps_persistdrop++;
 			so_tmp = tp->t_inpcb->inp_socket;
 			tp = tcp_drop(tp, ETIMEDOUT);
@@ -487,8 +656,8 @@
 			goto dropit;
 		if ((always_keepalive ||
 		    tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
-		    tp->t_state <= TCPS_CLOSING) {
-		    	if (tp->t_idle >= tcp_keepidle + tcp_maxidle)
+		    tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2) {
+		    	if (tp->t_rcvtime >= TCP_KEEPIDLE(tp) + (unsigned long)tcp_maxidle)
 				goto dropit;
 			/*
 			 * Send a packet designed to force a response
@@ -503,44 +672,25 @@
 			 * correspondent TCP to respond.
 			 */
 			tcpstat.tcps_keepprobe++;
-#if TCP_COMPAT_42
-			/*
-			 * The keepalive packet must have nonzero length
-			 * to get a 4.2 host to respond.
-			 */
-#if INET6
-			if (isipv6)
-				tcp_respond(tp, (void *)&tp->t_template->tt_i6,
-					    &tp->t_template->tt_t,
-					    (struct mbuf *)NULL,
-					    tp->rcv_nxt - 1, tp->snd_una - 1,
-					    0, isipv6);
-			else
-#endif /* INET6 */
-			tcp_respond(tp, (void *)&tp->t_template->tt_i,
-				    &tp->t_template->tt_t, (struct mbuf *)NULL,
-				    tp->rcv_nxt - 1, tp->snd_una - 1, 0,
-				    isipv6);
-#else
-#if INET6
-			if (isipv6)
-				tcp_respond(tp, (void *)&tp->t_template->tt_i6,
-					    &tp->t_template->tt_t,
-					    (struct mbuf *)NULL, tp->rcv_nxt,
-					    tp->snd_una - 1, 0, isipv6);
-			else
-#endif /* INET6 */
-			tcp_respond(tp, (void *)&tp->t_template->tt_i,
-				    &tp->t_template->tt_t, (struct mbuf *)NULL,
-				    tp->rcv_nxt, tp->snd_una - 1, 0, isipv6);
-#endif
+			t_template = tcp_maketemplate(tp);
+			if (t_template) {
+				tcp_respond(tp, t_template->tt_ipgen,
+				    &t_template->tt_t, (struct mbuf *)NULL,
+				    tp->rcv_nxt, tp->snd_una - 1, 0);
+				(void) m_free(dtom(t_template));
+			}
 			tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
 		} else
-			tp->t_timer[TCPT_KEEP] = tcp_keepidle;
+			tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp);
 		break;
+
+#if TCPDEBUG
+	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
+		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
+			  PRU_SLOWTIMO);
+#endif
 	dropit:
 		tcpstat.tcps_keepdrops++;
-		so_tmp = tp->t_inpcb->inp_socket;
 		tp = tcp_drop(tp, ETIMEDOUT);
 		postevent(so_tmp, 0, EV_TIMEOUT);
 		break;
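
A note on the new sysctl_msec_to_ticks handler introduced at the top of this diff: it exports a kernel tick count to userland as milliseconds (s = tt * 1000 / hz), converts a newly written value back to ticks (tt = s * hz / 1000), and rejects any setting that rounds below one tick. The following standalone sketch is not part of the commit; it assumes an example hz of 100 and shows the same round-trip and EINVAL guard in plain user-space C.

/*
 * Standalone sketch (not from the commit) of the millisecond/tick
 * round-trip performed by sysctl_msec_to_ticks.  "hz" is an assumed
 * example value; the kernel supplies its own.
 */
#include <errno.h>
#include <stdio.h>

static const int hz = 100;		/* assumed ticks per second */

/* Export a tick count to userland as milliseconds. */
static int
ticks_to_msec(int ticks)
{
	return ticks * 1000 / hz;
}

/* Import a millisecond value from userland, rejecting sub-tick settings. */
static int
msec_to_ticks(int msec, int *ticks_out)
{
	int ticks = msec * hz / 1000;

	if (ticks < 1)
		return EINVAL;		/* same guard as the sysctl handler */
	*ticks_out = ticks;
	return 0;
}

int
main(void)
{
	int ticks, err;

	err = msec_to_ticks(7200000, &ticks);	/* two hours, e.g. a keepidle setting */
	printf("7200000 ms -> err=%d, ticks=%d, back to %d ms\n",
	    err, ticks, ticks_to_msec(ticks));

	err = msec_to_ticks(5, &ticks);		/* below one tick at hz=100 */
	printf("5 ms -> err=%d (EINVAL=%d)\n", err, EINVAL);
	return 0;
}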
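For reference, the time-wait wheel arithmetic used by add_to_time_wait_locked reduces the remaining 2MSL timer modulo N_TIME_WAIT_SLOTS (a power of two, so the bit mask works) and offsets it from the slot tcp_slowtimo is currently draining, wrapping at the end of the array. The standalone sketch below is not part of the commit and assumes N_TIME_WAIT_SLOTS is 128; xnu's own definition may differ.

/*
 * Standalone sketch (not from the commit) of the 2MSL wheel slot
 * calculation in add_to_time_wait_locked().
 */
#include <stdio.h>

#define N_TIME_WAIT_SLOTS	128	/* assumed; must be a power of two */

/* Slot a pcb should be queued on, given its 2MSL timer and the current slot. */
static int
time_wait_slot(int timer_2msl, int cur_tw_slot)
{
	int tw_slot;

	if (timer_2msl == 0)		/* never queue on the slot being drained */
		timer_2msl = 1;

	tw_slot = (timer_2msl & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
	if (tw_slot >= N_TIME_WAIT_SLOTS)
		tw_slot -= N_TIME_WAIT_SLOTS;
	return tw_slot;
}

int
main(void)
{
	/* A timer of 60 ticks queued while slot 100 is being drained wraps to slot 32. */
	printf("slot = %d\n", time_wait_slot(60, 100));
	return 0;
}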
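The TCPT_REXMT changes add a separate tcp_syn_backoff table so that connections still in SYN_SENT back off more gently at first than established ones, and the backed-off value is clamped into [t_rttmin, TCPTV_REXMTMAX] by TCPT_RANGESET. The sketch below is not part of the commit; the backoff tables are copied from the diff, while the base RTO and clamp bounds in main() are assumed example values.

/*
 * Standalone sketch (not from the commit) of the table selection and
 * clamping done when the retransmit timer fires.
 */
#include <stdio.h>

#define TCP_MAXRXTSHIFT	12

static const int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
static const int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

/* Clamp a value into [tvmin, tvmax], the way TCPT_RANGESET clamps t_rxtcur. */
static int
rangeset(int value, int tvmin, int tvmax)
{
	if (value < tvmin)
		return tvmin;
	if (value > tvmax)
		return tvmax;
	return value;
}

/* Backed-off, clamped retransmit timeout (in ticks) for a given shift. */
static int
next_rxtcur(int base_rexmtval, int rxtshift, int syn_sent, int rttmin, int rexmtmax)
{
	const int *table = syn_sent ? tcp_syn_backoff : tcp_backoff;
	int shift = rxtshift > TCP_MAXRXTSHIFT ? TCP_MAXRXTSHIFT : rxtshift;

	return rangeset(base_rexmtval * table[shift], rttmin, rexmtmax);
}

int
main(void)
{
	int shift;

	/* Example: base RTO of 3 ticks, clamped to [1, 128] ticks (assumed bounds). */
	for (shift = 1; shift <= 6; shift++)
		printf("shift %d: established=%d ticks, syn_sent=%d ticks\n", shift,
		    next_rxtcur(3, shift, 0, 1, 128),
		    next_rxtcur(3, shift, 1, 1, 128));
	return 0;
}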