/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mcache.h>
#include <sys/queue.h>
#include <kern/locks.h>
#include <kern/cpu_number.h>	/* before tcp_seq.h, for tcp_random18() */
#include <mach/boolean.h>

#include <net/route.h>
#include <net/if_var.h>
#include <net/ntstat.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <sys/kdebug.h>
#include <mach/sdt.h>
#include <netinet/mptcp_var.h>

/* Max number of times a stretch ack can be delayed on a connection */
#define	TCP_STRETCHACK_DELAY_THRESHOLD	5

/*
 * If the host processor has been sleeping for too long, this is the threshold
 * used to avoid sending stale retransmissions.
 */
#define	TCP_SLEEP_TOO_LONG	(10 * 60 * 1000) /* 10 minutes in ms */

/* tcp timer list */
struct tcptimerlist tcp_timer_list;

/* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
struct tcptailq tcp_tw_tailq;

static int
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
	int error, s, tt;

	tt = *(int *)arg1;
	s = tt * 1000 / TCP_RETRANSHZ;

	error = sysctl_handle_int(oidp, &s, 0, req);
	if (error || !req->newptr)
		return (error);

	tt = s * TCP_RETRANSHZ / 1000;
	if (tt < 1)
		return (EINVAL);

	*(int *)arg1 = tt;
	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int *)arg1);
	return (0);
}
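
/*
 * Worked example for the conversion above, assuming TCP_RETRANSHZ is
 * 1000 (a one-millisecond TCP clock tick, per tcp_timer.h): a sysctl
 * write of 75000 ms stores tt = 75000 * TCP_RETRANSHZ / 1000 = 75000
 * ticks, and a read converts the tick count back the same way, so
 * userland always deals in milliseconds whatever the tick rate is.
 */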

#if SYSCTL_SKMEM
int	tcp_keepinit = TCPTV_KEEP_INIT;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
	sysctl_msec_to_ticks, "I", "");

int	tcp_keepidle = TCPTV_KEEP_IDLE;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
	sysctl_msec_to_ticks, "I", "");

int	tcp_keepintvl = TCPTV_KEEPINTVL;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
	sysctl_msec_to_ticks, "I", "");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");

int	tcp_msl = TCPTV_MSL;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_msl, offsetof(skmem_sysctl, tcp.msl),
	sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#else /* SYSCTL_SKMEM */
int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepcnt;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_keepcnt, 0, "number of times to repeat keepalive");

int	tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#endif /* SYSCTL_SKMEM */

/*
 * Avoid DoS via TCP Robustness in Persist Condition
 * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
 * by allowing a system wide maximum persistence timeout value when in
 * Zero Window Probe mode.
 *
 * Expressed in milliseconds to be consistent with other timeout-related
 * values; the TCP socket option is in seconds.
 */
#if SYSCTL_SKMEM
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
	sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
#else /* SYSCTL_SKMEM */
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
	"Maximum persistence timeout for ZWP");
#endif /* SYSCTL_SKMEM */

SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
	CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
	"Assume SO_KEEPALIVE on all TCP connections");

/*
 * This parameter determines how long the timer list will stay in fast or
 * quick mode even though all connections are idle. In this state, the
 * timer will run more frequently anticipating new data.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
	TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");

/*
 * See tcp_syn_backoff[] for interval values between SYN retransmits;
 * the value set below defines the number of retransmits, before we
 * disable the timestamp and window scaling options during subsequent
 * SYN retransmits. Setting it to 0 disables the dropping off of those
 * two options.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
	CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
	10, "Number of retransmitted SYNs before disabling RFC 1323 "
	"options on local connections");

static int tcp_timer_advanced = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
	CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
	"Number of times one of the timers was advanced");

static int tcp_resched_timerlist = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
	CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
	"Number of times timer list was rescheduled as part of processing a packet");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
	"Path MTU Discovery Black Hole Detection");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
	"Path MTU Discovery Black Hole Detection lowered MSS");

static u_int32_t tcp_mss_rec_medium = 1200;
static u_int32_t tcp_mss_rec_low = 512;

#define	TCP_REPORT_STATS_INTERVAL	43200 /* 12 hours, in seconds */
int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;

/* performed garbage collection of "used" sockets */
static boolean_t tcp_gc_done = FALSE;

/* max idle probes */
int	tcp_maxpersistidle = TCPTV_KEEP_IDLE;

/*
 * TCP delack timer is set to 100 ms. Since the processing of timer list
 * in fast mode will happen no faster than 100 ms, the delayed ack timer
 * will fire somewhere between 100 and 200 ms.
 */
int	tcp_delack = TCP_RETRANSHZ / 10;

#if MPTCP
/*
 * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
 */
int tcp_jack_rxmt = TCP_RETRANSHZ / 2;
#endif /* MPTCP */

static boolean_t tcp_itimer_done = FALSE;

static void tcp_remove_timer(struct tcpcb *tp);
static void tcp_sched_timerlist(uint32_t offset);
static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
    u_int16_t probe_if_index);
static void tcp_sched_timers(struct tcpcb *tp);
static inline void tcp_set_lotimer_index(struct tcpcb *);
__private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
__private_extern__ void tcp_report_stats(void);

static u_int64_t tcp_last_report_time;

/*
 * Structure to store previously reported stats so that we can send
 * incremental changes in each report interval.
 */
struct tcp_last_report_stats {
	u_int32_t	tcps_connattempt;
	u_int32_t	tcps_accepts;
	u_int32_t	tcps_ecn_client_setup;
	u_int32_t	tcps_ecn_server_setup;
	u_int32_t	tcps_ecn_client_success;
	u_int32_t	tcps_ecn_server_success;
	u_int32_t	tcps_ecn_not_supported;
	u_int32_t	tcps_ecn_lost_syn;
	u_int32_t	tcps_ecn_lost_synack;
	u_int32_t	tcps_ecn_recv_ce;
	u_int32_t	tcps_ecn_recv_ece;
	u_int32_t	tcps_ecn_sent_ece;
	u_int32_t	tcps_ecn_conn_recv_ce;
	u_int32_t	tcps_ecn_conn_recv_ece;
	u_int32_t	tcps_ecn_conn_plnoce;
	u_int32_t	tcps_ecn_conn_pl_ce;
	u_int32_t	tcps_ecn_conn_nopl_ce;
	u_int32_t	tcps_ecn_fallback_synloss;
	u_int32_t	tcps_ecn_fallback_reorder;
	u_int32_t	tcps_ecn_fallback_ce;

	/* TFO-related statistics */
	u_int32_t	tcps_tfo_syn_data_rcv;
	u_int32_t	tcps_tfo_cookie_req_rcv;
	u_int32_t	tcps_tfo_cookie_sent;
	u_int32_t	tcps_tfo_cookie_invalid;
	u_int32_t	tcps_tfo_cookie_req;
	u_int32_t	tcps_tfo_cookie_rcv;
	u_int32_t	tcps_tfo_syn_data_sent;
	u_int32_t	tcps_tfo_syn_data_acked;
	u_int32_t	tcps_tfo_syn_loss;
	u_int32_t	tcps_tfo_blackhole;
	u_int32_t	tcps_tfo_cookie_wrong;
	u_int32_t	tcps_tfo_no_cookie_rcv;
	u_int32_t	tcps_tfo_heuristics_disable;
	u_int32_t	tcps_tfo_sndblackhole;

	/* MPTCP-related statistics */
	u_int32_t	tcps_mptcp_handover_attempt;
	u_int32_t	tcps_mptcp_interactive_attempt;
	u_int32_t	tcps_mptcp_aggregate_attempt;
	u_int32_t	tcps_mptcp_fp_handover_attempt;
	u_int32_t	tcps_mptcp_fp_interactive_attempt;
	u_int32_t	tcps_mptcp_fp_aggregate_attempt;
	u_int32_t	tcps_mptcp_heuristic_fallback;
	u_int32_t	tcps_mptcp_fp_heuristic_fallback;
	u_int32_t	tcps_mptcp_handover_success_wifi;
	u_int32_t	tcps_mptcp_handover_success_cell;
	u_int32_t	tcps_mptcp_interactive_success;
	u_int32_t	tcps_mptcp_aggregate_success;
	u_int32_t	tcps_mptcp_fp_handover_success_wifi;
	u_int32_t	tcps_mptcp_fp_handover_success_cell;
	u_int32_t	tcps_mptcp_fp_interactive_success;
	u_int32_t	tcps_mptcp_fp_aggregate_success;
	u_int32_t	tcps_mptcp_handover_cell_from_wifi;
	u_int32_t	tcps_mptcp_handover_wifi_from_cell;
	u_int32_t	tcps_mptcp_interactive_cell_from_wifi;
	u_int64_t	tcps_mptcp_handover_cell_bytes;
	u_int64_t	tcps_mptcp_interactive_cell_bytes;
	u_int64_t	tcps_mptcp_aggregate_cell_bytes;
	u_int64_t	tcps_mptcp_handover_all_bytes;
	u_int64_t	tcps_mptcp_interactive_all_bytes;
	u_int64_t	tcps_mptcp_aggregate_all_bytes;
	u_int32_t	tcps_mptcp_back_to_wifi;
	u_int32_t	tcps_mptcp_wifi_proxy;
	u_int32_t	tcps_mptcp_cell_proxy;
};

/* Returns true if the timer is on the timer list */
#define	TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)

/* Run the TCP timerlist at least once every hour */
#define	TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)

static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
static boolean_t tcp_garbage_collect(struct inpcb *, int);

#define	TIMERENTRY_TO_TP(te) \
	((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))

#define	VERIFY_NEXT_LINK(elm, field) do {	\
	if (LIST_NEXT((elm), field) != NULL &&	\
	    LIST_NEXT((elm), field)->field.le_prev !=	\
	    &((elm)->field.le_next))	\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define	VERIFY_PREV_LINK(elm, field) do {	\
	if (*(elm)->field.le_prev != (elm))	\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)

#define	TCP_SET_TIMER_MODE(mode, i) do {	\
	if (IS_TIMER_HZ_10MS(i))	\
		(mode) |= TCP_TIMERLIST_10MS_MODE;	\
	else if (IS_TIMER_HZ_100MS(i))	\
		(mode) |= TCP_TIMERLIST_100MS_MODE;	\
	else	\
		(mode) |= TCP_TIMERLIST_500MS_MODE;	\
} while (0)
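
/*
 * The three modes bucket each connection timer into one of three service
 * quanta (10 ms, 100 ms or 500 ms; see the TCP_TIMER_*_QUANTUM checks in
 * need_to_resched_timerlist() below), so the shared timer list only needs
 * to be scheduled as often as its most urgent entry requires.
 */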

#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
	CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
	"Medium MSS based on recommendation in link status report");
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
	CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
	"Low MSS based on recommendation in link status report");

static int32_t tcp_change_mss_recommended = 0;
static int
sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i, err = 0, changed = 0;
	struct ifnet *ifp;
	struct if_link_status ifsr;
	struct if_cellular_status_v1 *new_cell_sr;
	err = sysctl_io_number(req, tcp_change_mss_recommended,
	    sizeof (int32_t), &i, &changed);
	if (changed) {
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (IFNET_IS_CELLULAR(ifp)) {
				bzero(&ifsr, sizeof (ifsr));
				new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
				ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
				ifsr.ifsr_len = sizeof(*new_cell_sr);

				/* Set MSS recommended */
				new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
				new_cell_sr->mss_recommended = i;
				/* report the size of the status struct, not the pointer */
				err = ifnet_link_status_report(ifp, new_cell_sr, sizeof (*new_cell_sr));
				if (err == 0) {
					tcp_change_mss_recommended = i;
				} else {
					break;
				}
			}
		}
		ifnet_head_done();
	}
	return (err);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
	0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
	CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
	"Report stats interval");
#endif /* (DEVELOPMENT || DEBUG) */

/*
 * Inline function to compare two timers. If there is a reset of the sign
 * bit, it is safe to assume that the timer has wrapped around. By doing
 * signed comparison, we take care of wrap around such that the value
 * with the sign bit reset is actually ahead of the other.
 */
inline int32_t
timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
{
	return (int32_t)((t1 + toff1) - (t2 + toff2));
}
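
/*
 * Worked example of the wraparound case: if (t1 + toff1) has wrapped to
 * 0x00000010 while (t2 + toff2) is still 0xfffffff0, the unsigned
 * difference is 0x20; reinterpreted as signed it is +32, correctly
 * reporting t1 as 32 ticks ahead of t2 instead of ~4 billion ticks
 * behind.
 */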

/*
 * Add to tcp timewait list, delay is given in milliseconds.
 */
static void
add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;
	struct inpcb *inp = tp->t_inpcb;
	uint32_t timer;

	/* pcb list should be locked when we get here */
	LCK_RW_ASSERT(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

	/* We may get here multiple times, so check */
	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
		pcbinfo->ipi_twcount++;
		inp->inp_flags2 |= INP2_TIMEWAIT;

		/* Remove from global inp list */
		LIST_REMOVE(inp, inp_list);
	} else {
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
	}

	/* Compute the time at which this socket can be closed */
	timer = tcp_now + delay;

	/* We will use the TCPT_2MSL timer for tracking this delay */

	if (TIMER_IS_ON_LIST(tp))
		tcp_remove_timer(tp);
	tp->t_timer[TCPT_2MSL] = timer;

	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
}

void
add_to_time_wait(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;
	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP)
		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);

	/* 19182803: Notify nstat that connection is closing before waiting. */
	nstat_pcb_detach(tp->t_inpcb);

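	/*
	 * Lock-ordering note: ipi_lock precedes individual socket locks,
	 * so blocking on it while holding this socket's lock could
	 * deadlock. If the try-lock fails, drop the socket lock, block
	 * on ipi_lock, then re-take the socket lock.
	 */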
	if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
		socket_unlock(tp->t_inpcb->inp_socket, 0);
		lck_rw_lock_exclusive(pcbinfo->ipi_lock);
		socket_lock(tp->t_inpcb->inp_socket, 0);
	}
	add_to_time_wait_locked(tp, delay);
	lck_rw_done(pcbinfo->ipi_lock);

	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
}

/* If this is on time wait queue, remove it. */
void
tcp_remove_from_time_wait(struct inpcb *inp)
{
	struct tcpcb *tp = intotcpcb(inp);
	if (inp->inp_flags2 & INP2_TIMEWAIT)
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
}

static boolean_t
tcp_garbage_collect(struct inpcb *inp, int istimewait)
{
	boolean_t active = FALSE;
	struct socket *so, *mp_so = NULL;
	struct tcpcb *tp;

	so = inp->inp_socket;
	tp = intotcpcb(inp);

	if (so->so_flags & SOF_MP_SUBFLOW) {
		mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
		if (!socket_try_lock(mp_so)) {
			mp_so = NULL;
			active = TRUE;
			goto out;
		}
		mp_so->so_usecount++;
	}

	/*
	 * Skip if still in use or busy; it would have been more efficient
	 * if we were to test so_usecount against 0, but this isn't possible
	 * due to the current implementation of tcp_dropdropablreq() where
	 * overflow sockets that are eligible for garbage collection have
	 * their usecounts set to 1.
	 */
	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
		active = TRUE;
		goto out;
	}

	/* Check again under the lock */
	if (so->so_usecount > 1) {
		if (inp->inp_wantcnt == WNT_STOPUSING)
			active = TRUE;
		lck_mtx_unlock(&inp->inpcb_mtx);
		goto out;
	}

	if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
	    tp->t_state != TCPS_CLOSED) {
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);
		tcp_close(tp);
	}

	/*
	 * Overflowed socket dropped from the listening queue? Do this
	 * only if we are called to clean up the time wait slots, since
	 * tcp_dropdropablreq() considers a socket to have been fully
	 * dropped after add_to_time_wait() is finished.
	 * Also handle the case of connections getting closed by the peer
	 * while in the queue as seen with rdar://6422317
	 */
	if (so->so_usecount == 1 &&
	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
	    (so->so_head != NULL) &&
	    ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
	    (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) {

		if (inp->inp_state != INPCB_STATE_DEAD) {
			/* Become a regular mutex */
			lck_mtx_convert_spin(&inp->inpcb_mtx);
#if INET6
			if (SOCK_CHECK_DOM(so, PF_INET6))
				in6_pcbdetach(inp);
			else
#endif /* INET6 */
				in_pcbdetach(inp);
		}
		VERIFY(so->so_usecount > 0);
		so->so_usecount--;
		if (inp->inp_wantcnt == WNT_STOPUSING)
			active = TRUE;
		lck_mtx_unlock(&inp->inpcb_mtx);
		goto out;
	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
		lck_mtx_unlock(&inp->inpcb_mtx);
		active = FALSE;
		goto out;
	}

	/*
	 * We get here because the PCB is no longer searchable
	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
	 * (usecount is 0). This covers all cases, including overflow
	 * sockets and those that are considered as "embryonic",
	 * i.e. created by sonewconn() in TCP input path, and have
	 * not yet been committed. For the former, we reduce the usecount
	 * to 0 as done by the code above. For the latter, the usecount
	 * would have reduced to 0 as part of calling soabort() when the
	 * socket is dropped at the end of tcp_input().
	 */
	if (so->so_usecount == 0) {
		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);

		/*
		 * If this tp still happens to be on the timer list,
		 * take it out
		 */
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}

		if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
			if (SOCK_CHECK_DOM(so, PF_INET6))
				in6_pcbdetach(inp);
			else
#endif /* INET6 */
				in_pcbdetach(inp);
		}

		if (mp_so) {
			mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);

			/* so is now unlinked from mp_so - let's drop the lock */
			socket_unlock(mp_so, 1);
			mp_so = NULL;
		}

		in_pcbdispose(inp);
		active = FALSE;
		goto out;
	}

	lck_mtx_unlock(&inp->inpcb_mtx);
	active = TRUE;

out:
	if (mp_so)
		socket_unlock(mp_so, 1);

	return (active);
}

/*
 * TCP garbage collector callback (inpcb_timer_func_t).
 *
 * Returns the number of pcbs that will need to be gc-ed soon;
 * returning > 0 will keep the timer active.
 */
void
tcp_gc(struct inpcbinfo *ipi)
{
	struct inpcb *inp, *nxt;
	struct tcpcb *tw_tp, *tw_ntp;
#if TCPDEBUG
	int ostate;
#endif
#if KDEBUG
	static int tws_checked = 0;
#endif

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/*
	 * Update tcp_now here as it may get used while
	 * processing the slow timer.
	 */
	calculate_tcp_clock();

	/*
	 * Garbage collect socket/tcpcb: We need to acquire the list lock
	 * exclusively to do this
	 */

	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
		/* don't sweat it this time; cleanup was done last time */
		if (tcp_gc_done == TRUE) {
			tcp_gc_done = FALSE;
			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
			    tws_checked, cur_tw_slot, 0, 0, 0);
			/* Lock upgrade failed, give up this round */
			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
			return;
		}
		/* Upgrade failed, lost lock now take it again exclusive */
		lck_rw_lock_exclusive(ipi->ipi_lock);
	}
	tcp_gc_done = TRUE;

	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
		if (tcp_garbage_collect(inp, 0))
			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
	}

	/* Now cleanup the time wait ones */
	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
		/*
		 * We check the timestamp here without holding the
		 * socket lock for better performance. If there are
		 * any pcbs in time-wait, the timer will get rescheduled.
		 * Hence some error in this check can be tolerated.
		 *
		 * Sometimes a socket on time-wait queue can be closed if
		 * 2MSL timer expired but the application still has a
		 * usecount on it.
		 */
		if (tw_tp->t_state == TCPS_CLOSED ||
		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
			if (tcp_garbage_collect(tw_tp->t_inpcb, 1))
				atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
		}
	}

	/* take into account pcbs that are still in time_wait_slots */
	atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);

	lck_rw_done(ipi->ipi_lock);

	/* Clean up the socache while we are here */
	if (so_cache_timer())
		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
	    cur_tw_slot, 0, 0, 0);

	return;
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	tcp_remove_timer(tp);
	for (i = 0; i < TCPT_NTIMERS; i++)
		tp->t_timer[i] = 0;
	tp->tentry.timer_start = tcp_now;
	tp->tentry.index = TCPT_NONE;
}

int	tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */
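
/*
 * Consistency check on the constant above: the entries of tcp_backoff[]
 * sum to (1 + 2 + 4 + 8 + 16 + 32) + 7 * 64 = 63 + 448 = 511, so
 * tcp_totbackoff must be updated whenever the table changes.
 */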

void
tcp_rexmt_save_state(struct tcpcb *tp)
{
	u_int32_t fsize;
	if (TSTMP_SUPPORTED(tp)) {
		/*
		 * Since timestamps are supported on the connection,
		 * we can do recovery as described in rfc 4015.
		 */
		fsize = tp->snd_max - tp->snd_una;
		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
		tp->snd_recover_prev = tp->snd_recover;
	} else {
		/*
		 * Timestamp option is not supported on this connection.
		 * Record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight. See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
	}
	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
}

/*
 * Revert to the older segment size if there is an indication that PMTU
 * blackhole detection was not needed.
 */
void
tcp_pmtud_revert_segment_size(struct tcpcb *tp)
{
	int32_t optlen;

	VERIFY(tp->t_pmtud_saved_maxopd > 0);
	tp->t_flags |= TF_PMTUD;
	tp->t_flags &= ~TF_BLACKHOLE;
	optlen = tp->t_maxopd - tp->t_maxseg;
	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
	tp->t_maxseg = tp->t_maxopd - optlen;
	/*
	 * Reset the slow-start flight size as it
	 * may depend on the new MSS
	 */
	if (CC_ALGO(tp)->cwnd_init != NULL)
		CC_ALGO(tp)->cwnd_init(tp);
	tp->t_pmtud_start_ts = 0;
	tcpstat.tcps_pmtudbh_reverted++;

	/* change MSS according to recommendation, if there was one */
	tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
}

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(struct tcpcb *tp, int timer)
{
	int32_t rexmt, optlen = 0, idle_time = 0;
	struct socket *so;
	struct tcptemp *t_template;
#if TCPDEBUG
	int ostate;
#endif

#if INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
#endif /* INET6 */
	u_int64_t accsleep_ms;
	u_int32_t last_sleep_ms = 0;

	so = tp->t_inpcb->inp_socket;
	idle_time = tcp_now - tp->t_rcvtime;

	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
	 * delete connection control block.
	 * Otherwise (this case shouldn't happen) check again in a bit;
	 * we keep the socket in the main list in that case.
	 */
	case TCPT_2MSL:
		tcp_free_sackholes(tp);
		if (tp->t_state != TCPS_TIME_WAIT &&
		    tp->t_state != TCPS_FIN_WAIT_2 &&
		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
			    (u_int32_t)TCP_CONN_KEEPINTVL(tp));
		} else {
			tp = tcp_close(tp);
			return (tp);
		}
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	case TCPT_REXMT:
		absolutetime_to_nanoseconds(mach_absolutetime_asleep,
		    &accsleep_ms);
		accsleep_ms = accsleep_ms / 1000000UL;
		if (accsleep_ms > tp->t_accsleep_ms)
			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
		/*
		 * Drop a connection in the retransmit timer
		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
		 * times
		 * 2. If the time spent in this retransmission episode is
		 * more than the time limit set with TCP_RXT_CONNDROPTIME
		 * socket option
		 * 3. If TCP_RXT_FINDROP socket option was set and
		 * we have already retransmitted the FIN 3 times without
		 * receiving an ack
		 */
		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
			if (tp->t_state == TCPS_ESTABLISHED &&
			    tp->t_rxt_minimum_timeout > 0) {
				/*
				 * Avoid dropping a connection if minimum
				 * timeout is set and that time did not
				 * pass. We will retry sending
				 * retransmissions at the maximum interval
				 */
				if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
				    tp->t_rxt_minimum_timeout))) {
					tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
					goto retransmit_packet;
				}
			}
			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
				tcpstat.tcps_rxtfindrop++;
			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
				tcpstat.tcps_drop_after_sleep++;
			} else {
				tcpstat.tcps_timeoutdrop++;
			}
			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
				if (TCP_ECN_ENABLED(tp)) {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_on.rxmit_drop);
				} else {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_off.rxmit_drop);
				}
			}
			tp->t_rxtshift = TCP_MAXRXTSHIFT;
			postevent(so, 0, EV_TIMEOUT);
			soevent(so,
			    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));

			if (TCP_ECN_ENABLED(tp) &&
			    tp->t_state == TCPS_ESTABLISHED)
				tcp_heuristic_ecn_droprxmt(tp);

			tp = tcp_drop(tp, tp->t_softerror ?
			    tp->t_softerror : ETIMEDOUT);

			break;
		}
retransmit_packet:
		tcpstat.tcps_rexmttimeo++;
		tp->t_accsleep_ms = accsleep_ms;

		if (tp->t_rxtshift == 1 &&
		    tp->t_state == TCPS_ESTABLISHED) {
			/* Set the time at which retransmission started. */
			tp->t_rxtstart = tcp_now;

			/*
			 * if this is the first retransmit timeout, save
			 * the state so that we can recover if the timeout
			 * is spurious.
			 */
			tcp_rexmt_save_state(tp);
		}
#if MPTCP
		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
		    (tp->t_state == TCPS_ESTABLISHED) &&
		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
			mptcp_act_on_txfail(so);

		}

		if (so->so_flags & SOF_MP_SUBFLOW) {
			struct mptses *mpte = tptomptp(tp)->mpt_mpte;

			mptcp_check_subflows_and_add(mpte);
		}
#endif /* MPTCP */

		if (tp->t_adaptive_wtimo > 0 &&
		    tp->t_rxtshift > tp->t_adaptive_wtimo &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			/* Send an event to the application */
			soevent(so,
			    (SO_FILT_HINT_LOCKED|
			    SO_FILT_HINT_ADAPTIVE_WTIMO));
		}

		/*
		 * If this is a retransmit timeout after PTO, the PTO
		 * was not effective
		 */
		if (tp->t_flagsext & TF_SENT_TLPROBE) {
			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
			tcpstat.tcps_rto_after_pto++;
		}

		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
			/*
			 * Retransmit timer fired before entering recovery
			 * on a connection with packet re-ordering. This
			 * suggests that the reordering metrics computed
			 * are not accurate.
			 */
			tp->t_reorderwin = 0;
			tp->t_timer[TCPT_DELAYFR] = 0;
			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
		}

		if (tp->t_state == TCPS_SYN_RECEIVED)
			tcp_disable_tfo(tp);

		if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
		    tp->t_rxtshift > 2)) {
			/*
			 * For regular retransmissions, a first one is being
			 * done for tail-loss probe.
			 * Thus, if rxtshift > 1, this means we have sent the segment
			 * a total of 3 times.
			 *
			 * If we are in SYN-SENT state, then there is no tail-loss
			 * probe thus we have to let rxtshift go up to 3.
			 */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			sorwakeup(so);
			sowwakeup(so);

			tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
			tcpstat.tcps_tfo_sndblackhole++;
		}

		if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
		    tp->t_rxtshift > 1) {
			if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
				tcp_heuristic_tfo_middlebox(tp);

				so->so_error = ENODATA;
				sorwakeup(so);
				sowwakeup(so);
			}
		}

		if (tp->t_state == TCPS_SYN_SENT) {
			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
			tp->t_stat.synrxtshift = tp->t_rxtshift;

			/* When retransmitting, disable TFO */
			if (tfo_enabled(tp) &&
			    (!(so->so_flags1 & SOF1_DATA_AUTHENTICATED) ||
			    (tp->t_flagsext & TF_FASTOPEN_HEUR))) {
				tp->t_flagsext &= ~TF_FASTOPEN;
				tp->t_tfo_flags |= TFO_F_SYN_LOSS;
			}
		} else {
			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
		}

		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
		    TCP_ADD_REXMTSLOP(tp));
		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);

		if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb))
			goto fc_output;

		tcp_free_sackholes(tp);
		/*
		 * Check for potential Path MTU Discovery Black Hole
		 */
		if (tcp_pmtud_black_hole_detect &&
		    !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
		    (tp->t_state == TCPS_ESTABLISHED)) {
			if ((tp->t_flags & TF_PMTUD) &&
			    ((tp->t_flags & TF_MAXSEGSNT)
			    || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) &&
			    tp->t_rxtshift == 2) {
				/*
				 * Enter Path MTU Black-hole Detection mechanism:
				 * - Disable Path MTU Discovery (IP "DF" bit).
				 * - Reduce MTU to lower value than what we
				 * negotiated with the peer.
				 */
				/* Disable Path MTU Discovery for now */
				tp->t_flags &= ~TF_PMTUD;
				/* Record that we may have found a black hole */
				tp->t_flags |= TF_BLACKHOLE;
				optlen = tp->t_maxopd - tp->t_maxseg;
				/* Keep track of previous MSS */
				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
				tp->t_pmtud_start_ts = tcp_now;
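				/*
				 * A start timestamp of 0 means "no blackhole
				 * detection in progress", so avoid storing 0.
				 */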
				if (tp->t_pmtud_start_ts == 0)
					tp->t_pmtud_start_ts++;
				/* Reduce the MSS to intermediary value */
				if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
					tp->t_maxopd = tcp_pmtud_black_hole_mss;
				} else {
					tp->t_maxopd = /* use the default MSS */
#if INET6
					    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
					    tcp_mssdflt;
				}
				tp->t_maxseg = tp->t_maxopd - optlen;

				/*
				 * Reset the slow-start flight size
				 * as it may depend on the new MSS
				 */
				if (CC_ALGO(tp)->cwnd_init != NULL)
					CC_ALGO(tp)->cwnd_init(tp);
				tp->snd_cwnd = tp->t_maxseg;
			}
			/*
			 * If further retransmissions are still
			 * unsuccessful with a lowered MTU, maybe this
			 * isn't a Black Hole and we restore the previous
			 * MSS and blackhole detection flags.
			 */
			else {
				if ((tp->t_flags & TF_BLACKHOLE) &&
				    (tp->t_rxtshift > 4)) {
					tcp_pmtud_revert_segment_size(tp);
					tp->snd_cwnd = tp->t_maxseg;
				}
			}
		}

		/*
		 * Disable rfc1323 and rfc1644 if we haven't got any
		 * response to our SYN (after we reach the threshold)
		 * to work-around some broken terminal servers (most of
		 * which have hopefully been retired) that have bad VJ
		 * header compression code which trashes TCP segments
		 * containing unknown-to-them TCP options.
		 * Do this only on non-local connections.
		 */
		if (tp->t_state == TCPS_SYN_SENT &&
		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres)
			tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);

		/*
		 * If losing, let the lower level know and try for
		 * a better route.  Also, if we backed off this far,
		 * our srtt estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#if INET6
			if (isipv6)
				in6_losing(tp->t_inpcb);
			else
#endif /* INET6 */
				in_losing(tp->t_inpcb);
			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
			tp->t_srtt = 0;
		}
		tp->snd_nxt = tp->snd_una;
		/*
		 * Note:  We overload snd_recover to function also as the
		 * snd_last variable described in RFC 2582
		 */
		tp->snd_recover = tp->snd_max;
		/*
		 * Force a segment to be sent.
		 */
		tp->t_flags |= TF_ACKNOW;

		/* If timing a segment in this window, stop the timer */
		tp->t_rtttime = 0;

		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1)
			tcpstat.tcps_tailloss_rto++;

		/*
		 * RFC 5681 says: when a TCP sender detects segment loss
		 * using retransmit timer and the given segment has already
		 * been retransmitted by way of the retransmission timer at
		 * least once, the value of ssthresh is held constant
		 */
		if (tp->t_rxtshift == 1 &&
		    CC_ALGO(tp)->after_timeout != NULL) {
			CC_ALGO(tp)->after_timeout(tp);
			/*
			 * CWR notifications are to be sent on new data
			 * right after Fast Retransmits and ECE
			 * notification receipts.
			 */
			if (TCP_ECN_ENABLED(tp))
				tp->ecn_flags |= TE_SENDCWR;
		}

		EXIT_FASTRECOVERY(tp);

		/* Exit cwnd non validated phase */
		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;

fc_output:
		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);

		(void) tcp_output(tp);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	case TCPT_PERSIST:
		tcpstat.tcps_persisttimeo++;
		/*
		 * Hack: if the peer is dead/unreachable, we do not
		 * time out if the window is closed.  After a full
		 * backoff, drop the connection if the idle time
		 * (no responses to probes) reaches the maximum
		 * backoff that we would use if retransmitting.
		 *
		 * Drop the connection if we reached the maximum allowed time for
		 * Zero Window Probes without a non-zero update from the peer.
		 * See rdar://5805356
		 */
		if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
		    (idle_time >= tcp_maxpersistidle ||
		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
		    ((tp->t_persist_stop != 0) &&
		    TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
			tcpstat.tcps_persistdrop++;
			postevent(so, 0, EV_TIMEOUT);
			soevent(so,
			    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
			tp = tcp_drop(tp, ETIMEDOUT);
			break;
		}
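		/*
		 * TF_FORCE makes tcp_output() send one byte beyond the
		 * advertised zero window (the actual zero window probe);
		 * tcp_setpersist() backs off the time to the next probe.
		 */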
		tcp_setpersist(tp);
		tp->t_flagsext |= TF_FORCE;
		(void) tcp_output(tp);
		tp->t_flagsext &= ~TF_FORCE;
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
		tcpstat.tcps_keeptimeo++;
#if MPTCP
		/*
		 * Regular TCP connections do not send keepalives after
		 * closing; MPTCP must not do so either after sending
		 * Data FINs.
		 */
		struct mptcb *mp_tp = tptomptp(tp);
		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
		    (tp->t_state > TCPS_ESTABLISHED)) {
			goto dropit;
		} else if (mp_tp != NULL) {
			if ((mptcp_ok_to_keepalive(mp_tp) == 0))
				goto dropit;
		}
#endif /* MPTCP */
		if (tp->t_state < TCPS_ESTABLISHED)
			goto dropit;
		if ((always_keepalive ||
		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
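			/*
			 * With the defaults in tcp_timer.h (2 hours
			 * keep-idle, 8 probes spaced 75 seconds apart), an
			 * unresponsive peer is dropped after roughly
			 * 2 hours and 10 minutes.
			 */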
			if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp))
				goto dropit;
			/*
			 * Send a packet designed to force a response
			 * if the peer is up and reachable:
			 * either an ACK if the connection is still alive,
			 * or an RST if the peer has closed the connection
			 * due to timeout or reboot.
			 * Using sequence number tp->snd_una-1
			 * causes the transmitted zero-length segment
			 * to lie outside the receive window;
			 * by the protocol spec, this requires the
			 * correspondent TCP to respond.
			 */
			tcpstat.tcps_keepprobe++;
			t_template = tcp_maketemplate(tp);
			if (t_template) {
				struct inpcb *inp = tp->t_inpcb;
				struct tcp_respond_args tra;

				bzero(&tra, sizeof(tra));
				tra.nocell = INP_NO_CELLULAR(inp);
				tra.noexpensive = INP_NO_EXPENSIVE(inp);
				tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
				tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
				if (tp->t_inpcb->inp_flags & INP_BOUND_IF)
					tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
				else
					tra.ifscope = IFSCOPE_NONE;
				tcp_respond(tp, t_template->tt_ipgen,
				    &t_template->tt_t, (struct mbuf *)NULL,
				    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
				(void) m_free(dtom(t_template));
				if (tp->t_flagsext & TF_DETECT_READSTALL)
					tp->t_rtimo_probes++;
			}
			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPINTVL(tp));
		} else {
			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPIDLE(tp));
		}
		if (tp->t_flagsext & TF_DETECT_READSTALL) {
			struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
			bool reenable_probe = false;
			/*
			 * The keep alive packets sent to detect a read
			 * stall did not get a response from the
			 * peer. Generate more keep-alives to confirm this.
			 * If the number of probes sent reaches the limit,
			 * generate an event.
			 */
			if (tp->t_adaptive_rtimo > 0) {
				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
					/* Generate an event */
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_ADAPTIVE_RTIMO));
					tcp_keepalive_reset(tp);
				} else {
					reenable_probe = true;
				}
			} else if (outifp != NULL &&
			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
				reenable_probe = true;
			} else {
				tp->t_flagsext &= ~TF_DETECT_READSTALL;
			}
			if (reenable_probe) {
				int ind = min(tp->t_rtimo_probes,
				    TCP_MAXRXTSHIFT);
				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
			}
		}
		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
			int ind;

			tp->t_tfo_probes++;
			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);

			/*
			 * We take the minimum among the time set by true
			 * keepalive (see above) and the backoff'd RTO. That
			 * way we backoff in case of packet-loss but will never
			 * timeout slower than regular keepalive due to the
			 * backing off.
			 */
			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
			    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
			    tp->t_timer[TCPT_KEEP]);
		} else if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
		    tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
			/* Still no data! Let's assume a TFO-error and err out... */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			sorwakeup(so);
			tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
			tcpstat.tcps_tfo_blackhole++;
		}
		break;
	case TCPT_DELACK:
		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_timer[TCPT_DELACK] = 0;
			tp->t_flags |= TF_ACKNOW;

			/*
			 * If delayed ack timer fired while stretching
			 * acks, count the number of times the streaming
			 * detection was not correct. If this exceeds a
			 * threshold, disable stretch ack on this
			 * connection
			 *
			 * Also, go back to acking every other packet.
			 */
			if ((tp->t_flags & TF_STRETCHACK)) {
				if (tp->t_unacksegs > 1 &&
				    tp->t_unacksegs < maxseg_unacked)
					tp->t_stretchack_delayed++;

				if (tp->t_stretchack_delayed >
				    TCP_STRETCHACK_DELAY_THRESHOLD) {
					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
					/*
					 * Note the time at which stretch
					 * ack was disabled automatically
					 */
					tp->rcv_nostrack_ts = tcp_now;
					tcpstat.tcps_nostretchack++;
					tp->t_stretchack_delayed = 0;
					tp->rcv_nostrack_pkts = 0;
				}
				tcp_reset_stretch_ack(tp);
			}

			/*
			 * If we are measuring inter packet arrival jitter
			 * for throttling a connection, this delayed ack
			 * might be the reason for accumulating some
			 * jitter. So let's restart the measurement.
			 */
			CLEAR_IAJ_STATE(tp);

			tcpstat.tcps_delack++;
			(void) tcp_output(tp);
		}
		break;

#if MPTCP
	case TCPT_JACK_RXMT:
		if ((tp->t_state == TCPS_ESTABLISHED) &&
		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
				tcpstat.tcps_timeoutdrop++;
				postevent(so, 0, EV_TIMEOUT);
				soevent(so,
				    (SO_FILT_HINT_LOCKED|
				    SO_FILT_HINT_TIMEOUT));
				tp = tcp_drop(tp, tp->t_softerror ?
				    tp->t_softerror : ETIMEDOUT);
				break;
			}
			tcpstat.tcps_join_rxmts++;
			tp->t_mpflags |= TMPF_SND_JACK;
			tp->t_flags |= TF_ACKNOW;

			/*
			 * No backoff is implemented for simplicity for this
			 * corner case.
			 */
			(void) tcp_output(tp);
		}
		break;
#endif /* MPTCP */

	case TCPT_PTO:
	{
		int32_t snd_len;
		tp->t_flagsext &= ~(TF_SENT_TLPROBE);

		/*
		 * Check if the connection is in the right state to
		 * send a probe
		 */
		if (tp->t_state != TCPS_ESTABLISHED ||
		    (tp->t_rxtshift > 0 && !(tp->t_flagsext & TF_PROBING))
		    || tp->snd_max == tp->snd_una ||
		    !SACK_ENABLED(tp) || !TAILQ_EMPTY(&tp->snd_holes) ||
		    IN_FASTRECOVERY(tp))
			break;

		/*
		 * If there is no new data to send or if the
		 * connection is limited by receive window then
		 * retransmit the last segment, otherwise send
		 * new data.
		 */
		snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
		    - (tp->snd_max - tp->snd_una);
		if (snd_len > 0) {
			tp->snd_nxt = tp->snd_max;
		} else {
			snd_len = min((tp->snd_max - tp->snd_una),
			    tp->t_maxseg);
			tp->snd_nxt = tp->snd_max - snd_len;
		}

		tcpstat.tcps_pto++;
		if (tp->t_flagsext & TF_PROBING)
			tcpstat.tcps_probe_if++;

		/* If timing a segment in this window, stop the timer */
		tp->t_rtttime = 0;
		/* Note that tail loss probe is being sent */
		tp->t_flagsext |= TF_SENT_TLPROBE;
		tp->t_tlpstart = tcp_now;

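		/*
		 * Temporarily inflate cwnd by one MSS so tcp_output() will
		 * emit the probe even when the congestion window is fully
		 * used, then restore it afterwards.
		 */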
1523 tp->snd_cwnd += tp->t_maxseg;
1524 (void )tcp_output(tp);
1525 tp->snd_cwnd -= tp->t_maxseg;
1526
1527 tp->t_tlphighrxt = tp->snd_nxt;
fe8ab488
A
1528 break;
1529 }
	case TCPT_DELAYFR:
		tp->t_flagsext &= ~TF_DELAY_RECOVERY;

		/*
		 * Don't do anything if one of the following is true:
		 * - the connection is already in recovery
		 * - everything up to snd_recover has been acknowledged
		 * - the retransmit timeout has fired
		 */
		if (IN_FASTRECOVERY(tp) ||
		    SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
		    tp->t_rxtshift > 0)
			break;

		VERIFY(SACK_ENABLED(tp));
		tcp_rexmt_save_state(tp);
		if (CC_ALGO(tp)->pre_fr != NULL) {
			CC_ALGO(tp)->pre_fr(tp);
			if (TCP_ECN_ENABLED(tp))
				tp->ecn_flags |= TE_SENDCWR;
		}
		ENTER_FASTRECOVERY(tp);

		tp->t_timer[TCPT_REXMT] = 0;
		tcpstat.tcps_sack_recovery_episode++;
		tp->t_sack_recovery_episode++;
		tp->sack_newdata = tp->snd_nxt;
		tp->snd_cwnd = tp->t_maxseg;
		tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
		(void) tcp_output(tp);
		break;
	dropit:
		tcpstat.tcps_keepdrops++;
		postevent(so, 0, EV_TIMEOUT);
		soevent(so,
		    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
		tp = tcp_drop(tp, ETIMEDOUT);
		break;
	}
#if TCPDEBUG
	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	return (tp);
}
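
/*
 * Editorial note: a NULL return from tcp_timers() means the handler
 * dropped the connection via tcp_drop(), so callers (for example
 * tcp_run_conn_timer() below) must not dereference tp afterwards.
 */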

/* Remove a timer entry from timer list */
void
tcp_remove_timer(struct tcpcb *tp)
{
	struct tcptimerlist *listp = &tcp_timer_list;

	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
	if (!(TIMER_IS_ON_LIST(tp))) {
		return;
	}
	lck_mtx_lock(listp->mtx);

	/* Check if pcb is on timer list again after acquiring the lock */
	if (!(TIMER_IS_ON_LIST(tp))) {
		lck_mtx_unlock(listp->mtx);
		return;
	}

	if (listp->next_te != NULL && listp->next_te == &tp->tentry)
		listp->next_te = LIST_NEXT(&tp->tentry, le);

	LIST_REMOVE(&tp->tentry, le);
	tp->t_flags &= ~(TF_TIMER_ONLIST);

	listp->entries--;

	tp->tentry.le.le_next = NULL;
	tp->tentry.le.le_prev = NULL;
	lck_mtx_unlock(listp->mtx);
}
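
/*
 * Editorial note on the pattern above: TIMER_IS_ON_LIST() is tested
 * once without the list mutex purely to avoid taking it in the common
 * case, then re-tested under listp->mtx before the entry is unlinked,
 * since another thread may have removed it in between.
 */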

/*
 * Function to check if the timerlist needs to be rescheduled to run
 * the timer entry correctly. Basically, this is to check if we can avoid
 * taking the list lock.
 */
static boolean_t
need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
{
	struct tcptimerlist *listp = &tcp_timer_list;
	int32_t diff;

	/*
	 * If the list is being processed then the state of the list is
	 * in flux. In this case always acquire the lock and set the state
	 * correctly.
	 */
	if (listp->running)
		return (TRUE);

	if (!listp->scheduled)
		return (TRUE);

	diff = timer_diff(listp->runtime, 0, runtime, 0);
	if (diff <= 0) {
		/* The list is going to run before this timer */
		return (FALSE);
	} else {
		if (mode & TCP_TIMERLIST_10MS_MODE) {
			if (diff <= TCP_TIMER_10MS_QUANTUM)
				return (FALSE);
		} else if (mode & TCP_TIMERLIST_100MS_MODE) {
			if (diff <= TCP_TIMER_100MS_QUANTUM)
				return (FALSE);
		} else {
			if (diff <= TCP_TIMER_500MS_QUANTUM)
				return (FALSE);
		}
	}
	return (TRUE);
}
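
/*
 * Illustration (an editor's sketch; assumes tcp_now and the quantum
 * macros are in milliseconds): if the list is scheduled to fire at
 * listp->runtime = 1000 and a timer in 100ms mode wants to run at
 * runtime = 1080, diff = 80 <= TCP_TIMER_100MS_QUANTUM, so the pending
 * wakeup is close enough and no reschedule is needed. The same 80ms
 * gap would require a reschedule for a timer in 10ms mode.
 */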

void
tcp_sched_timerlist(uint32_t offset)
{
	uint64_t deadline = 0;
	struct tcptimerlist *listp = &tcp_timer_list;

	LCK_MTX_ASSERT(listp->mtx, LCK_MTX_ASSERT_OWNED);

	offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
	listp->runtime = tcp_now + offset;
	listp->schedtime = tcp_now;
	if (listp->runtime == 0) {
		listp->runtime++;
		offset++;
	}

	clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);

	thread_call_enter_delayed(listp->call, deadline);
	listp->scheduled = TRUE;
}
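
#if 0	/* usage sketch (editor's illustration, never compiled) */
	/*
	 * tcp_sched_timerlist() asserts that the list mutex is held and
	 * clamps the requested offset to TCP_TIMERLIST_MAX_OFFSET, so a
	 * typical caller looks like:
	 */
	lck_mtx_lock(tcp_timer_list.mtx);
	tcp_sched_timerlist(TCP_TIMER_10MS_QUANTUM);
	lck_mtx_unlock(tcp_timer_list.mtx);
#endif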

/*
 * Function to run the timers for a connection.
 *
 * Returns the offset of the next timer to be run for this connection
 * which can be used to reschedule the timerlist.
 *
 * te_mode is an out parameter that indicates the modes of active
 * timers for this connection.
 */
u_int32_t
tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
    u_int16_t probe_if_index)
{
	struct socket *so;
	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
	u_int32_t timer_val, offset = 0, lo_timer = 0;
	int32_t diff;
	boolean_t needtorun[TCPT_NTIMERS];
	int count = 0;

	VERIFY(tp != NULL);
	bzero(needtorun, sizeof(needtorun));
	*te_mode = 0;

	socket_lock(tp->t_inpcb->inp_socket, 1);

	so = tp->t_inpcb->inp_socket;
	/* Release the want count on inp */
	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
	    == WNT_STOPUSING) {
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}

		/*
		 * The TCP connection was closed while we were
		 * waiting for the lock. Done.
		 */
		goto done;
	}

	/*
	 * If this connection is over an interface that needs to
	 * be probed, send probe packets to reinitiate communication.
	 */
	if (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL &&
	    tp->t_inpcb->inp_last_outifp->if_index == probe_if_index) {
		tp->t_flagsext |= TF_PROBING;
		tcp_timers(tp, TCPT_PTO);
		tp->t_timer[TCPT_PTO] = 0;
		tp->t_flagsext &= ~TF_PROBING;
	}

	/*
	 * Since the timer thread needs to wait for tcp lock, it may race
	 * with another thread that can cancel or reschedule the timer
	 * that is about to run. Check if we need to run anything.
	 */
	if ((index = tp->tentry.index) == TCPT_NONE)
		goto done;

	timer_val = tp->t_timer[index];

	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
	if (diff > 0) {
		if (tp->tentry.index != TCPT_NONE) {
			offset = diff;
			*(te_mode) = tp->tentry.mode;
		}
		goto done;
	}

	tp->t_timer[index] = 0;
	if (timer_val > 0) {
		tp = tcp_timers(tp, index);
		if (tp == NULL)
			goto done;
	}

	/*
	 * Check if there are any other timers that need to be run.
	 * While doing it, adjust the timer values with respect to
	 * tcp_now.
	 */
	tp->tentry.mode = 0;
	for (i = 0; i < TCPT_NTIMERS; ++i) {
		if (tp->t_timer[i] != 0) {
			diff = timer_diff(tp->tentry.timer_start,
			    tp->t_timer[i], tcp_now, 0);
			if (diff <= 0) {
				needtorun[i] = TRUE;
				count++;
			} else {
				tp->t_timer[i] = diff;
				needtorun[i] = FALSE;
				if (lo_timer == 0 || diff < lo_timer) {
					lo_timer = diff;
					lo_index = i;
				}
				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
			}
		}
	}

	tp->tentry.timer_start = tcp_now;
	tp->tentry.index = lo_index;
	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);

	if (tp->tentry.index != TCPT_NONE) {
		tp->tentry.runtime = tp->tentry.timer_start +
		    tp->t_timer[tp->tentry.index];
		if (tp->tentry.runtime == 0)
			tp->tentry.runtime++;
	}

	if (count > 0) {
		/* Run any other timers outstanding at this time. */
		for (i = 0; i < TCPT_NTIMERS; ++i) {
			if (needtorun[i]) {
				tp->t_timer[i] = 0;
				tp = tcp_timers(tp, i);
				if (tp == NULL) {
					offset = 0;
					*(te_mode) = 0;
					goto done;
				}
			}
		}
		tcp_set_lotimer_index(tp);
	}

	if (tp->tentry.index < TCPT_NONE) {
		offset = tp->t_timer[tp->tentry.index];
		*(te_mode) = tp->tentry.mode;
	}

done:
	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
		tcp_remove_timer(tp);
		offset = 0;
	}

	socket_unlock(so, 1);
	return (offset);
}
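
/*
 * Editorial note on the return contract: tcp_run_conn_timer() returns
 * 0 either when the connection no longer has any timer armed (it is
 * then taken off the list) or when the tcpcb was dropped while running
 * a handler; otherwise it returns the offset (in tcp_now ticks,
 * nominally milliseconds) of the connection's next timer and reports
 * the active modes through *te_mode so that tcp_run_timerlist() can
 * pick the next list wakeup.
 */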

void
tcp_run_timerlist(void * arg1, void * arg2)
{
#pragma unused(arg1, arg2)
	struct tcptimerentry *te, *next_te;
	struct tcptimerlist *listp = &tcp_timer_list;
	struct tcpcb *tp;
	uint32_t next_timer = 0; /* offset of the next timer on the list */
	u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */
	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
	uint32_t active_count = 0;

	calculate_tcp_clock();

	lck_mtx_lock(listp->mtx);

	int32_t drift = tcp_now - listp->runtime;
	if (drift <= 1) {
		tcpstat.tcps_timer_drift_le_1_ms++;
	} else if (drift <= 10) {
		tcpstat.tcps_timer_drift_le_10_ms++;
	} else if (drift <= 20) {
		tcpstat.tcps_timer_drift_le_20_ms++;
	} else if (drift <= 50) {
		tcpstat.tcps_timer_drift_le_50_ms++;
	} else if (drift <= 100) {
		tcpstat.tcps_timer_drift_le_100_ms++;
	} else if (drift <= 200) {
		tcpstat.tcps_timer_drift_le_200_ms++;
	} else if (drift <= 500) {
		tcpstat.tcps_timer_drift_le_500_ms++;
	} else if (drift <= 1000) {
		tcpstat.tcps_timer_drift_le_1000_ms++;
	} else {
		tcpstat.tcps_timer_drift_gt_1000_ms++;
	}

	listp->running = TRUE;

	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
		uint32_t offset = 0;
		uint32_t runtime = te->runtime;
		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
			offset = timer_diff(runtime, 0, tcp_now, 0);
			if (next_timer == 0 || offset < next_timer) {
				next_timer = offset;
			}
			list_mode |= te->mode;
			continue;
		}

		tp = TIMERENTRY_TO_TP(te);

		/*
		 * Acquire an inp wantcnt on the inpcb so that the socket
		 * won't get detached even if tcp_close is called
		 */
		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
		    == WNT_STOPUSING) {
			/*
			 * Somehow this pcb went into dead state while
			 * on the timer list, just take it off the list.
			 * Since the timer list entry pointers are
			 * protected by the timer list lock, we can
			 * do it here without the socket lock.
			 */
			if (TIMER_IS_ON_LIST(tp)) {
				tp->t_flags &= ~(TF_TIMER_ONLIST);
				LIST_REMOVE(&tp->tentry, le);
				listp->entries--;

				tp->tentry.le.le_next = NULL;
				tp->tentry.le.le_prev = NULL;
			}
			continue;
		}
		active_count++;

		/*
		 * Store the next timerentry pointer before releasing the
		 * list lock. If that entry has to be removed when we
		 * release the lock, this pointer will be updated to the
		 * element after that.
		 */
		listp->next_te = next_te;

		VERIFY_NEXT_LINK(&tp->tentry, le);
		VERIFY_PREV_LINK(&tp->tentry, le);

		lck_mtx_unlock(listp->mtx);

		offset = tcp_run_conn_timer(tp, &te_mode,
		    listp->probe_if_index);

		lck_mtx_lock(listp->mtx);

		next_te = listp->next_te;
		listp->next_te = NULL;

		if (offset > 0 && te_mode != 0) {
			list_mode |= te_mode;

			if (next_timer == 0 || offset < next_timer)
				next_timer = offset;
		}
	}

	if (!LIST_EMPTY(&listp->lhead)) {
		u_int16_t next_mode = 0;
		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE))
			next_mode = TCP_TIMERLIST_10MS_MODE;
		else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE))
			next_mode = TCP_TIMERLIST_100MS_MODE;
		else
			next_mode = TCP_TIMERLIST_500MS_MODE;

		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
			listp->idleruns = 0;
		} else {
			/*
			 * The next required mode is slow mode, but if
			 * the last one was a faster mode and we did not
			 * have enough idle runs, repeat the last mode.
			 *
			 * We try to keep the timer list in fast mode for
			 * some idle time in expectation of new data.
			 */
			if (listp->mode != next_mode &&
			    listp->idleruns < timer_fastmode_idlemax) {
				listp->idleruns++;
				next_mode = listp->mode;
				next_timer = TCP_TIMER_100MS_QUANTUM;
			} else {
				listp->idleruns = 0;
			}
		}
		listp->mode = next_mode;
		if (listp->pref_offset != 0)
			next_timer = min(listp->pref_offset, next_timer);

		if (listp->mode == TCP_TIMERLIST_500MS_MODE)
			next_timer = max(next_timer,
			    TCP_TIMER_500MS_QUANTUM);

		tcp_sched_timerlist(next_timer);
	} else {
		/*
		 * No need to reschedule this timer, but always run
		 * periodically, at a much coarser granularity.
		 */
		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
	}

	listp->running = FALSE;
	listp->pref_mode = 0;
	listp->pref_offset = 0;
	listp->probe_if_index = 0;

	lck_mtx_unlock(listp->mtx);
}
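
/*
 * Recap of the scheduling policy above (editorial summary): the next
 * wakeup uses the fastest granularity any surviving timer asked for,
 * i.e. 10ms, 100ms or 500ms mode with their corresponding quantums,
 * and a list that wants to fall back to 500ms mode is kept in the
 * previous faster mode for up to timer_fastmode_idlemax idle runs in
 * expectation of new data.
 */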

/*
 * Function to check if the timerlist needs to be rescheduled to run this
 * connection's timers correctly.
 */
void
tcp_sched_timers(struct tcpcb *tp)
{
	struct tcptimerentry *te = &tp->tentry;
	u_int16_t index = te->index;
	u_int16_t mode = te->mode;
	struct tcptimerlist *listp = &tcp_timer_list;
	int32_t offset = 0;
	boolean_t list_locked = FALSE;

	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
		/* Just return without adding the dead pcb to the list */
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}
		return;
	}

	if (index == TCPT_NONE) {
		/* Nothing to run */
		tcp_remove_timer(tp);
		return;
	}

	/*
	 * Compute the offset at which the next timer for this connection
	 * has to run.
	 */
	offset = timer_diff(te->runtime, 0, tcp_now, 0);
	if (offset <= 0) {
		offset = 1;
		tcp_timer_advanced++;
	}

	if (!TIMER_IS_ON_LIST(tp)) {
		if (!list_locked) {
			lck_mtx_lock(listp->mtx);
			list_locked = TRUE;
		}

		if (!TIMER_IS_ON_LIST(tp)) {
			LIST_INSERT_HEAD(&listp->lhead, te, le);
			tp->t_flags |= TF_TIMER_ONLIST;

			listp->entries++;
			if (listp->entries > listp->maxentries)
				listp->maxentries = listp->entries;

			/* if the list is not scheduled, just schedule it */
			if (!listp->scheduled)
				goto schedule;
		}
	}

	/*
	 * Timer entry is currently on the list, check if the list needs
	 * to be rescheduled.
	 */
	if (need_to_resched_timerlist(te->runtime, mode)) {
		tcp_resched_timerlist++;

		if (!list_locked) {
			lck_mtx_lock(listp->mtx);
			list_locked = TRUE;
		}

		VERIFY_NEXT_LINK(te, le);
		VERIFY_PREV_LINK(te, le);

		if (listp->running) {
			listp->pref_mode |= mode;
			if (listp->pref_offset == 0 ||
			    offset < listp->pref_offset) {
				listp->pref_offset = offset;
			}
		} else {
			/*
			 * The list could have been rescheduled while
			 * this thread was waiting for the lock
			 */
			if (listp->scheduled) {
				int32_t diff;
				diff = timer_diff(listp->runtime, 0,
				    tcp_now, offset);
				if (diff <= 0)
					goto done;
				else
					goto schedule;
			} else {
				goto schedule;
			}
		}
	}
	goto done;

schedule:
	/*
	 * Since a connection with timers is getting scheduled, the timer
	 * list moves from idle to active state and that is why idleruns
	 * is reset
	 */
	if (mode & TCP_TIMERLIST_10MS_MODE) {
		listp->mode = TCP_TIMERLIST_10MS_MODE;
		listp->idleruns = 0;
		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
		if (listp->mode > TCP_TIMERLIST_100MS_MODE)
			listp->mode = TCP_TIMERLIST_100MS_MODE;
		listp->idleruns = 0;
		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
	}
	tcp_sched_timerlist(offset);

done:
	if (list_locked)
		lck_mtx_unlock(listp->mtx);

	return;
}
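
/*
 * Editorial note: when the timer thread is mid-run (listp->running),
 * tcp_sched_timers() does not reschedule the list itself; it records
 * the requested offset and mode in pref_offset/pref_mode and lets
 * tcp_run_timerlist() fold those preferences into its own next_timer
 * and next_mode computation before it drops the list lock.
 */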

static inline void
tcp_set_lotimer_index(struct tcpcb *tp)
{
	uint16_t i, lo_index = TCPT_NONE, mode = 0;
	uint32_t lo_timer = 0;
	for (i = 0; i < TCPT_NTIMERS; ++i) {
		if (tp->t_timer[i] != 0) {
			TCP_SET_TIMER_MODE(mode, i);
			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
				lo_timer = tp->t_timer[i];
				lo_index = i;
			}
		}
	}
	tp->tentry.index = lo_index;
	tp->tentry.mode = mode;
	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);

	if (tp->tentry.index != TCPT_NONE) {
		tp->tentry.runtime = tp->tentry.timer_start
		    + tp->t_timer[tp->tentry.index];
		if (tp->tentry.runtime == 0)
			tp->tentry.runtime++;
	}
}
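
/*
 * Worked example (editor's illustration with assumed values): with
 * t_timer[TCPT_REXMT] = 200 and t_timer[TCPT_KEEP] = 7200000, both
 * relative to tentry.timer_start, the loop above leaves tentry.index =
 * TCPT_REXMT, tentry.mode carrying the mode bits of both timers, and
 * tentry.runtime = timer_start + 200.
 */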

void
tcp_check_timer_state(struct tcpcb *tp)
{
	socket_lock_assert_owned(tp->t_inpcb->inp_socket);

	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
		return;

	tcp_set_lotimer_index(tp);

	tcp_sched_timers(tp);
	return;
}

static inline void
tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
{
	/* handle wrap around */
	int32_t diff = (int32_t) (cur - *prev);
	if (diff > 0)
		*dest = diff;
	else
		*dest = 0;
	*prev = cur;
	return;
}

static inline void
tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
{
	/* handle wrap around */
	int64_t diff = (int64_t) (cur - *prev);
	if (diff > 0)
		*dest = diff;
	else
		*dest = 0;
	*prev = cur;
	return;
}
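
#if 0	/* illustration only (editor's sketch, never compiled) */
static void
tcp_cumulative_stat_wrap_example(void)
{
	u_int32_t prev = 0xfffffff0;	/* last snapshot, close to wrap */
	u_int32_t dest = 0;

	/* The live counter has since wrapped around to 0x10. */
	tcp_cumulative_stat(0x10, &prev, &dest);
	/* (int32_t)(0x10 - 0xfffffff0) == 32 > 0, so dest == 32. */
}
#endif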

__private_extern__ void
tcp_report_stats(void)
{
	struct nstat_sysinfo_data data;
	struct sockaddr_in dst;
	struct sockaddr_in6 dst6;
	struct rtentry *rt = NULL;
	static struct tcp_last_report_stats prev;
	u_int64_t var, uptime;

#define	stat	data.u.tcp_stats
	if (((uptime = net_uptime()) - tcp_last_report_time) <
	    tcp_report_stats_interval)
		return;

	tcp_last_report_time = uptime;

	bzero(&data, sizeof(data));
	data.flags = NSTAT_SYSINFO_TCP_STATS;

	bzero(&dst, sizeof(dst));
	dst.sin_len = sizeof(dst);
	dst.sin_family = AF_INET;

	/* ipv4 avg rtt */
	lck_mtx_lock(rnh_lock);
	rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
	    rt_tables[AF_INET], IFSCOPE_NONE);
	lck_mtx_unlock(rnh_lock);
	if (rt != NULL) {
		RT_LOCK(rt);
		if (rt_primary_default(rt, rt_key(rt)) &&
		    rt->rt_stats != NULL) {
			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
		}
		RT_UNLOCK(rt);
		rtfree(rt);
		rt = NULL;
	}

	/* ipv6 avg rtt */
	bzero(&dst6, sizeof(dst6));
	dst6.sin6_len = sizeof(dst6);
	dst6.sin6_family = AF_INET6;

	lck_mtx_lock(rnh_lock);
	rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
	    rt_tables[AF_INET6], IFSCOPE_NONE);
	lck_mtx_unlock(rnh_lock);
	if (rt != NULL) {
		RT_LOCK(rt);
		if (rt_primary_default(rt, rt_key(rt)) &&
		    rt->rt_stats != NULL) {
			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
		}
		RT_UNLOCK(rt);
		rtfree(rt);
		rt = NULL;
	}

	/* send packet loss rate, shift by 10 for precision */
	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
		var = tcpstat.tcps_sndrexmitpack << 10;
		stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
	}
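
	/*
	 * Worked example of the fixed-point math above (editor's
	 * illustration): with 50 retransmits out of 10000 sends,
	 * var = 50 << 10 = 51200 and send_plr = (51200 * 100) / 10000
	 * = 512; dividing by 2^10 recovers the percentage, 0.5%.
	 */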

	/* recv packet loss rate, shift by 10 for precision */
	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
		var = tcpstat.tcps_recovered_pkts << 10;
		stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
	}

	/* RTO after tail loss, shift by 10 for precision */
	if (tcpstat.tcps_sndrexmitpack > 0 &&
	    tcpstat.tcps_tailloss_rto > 0) {
		var = tcpstat.tcps_tailloss_rto << 10;
		stat.send_tlrto_rate =
		    (var * 100) / tcpstat.tcps_sndrexmitpack;
	}

	/* packet reordering */
	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
		var = tcpstat.tcps_reordered_pkts << 10;
		stat.send_reorder_rate =
		    (var * 100) / tcpstat.tcps_sndpack;
	}

	if (tcp_ecn_outbound == 1)
		stat.ecn_client_enabled = 1;
	if (tcp_ecn_inbound == 1)
		stat.ecn_server_enabled = 1;
	tcp_cumulative_stat(tcpstat.tcps_connattempt,
	    &prev.tcps_connattempt, &stat.connection_attempts);
	tcp_cumulative_stat(tcpstat.tcps_accepts,
	    &prev.tcps_accepts, &stat.connection_accepts);
	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);

	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
	    &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
	    &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
	    &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
	    &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
	    &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
	    &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
	    &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
	    &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
	    &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
	    &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
	    &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
	    &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
	    &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
	    &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
	    &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
	    &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
	    &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
	    &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
	    &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
	    &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
	    &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
	    &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
	    &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
	    &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
	    &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
	    &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
	    &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
	    &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);

	nstat_sysinfo_send_data(&data);

#undef	stat
}

void
tcp_interface_send_probe(u_int16_t probe_if_index)
{
	int32_t offset = 0;
	struct tcptimerlist *listp = &tcp_timer_list;

	/* Make sure TCP clock is up to date */
	calculate_tcp_clock();

	lck_mtx_lock(listp->mtx);
	if (listp->probe_if_index > 0) {
		tcpstat.tcps_probe_if_conflict++;
		goto done;
	}

	listp->probe_if_index = probe_if_index;
	if (listp->running)
		goto done;

	/*
	 * Reschedule the timerlist to run within the next 10ms, which is
	 * the fastest that we can do.
	 */
	offset = TCP_TIMER_10MS_QUANTUM;
	if (listp->scheduled) {
		int32_t diff;
		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
		if (diff <= 0) {
			/* The timer will fire sooner than what's needed */
			goto done;
		}
	}
	listp->mode = TCP_TIMERLIST_10MS_MODE;
	listp->idleruns = 0;

	tcp_sched_timerlist(offset);

done:
	lck_mtx_unlock(listp->mtx);
	return;
}

/*
 * Enable read probes on this connection, if:
 * - it is in established state
 * - it has no outstanding data
 * - the outgoing ifp matches
 * - we have not already sent any read probes
 */
static void
tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
{
	if (tp->t_state == TCPS_ESTABLISHED &&
	    tp->snd_max == tp->snd_una &&
	    tp->t_inpcb->inp_last_outifp == ifp &&
	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
	    tp->t_rtimo_probes == 0) {
		tp->t_flagsext |= TF_DETECT_READSTALL;
		tp->t_rtimo_probes = 0;
		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
		    TCP_TIMER_10MS_QUANTUM);
		if (tp->tentry.index == TCPT_NONE) {
			tp->tentry.index = TCPT_KEEP;
			tp->tentry.runtime = tcp_now +
			    TCP_TIMER_10MS_QUANTUM;
		} else {
			int32_t diff = 0;

			/* Reset runtime to be in next 10ms */
			diff = timer_diff(tp->tentry.runtime, 0,
			    tcp_now, TCP_TIMER_10MS_QUANTUM);
			if (diff > 0) {
				tp->tentry.index = TCPT_KEEP;
				tp->tentry.runtime = tcp_now +
				    TCP_TIMER_10MS_QUANTUM;
				if (tp->tentry.runtime == 0)
					tp->tentry.runtime++;
			}
		}
	}
}
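
/*
 * Editorial note: arming TCPT_KEEP with a 10ms offset above is what
 * lets the first read probe go out on the next fast run of the timer
 * list; the pacing of subsequent probes, and the TF_DETECT_READSTALL
 * bookkeeping, are handled by the keepalive timer in tcp_timers().
 */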

/*
 * Disable read probe and reset the keep alive timer
 */
static void
tcp_disable_read_probe(struct tcpcb *tp)
{
	if (tp->t_adaptive_rtimo == 0 &&
	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
	    tp->t_rtimo_probes > 0)) {
		tcp_keepalive_reset(tp);

		if (tp->t_mpsub)
			mptcp_reset_keepalive(tp);
	}
}

/*
 * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
 * probes on connections going over a particular interface.
 */
void
tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
{
	int32_t offset;
	struct tcptimerlist *listp = &tcp_timer_list;
	struct inpcbinfo *pcbinfo = &tcbinfo;
	struct inpcb *inp, *nxt;

	if (ifp == NULL)
		return;

	/* update clock */
	calculate_tcp_clock();

	/*
	 * Enable keep alive timer on all connections that are
	 * active/established on this interface.
	 */
	lck_rw_lock_shared(pcbinfo->ipi_lock);

	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
		struct tcpcb *tp = NULL;
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
		    WNT_STOPUSING)
			continue;

		/* Acquire lock to look at the state of the connection */
		socket_lock(inp->inp_socket, 1);

		/* Release the want count */
		if (inp->inp_ppcb == NULL ||
		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
			socket_unlock(inp->inp_socket, 1);
			continue;
		}
		tp = intotcpcb(inp);
		if (enable)
			tcp_enable_read_probe(tp, ifp);
		else
			tcp_disable_read_probe(tp);

		socket_unlock(inp->inp_socket, 1);
	}
	lck_rw_done(pcbinfo->ipi_lock);

	lck_mtx_lock(listp->mtx);
	if (listp->running) {
		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
		goto done;
	}

	/* Reschedule within the next 10ms */
	offset = TCP_TIMER_10MS_QUANTUM;
	if (listp->scheduled) {
		int32_t diff;
		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
		if (diff <= 0) {
			/* The timer will fire sooner than what's needed */
			goto done;
		}
	}
	listp->mode = TCP_TIMERLIST_10MS_MODE;
	listp->idleruns = 0;

	tcp_sched_timerlist(offset);
done:
	lck_mtx_unlock(listp->mtx);
	return;
}

inline void
tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
{
	struct if_cellular_status_v1 *ifsr;
	u_int32_t optlen;
	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
		optlen = tp->t_maxopd - tp->t_maxseg;

		if (ifsr->mss_recommended ==
		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
		    tp->t_cached_maxopd > 0 &&
		    tp->t_maxopd < tp->t_cached_maxopd) {
			tp->t_maxopd = tp->t_cached_maxopd;
			tcpstat.tcps_mss_to_default++;
		} else if (ifsr->mss_recommended ==
		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
		    tp->t_maxopd > tcp_mss_rec_medium) {
			tp->t_cached_maxopd = tp->t_maxopd;
			tp->t_maxopd = tcp_mss_rec_medium;
			tcpstat.tcps_mss_to_medium++;
		} else if (ifsr->mss_recommended ==
		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
		    tp->t_maxopd > tcp_mss_rec_low) {
			tp->t_cached_maxopd = tp->t_maxopd;
			tp->t_maxopd = tcp_mss_rec_low;
			tcpstat.tcps_mss_to_low++;
		}
		tp->t_maxseg = tp->t_maxopd - optlen;

		/*
		 * Clear the cached value if it is the same as the
		 * current one
		 */
		if (tp->t_maxopd == tp->t_cached_maxopd)
			tp->t_cached_maxopd = 0;
	}
}
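
/*
 * Worked example (editor's illustration; the concrete MSS values are
 * assumptions): with t_maxopd = 1460 and t_maxseg = 1448, optlen = 12
 * (timestamp option). If the driver recommends the medium level and
 * tcp_mss_rec_medium is, say, 1200, then t_maxopd drops to 1200 and
 * t_maxseg to 1188, preserving the 12 option bytes; the old 1460 is
 * cached in t_cached_maxopd so it can be restored when the
 * recommendation returns to none.
 */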

void
tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);

	if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL)
		return;

	if (!IFNET_IS_CELLULAR(ifp)) {
		/*
		 * This optimization is implemented for cellular
		 * networks only
		 */
		return;
	}
	if (tp->t_state <= TCPS_CLOSE_WAIT) {
		/*
		 * If the connection is currently doing or has done PMTU
		 * blackhole detection, do not change the MSS
		 */
		if (tp->t_flags & TF_BLACKHOLE)
			return;
		if (ifp->if_link_status == NULL)
			return;
		tcp_update_mss_core(tp, ifp);
	}
}

void
tcp_itimer(struct inpcbinfo *ipi)
{
	struct inpcb *inp, *nxt;

	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
		if (tcp_itimer_done == TRUE) {
			tcp_itimer_done = FALSE;
			atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
			return;
		}
		/* Upgrade failed and the lock was lost; take it again exclusive */
		lck_rw_lock_exclusive(ipi->ipi_lock);
	}
	tcp_itimer_done = TRUE;

	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
		struct socket *so;
		struct ifnet *ifp;

		if (inp->inp_ppcb == NULL ||
		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
			continue;
		so = inp->inp_socket;
		ifp = inp->inp_last_outifp;
		socket_lock(so, 1);
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			socket_unlock(so, 1);
			continue;
		}
		so_check_extended_bk_idle_time(so);
		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
			tcp_update_mss_locked(so, NULL);
		}
		socket_unlock(so, 1);

		/*
		 * Defunct all system-initiated background sockets if the
		 * socket is using the cellular interface and the interface
		 * has its LQM set to abort.
		 */
		if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
		    IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
		    ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
		    (ifp->if_interface_state.valid_bitmask &
		    IF_INTERFACE_STATE_LQM_STATE_VALID) &&
		    ifp->if_interface_state.lqm_state ==
		    IFNET_LQM_THRESH_ABORT) {
			socket_defunct(current_proc(), so,
			    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
		}
	}

	ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
	lck_rw_done(ipi->ipi_lock);
}