[apple/xnu.git] / bsd / netinet / tcp_timer.c (xnu-4570.20.62)
1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
61 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
62 */
63
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/mbuf.h>
69 #include <sys/sysctl.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/protosw.h>
73 #include <sys/domain.h>
74 #include <sys/mcache.h>
75 #include <sys/queue.h>
76 #include <kern/locks.h>
77 #include <kern/cpu_number.h> /* before tcp_seq.h, for tcp_random18() */
78 #include <mach/boolean.h>
79
80 #include <net/route.h>
81 #include <net/if_var.h>
82 #include <net/ntstat.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/in_pcb.h>
87 #if INET6
88 #include <netinet6/in6_pcb.h>
89 #endif
90 #include <netinet/ip_var.h>
91 #include <netinet/tcp.h>
92 #include <netinet/tcp_cache.h>
93 #include <netinet/tcp_fsm.h>
94 #include <netinet/tcp_seq.h>
95 #include <netinet/tcp_timer.h>
96 #include <netinet/tcp_var.h>
97 #include <netinet/tcp_cc.h>
98 #if INET6
99 #include <netinet6/tcp6_var.h>
100 #endif
101 #include <netinet/tcpip.h>
102 #if TCPDEBUG
103 #include <netinet/tcp_debug.h>
104 #endif
105 #include <sys/kdebug.h>
106 #include <mach/sdt.h>
107 #include <netinet/mptcp_var.h>
108
109 /* Max number of times a stretch ack can be delayed on a connection */
110 #define TCP_STRETCHACK_DELAY_THRESHOLD 5
111
112 /*
113 * If the host processor has been sleeping for too long, this is the threshold
114 * used to avoid sending stale retransmissions.
115 */
116 #define TCP_SLEEP_TOO_LONG (10 * 60 * 1000) /* 10 minutes in ms */
117
118 /* tcp timer list */
119 struct tcptimerlist tcp_timer_list;
120
121 /* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
122 struct tcptailq tcp_tw_tailq;
123
124 static int
125 sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
126 {
127 #pragma unused(arg2)
128 int error, s, tt;
129
130 tt = *(int *)arg1;
131         s = tt * 1000 / TCP_RETRANSHZ;
132
133 error = sysctl_handle_int(oidp, &s, 0, req);
134 if (error || !req->newptr)
135 return (error);
136
137 tt = s * TCP_RETRANSHZ / 1000;
138 if (tt < 1)
139 return (EINVAL);
140
141 *(int *)arg1 = tt;
142 SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
143 return (0);
144 }
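/*
 * Worked example (informal): with TCP_RETRANSHZ ticks per second, writing a
 * value of s milliseconds through this handler stores s * TCP_RETRANSHZ / 1000
 * internal ticks, and reading converts back as tt * 1000 / TCP_RETRANSHZ.
 * Anything that would round down to less than one tick is rejected with EINVAL.
 */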
145
146 #if SYSCTL_SKMEM
147 int tcp_keepinit = TCPTV_KEEP_INIT;
148 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
149 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
150 &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
151 sysctl_msec_to_ticks, "I", "");
152
153 int tcp_keepidle = TCPTV_KEEP_IDLE;
154 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
155 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
156 &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
157 sysctl_msec_to_ticks, "I", "");
158
159 int tcp_keepintvl = TCPTV_KEEPINTVL;
160 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
161 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
162 &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
163 sysctl_msec_to_ticks, "I", "");
164
165 SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
166 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
167 int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");
168
169 int tcp_msl = TCPTV_MSL;
170 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
171 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
172 &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
173 sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
174 #else /* SYSCTL_SKMEM */
175 int tcp_keepinit;
176 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
177 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
178 &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
179
180 int tcp_keepidle;
181 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
182 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
183 &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
184
185 int tcp_keepintvl;
186 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
187 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
188 &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
189
190 int tcp_keepcnt;
191 SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
192 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
193 &tcp_keepcnt, 0, "number of times to repeat keepalive");
194
195 int tcp_msl;
196 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
197 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
198 &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
199 #endif /* SYSCTL_SKMEM */
200
201 /*
202 * Avoid DoS via TCP Robustness in Persist Condition
203 * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
204 * by allowing a system wide maximum persistence timeout value when in
205 * Zero Window Probe mode.
206 *
207  * Expressed in milliseconds to be consistent with other timeout-related
208  * values; the TCP socket option is in seconds.
209 */
210 #if SYSCTL_SKMEM
211 u_int32_t tcp_max_persist_timeout = 0;
212 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
213 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
214 &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
215 sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
216 #else /* SYSCTL_SKMEM */
217 u_int32_t tcp_max_persist_timeout = 0;
218 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
219 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
220 &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
221 "Maximum persistence timeout for ZWP");
222 #endif /* SYSCTL_SKMEM */
223
224 SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
225 CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
226 "Assume SO_KEEPALIVE on all TCP connections");
227
228 /*
229 * This parameter determines how long the timer list will stay in fast or
230 * quick mode even though all connections are idle. In this state, the
231 * timer will run more frequently anticipating new data.
232 */
233 SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
234 CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
235 TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");
236
237 /*
238 * See tcp_syn_backoff[] for interval values between SYN retransmits;
239  * the value set below defines the number of retransmits before we
240  * disable the timestamp and window scaling options during subsequent
241  * SYN retransmits. Setting it to 0 disables dropping those
242  * two options.
243 */
244 SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
245 CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
246 10, "Number of retransmitted SYNs before disabling RFC 1323 "
247 "options on local connections");
248
249 static int tcp_timer_advanced = 0;
250 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
251 CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
252 "Number of times one of the timers was advanced");
253
254 static int tcp_resched_timerlist = 0;
255 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
256 CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
257 "Number of times timer list was rescheduled as part of processing a packet");
258
259 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
260 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
261 "Path MTU Discovery Black Hole Detection");
262
263 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
264 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
265 "Path MTU Discovery Black Hole Detection lowered MSS");
266
267 static u_int32_t tcp_mss_rec_medium = 1200;
268 static u_int32_t tcp_mss_rec_low = 512;
269
270 #define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */
271 int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;
272
273 /* set when garbage collection of "used" sockets was performed in the last run */
274 static boolean_t tcp_gc_done = FALSE;
275
276 /* max idle probes */
277 int tcp_maxpersistidle = TCPTV_KEEP_IDLE;
278
279 /*
280 * TCP delack timer is set to 100 ms. Since the processing of timer list
281 * in fast mode will happen no faster than 100 ms, the delayed ack timer
282  * will fire somewhere between 100 and 200 ms.
283 */
284 int tcp_delack = TCP_RETRANSHZ / 10;
285
286 #if MPTCP
287 /*
288 * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
289 */
290 int tcp_jack_rxmt = TCP_RETRANSHZ / 2;
291 #endif /* MPTCP */
292
293 static boolean_t tcp_itimer_done = FALSE;
294
295 static void tcp_remove_timer(struct tcpcb *tp);
296 static void tcp_sched_timerlist(uint32_t offset);
297 static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
298 u_int16_t probe_if_index);
299 static void tcp_sched_timers(struct tcpcb *tp);
300 static inline void tcp_set_lotimer_index(struct tcpcb *);
301 __private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
302 static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
303 __private_extern__ void tcp_report_stats(void);
304
305 static u_int64_t tcp_last_report_time;
306
307 /*
308 * Structure to store previously reported stats so that we can send
309 * incremental changes in each report interval.
310 */
311 struct tcp_last_report_stats {
312 u_int32_t tcps_connattempt;
313 u_int32_t tcps_accepts;
314 u_int32_t tcps_ecn_client_setup;
315 u_int32_t tcps_ecn_server_setup;
316 u_int32_t tcps_ecn_client_success;
317 u_int32_t tcps_ecn_server_success;
318 u_int32_t tcps_ecn_not_supported;
319 u_int32_t tcps_ecn_lost_syn;
320 u_int32_t tcps_ecn_lost_synack;
321 u_int32_t tcps_ecn_recv_ce;
322 u_int32_t tcps_ecn_recv_ece;
323 u_int32_t tcps_ecn_sent_ece;
324 u_int32_t tcps_ecn_conn_recv_ce;
325 u_int32_t tcps_ecn_conn_recv_ece;
326 u_int32_t tcps_ecn_conn_plnoce;
327 u_int32_t tcps_ecn_conn_pl_ce;
328 u_int32_t tcps_ecn_conn_nopl_ce;
329 u_int32_t tcps_ecn_fallback_synloss;
330 u_int32_t tcps_ecn_fallback_reorder;
331 u_int32_t tcps_ecn_fallback_ce;
332
333 /* TFO-related statistics */
334 u_int32_t tcps_tfo_syn_data_rcv;
335 u_int32_t tcps_tfo_cookie_req_rcv;
336 u_int32_t tcps_tfo_cookie_sent;
337 u_int32_t tcps_tfo_cookie_invalid;
338 u_int32_t tcps_tfo_cookie_req;
339 u_int32_t tcps_tfo_cookie_rcv;
340 u_int32_t tcps_tfo_syn_data_sent;
341 u_int32_t tcps_tfo_syn_data_acked;
342 u_int32_t tcps_tfo_syn_loss;
343 u_int32_t tcps_tfo_blackhole;
344 u_int32_t tcps_tfo_cookie_wrong;
345 u_int32_t tcps_tfo_no_cookie_rcv;
346 u_int32_t tcps_tfo_heuristics_disable;
347 u_int32_t tcps_tfo_sndblackhole;
348
349 /* MPTCP-related statistics */
350 u_int32_t tcps_mptcp_handover_attempt;
351 u_int32_t tcps_mptcp_interactive_attempt;
352 u_int32_t tcps_mptcp_aggregate_attempt;
353 u_int32_t tcps_mptcp_fp_handover_attempt;
354 u_int32_t tcps_mptcp_fp_interactive_attempt;
355 u_int32_t tcps_mptcp_fp_aggregate_attempt;
356 u_int32_t tcps_mptcp_heuristic_fallback;
357 u_int32_t tcps_mptcp_fp_heuristic_fallback;
358 u_int32_t tcps_mptcp_handover_success_wifi;
359 u_int32_t tcps_mptcp_handover_success_cell;
360 u_int32_t tcps_mptcp_interactive_success;
361 u_int32_t tcps_mptcp_aggregate_success;
362 u_int32_t tcps_mptcp_fp_handover_success_wifi;
363 u_int32_t tcps_mptcp_fp_handover_success_cell;
364 u_int32_t tcps_mptcp_fp_interactive_success;
365 u_int32_t tcps_mptcp_fp_aggregate_success;
366 u_int32_t tcps_mptcp_handover_cell_from_wifi;
367 u_int32_t tcps_mptcp_handover_wifi_from_cell;
368 u_int32_t tcps_mptcp_interactive_cell_from_wifi;
369 u_int64_t tcps_mptcp_handover_cell_bytes;
370 u_int64_t tcps_mptcp_interactive_cell_bytes;
371 u_int64_t tcps_mptcp_aggregate_cell_bytes;
372 u_int64_t tcps_mptcp_handover_all_bytes;
373 u_int64_t tcps_mptcp_interactive_all_bytes;
374 u_int64_t tcps_mptcp_aggregate_all_bytes;
375 u_int32_t tcps_mptcp_back_to_wifi;
376 u_int32_t tcps_mptcp_wifi_proxy;
377 u_int32_t tcps_mptcp_cell_proxy;
378 };
379
380
381 /* Returns true if the timer is on the timer list */
382 #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
383
384 /* Run the TCP timerlist at least once every hour */
385 #define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
386
387
388 static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
389 static boolean_t tcp_garbage_collect(struct inpcb *, int);
390
391 #define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))
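/*
 * TIMERENTRY_TO_TP() is a container_of-style conversion: it recovers the
 * enclosing tcpcb from a pointer to its embedded timer entry, relying on
 * tentry.le.le_next being the first field of the entry.  It is only valid
 * for entries that actually live inside a tcpcb (see tcp_run_timerlist()).
 */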
392
393 #define VERIFY_NEXT_LINK(elm,field) do { \
394 if (LIST_NEXT((elm),field) != NULL && \
395 LIST_NEXT((elm),field)->field.le_prev != \
396 &((elm)->field.le_next)) \
397 panic("Bad link elm %p next->prev != elm", (elm)); \
398 } while(0)
399
400 #define VERIFY_PREV_LINK(elm,field) do { \
401 if (*(elm)->field.le_prev != (elm)) \
402 panic("Bad link elm %p prev->next != elm", (elm)); \
403 } while(0)
404
405 #define TCP_SET_TIMER_MODE(mode, i) do { \
406 if (IS_TIMER_HZ_10MS(i)) \
407 (mode) |= TCP_TIMERLIST_10MS_MODE; \
408 else if (IS_TIMER_HZ_100MS(i)) \
409 (mode) |= TCP_TIMERLIST_100MS_MODE; \
410 else \
411 (mode) |= TCP_TIMERLIST_500MS_MODE; \
412 } while(0)
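/*
 * Informal example: if a connection's earliest timer is classified by
 * IS_TIMER_HZ_10MS(), it contributes TCP_TIMERLIST_10MS_MODE to the
 * aggregate mode, so the shared timer list ends up scheduled at the finest
 * granularity that any active connection currently needs.
 */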
413
414 #if (DEVELOPMENT || DEBUG)
415 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
416 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
417 "Medium MSS based on recommendation in link status report");
418 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
419 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
420 "Low MSS based on recommendation in link status report");
421
422 static int32_t tcp_change_mss_recommended = 0;
423 static int
424 sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
425 {
426 #pragma unused(oidp, arg1, arg2)
427 int i, err = 0, changed = 0;
428 struct ifnet *ifp;
429 struct if_link_status ifsr;
430 struct if_cellular_status_v1 *new_cell_sr;
431 err = sysctl_io_number(req, tcp_change_mss_recommended,
432 sizeof (int32_t), &i, &changed);
433 if (changed) {
434 ifnet_head_lock_shared();
435 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
436 if (IFNET_IS_CELLULAR(ifp)) {
437 bzero(&ifsr, sizeof (ifsr));
438 new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
439 ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
440 ifsr.ifsr_len = sizeof(*new_cell_sr);
441
442 /* Set MSS recommended */
443 new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
444 new_cell_sr->mss_recommended = i;
445 err = ifnet_link_status_report(ifp, new_cell_sr, sizeof (new_cell_sr));
446 if (err == 0) {
447 tcp_change_mss_recommended = i;
448 } else {
449 break;
450 }
451 }
452 }
453 ifnet_head_done();
454 }
455 return (err);
456 }
457
458 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
459 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
460 0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");
461
462 SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
463 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
464 "Report stats interval");
465 #endif /* (DEVELOPMENT || DEBUG) */
466
467 /*
468  * Helper to compare two timers. If there is a reset of the sign bit,
469  * it is safe to assume that the timer has wrapped around. By doing
470  * signed comparison, we take care of wrap around such that the value
471  * with the sign bit reset is actually ahead of the other.
472 */
473 inline int32_t
474 timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) {
475 return (int32_t)((t1 + toff1) - (t2 + toff2));
476 }
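/*
 * Wrap-around example: timer_diff(0x10, 0, 0xfffffff0, 0) evaluates to +32,
 * so a timer that has just wrapped to a small value is correctly treated as
 * being ahead of one that is about to wrap.
 */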
477
478 /*
479 * Add to tcp timewait list, delay is given in milliseconds.
480 */
481 static void
482 add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
483 {
484 struct inpcbinfo *pcbinfo = &tcbinfo;
485 struct inpcb *inp = tp->t_inpcb;
486 uint32_t timer;
487
488 /* pcb list should be locked when we get here */
489 LCK_RW_ASSERT(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
490
491 /* We may get here multiple times, so check */
492 if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
493 pcbinfo->ipi_twcount++;
494 inp->inp_flags2 |= INP2_TIMEWAIT;
495
496 /* Remove from global inp list */
497 LIST_REMOVE(inp, inp_list);
498 } else {
499 TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
500 }
501
502 /* Compute the time at which this socket can be closed */
503 timer = tcp_now + delay;
504
505 /* We will use the TCPT_2MSL timer for tracking this delay */
506
507 if (TIMER_IS_ON_LIST(tp))
508 tcp_remove_timer(tp);
509 tp->t_timer[TCPT_2MSL] = timer;
510
511 TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
512 }
513
514 void
515 add_to_time_wait(struct tcpcb *tp, uint32_t delay)
516 {
517 struct inpcbinfo *pcbinfo = &tcbinfo;
518 if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP)
519 socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
520
521 /* 19182803: Notify nstat that connection is closing before waiting. */
522 nstat_pcb_detach(tp->t_inpcb);
523
524 if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
525 socket_unlock(tp->t_inpcb->inp_socket, 0);
526 lck_rw_lock_exclusive(pcbinfo->ipi_lock);
527 socket_lock(tp->t_inpcb->inp_socket, 0);
528 }
529 add_to_time_wait_locked(tp, delay);
530 lck_rw_done(pcbinfo->ipi_lock);
531
532 inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
533 }
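/*
 * Note on add_to_time_wait() above: if the pcbinfo list lock cannot be taken
 * opportunistically, the socket lock is dropped and re-taken around the
 * exclusive acquisition, presumably to respect the list-lock-before-socket-lock
 * ordering; callers should not assume the socket state is unchanged across
 * this call.
 */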
534
535 /* If this is on time wait queue, remove it. */
536 void
537 tcp_remove_from_time_wait(struct inpcb *inp)
538 {
539 struct tcpcb *tp = intotcpcb(inp);
540 if (inp->inp_flags2 & INP2_TIMEWAIT)
541 TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
542 }
543
544 static boolean_t
545 tcp_garbage_collect(struct inpcb *inp, int istimewait)
546 {
547 boolean_t active = FALSE;
548 struct socket *so, *mp_so = NULL;
549 struct tcpcb *tp;
550
551 so = inp->inp_socket;
552 tp = intotcpcb(inp);
553
554 if (so->so_flags & SOF_MP_SUBFLOW) {
555 mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
556 if (!socket_try_lock(mp_so)) {
557 mp_so = NULL;
558 active = TRUE;
559 goto out;
560 }
561 mp_so->so_usecount++;
562 }
563
564 /*
565 * Skip if still in use or busy; it would have been more efficient
566 * if we were to test so_usecount against 0, but this isn't possible
567 * due to the current implementation of tcp_dropdropablreq() where
568 * overflow sockets that are eligible for garbage collection have
569 * their usecounts set to 1.
570 */
571 if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
572 active = TRUE;
573 goto out;
574 }
575
576 /* Check again under the lock */
577 if (so->so_usecount > 1) {
578 if (inp->inp_wantcnt == WNT_STOPUSING)
579 active = TRUE;
580 lck_mtx_unlock(&inp->inpcb_mtx);
581 goto out;
582 }
583
584 if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
585 tp->t_state != TCPS_CLOSED) {
586 /* Become a regular mutex */
587 lck_mtx_convert_spin(&inp->inpcb_mtx);
588 tcp_close(tp);
589 }
590
591 /*
592 * Overflowed socket dropped from the listening queue? Do this
593 * only if we are called to clean up the time wait slots, since
594 * tcp_dropdropablreq() considers a socket to have been fully
595 * dropped after add_to_time_wait() is finished.
596 * Also handle the case of connections getting closed by the peer
597 * while in the queue as seen with rdar://6422317
598 *
599 */
600 if (so->so_usecount == 1 &&
601 ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
602 ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
603 (so->so_head != NULL) &&
604 ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
605 (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) {
606
607 if (inp->inp_state != INPCB_STATE_DEAD) {
608 /* Become a regular mutex */
609 lck_mtx_convert_spin(&inp->inpcb_mtx);
610 #if INET6
611 if (SOCK_CHECK_DOM(so, PF_INET6))
612 in6_pcbdetach(inp);
613 else
614 #endif /* INET6 */
615 in_pcbdetach(inp);
616 }
617 VERIFY(so->so_usecount > 0);
618 so->so_usecount--;
619 if (inp->inp_wantcnt == WNT_STOPUSING)
620 active = TRUE;
621 lck_mtx_unlock(&inp->inpcb_mtx);
622 goto out;
623 } else if (inp->inp_wantcnt != WNT_STOPUSING) {
624 lck_mtx_unlock(&inp->inpcb_mtx);
625 active = FALSE;
626 goto out;
627 }
628
629 /*
630 * We get here because the PCB is no longer searchable
631 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
632 * (usecount is 0). This covers all cases, including overflow
633 * sockets and those that are considered as "embryonic",
634 * i.e. created by sonewconn() in TCP input path, and have
635 * not yet been committed. For the former, we reduce the usecount
636 * to 0 as done by the code above. For the latter, the usecount
637  * would have been reduced to 0 as part of calling soabort() when the
638 * socket is dropped at the end of tcp_input().
639 */
640 if (so->so_usecount == 0) {
641 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
642 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
643 /* Become a regular mutex */
644 lck_mtx_convert_spin(&inp->inpcb_mtx);
645
646 /*
647 * If this tp still happens to be on the timer list,
648 * take it out
649 */
650 if (TIMER_IS_ON_LIST(tp)) {
651 tcp_remove_timer(tp);
652 }
653
654 if (inp->inp_state != INPCB_STATE_DEAD) {
655 #if INET6
656 if (SOCK_CHECK_DOM(so, PF_INET6))
657 in6_pcbdetach(inp);
658 else
659 #endif /* INET6 */
660 in_pcbdetach(inp);
661 }
662
663 if (mp_so) {
664 mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);
665
666 /* so is now unlinked from mp_so - let's drop the lock */
667 socket_unlock(mp_so, 1);
668 mp_so = NULL;
669 }
670
671 in_pcbdispose(inp);
672 active = FALSE;
673 goto out;
674 }
675
676 lck_mtx_unlock(&inp->inpcb_mtx);
677 active = TRUE;
678
679 out:
680 if (mp_so)
681 socket_unlock(mp_so, 1);
682
683 return (active);
684 }
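/*
 * Note on tcp_garbage_collect() above: it returns TRUE while the pcb is still
 * in use (socket referenced, lock contended, or waiting to reach
 * WNT_STOPUSING) so the caller keeps the gc timer armed, and FALSE once the
 * pcb has been detached and disposed of, or needs no further gc work.
 */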
685
686 /*
687 * TCP garbage collector callback (inpcb_timer_func_t).
688 *
689  * Reports, via the ipi_gc_req counters, the number of pcbs that will
690  * need to be gc-ed soon; a non-zero count keeps the gc timer active.
691 */
692 void
693 tcp_gc(struct inpcbinfo *ipi)
694 {
695 struct inpcb *inp, *nxt;
696 struct tcpcb *tw_tp, *tw_ntp;
697 #if TCPDEBUG
698 int ostate;
699 #endif
700 #if KDEBUG
701 static int tws_checked = 0;
702 #endif
703
704 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);
705
706 /*
707 * Update tcp_now here as it may get used while
708 * processing the slow timer.
709 */
710 calculate_tcp_clock();
711
712 /*
713 * Garbage collect socket/tcpcb: We need to acquire the list lock
714 * exclusively to do this
715 */
716
717 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
718 /* don't sweat it this time; cleanup was done last time */
719 if (tcp_gc_done == TRUE) {
720 tcp_gc_done = FALSE;
721 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
722 tws_checked, cur_tw_slot, 0, 0, 0);
723 /* Lock upgrade failed, give up this round */
724 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
725 return;
726 }
727 /* Upgrade failed, lost lock now take it again exclusive */
728 lck_rw_lock_exclusive(ipi->ipi_lock);
729 }
730 tcp_gc_done = TRUE;
731
732 LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
733 if (tcp_garbage_collect(inp, 0))
734 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
735 }
736
737 /* Now cleanup the time wait ones */
738 TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
739 /*
740 * We check the timestamp here without holding the
741 * socket lock for better performance. If there are
742 * any pcbs in time-wait, the timer will get rescheduled.
743 * Hence some error in this check can be tolerated.
744 *
745 * Sometimes a socket on time-wait queue can be closed if
746 * 2MSL timer expired but the application still has a
747 * usecount on it.
748 */
749 if (tw_tp->t_state == TCPS_CLOSED ||
750 TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
751 if (tcp_garbage_collect(tw_tp->t_inpcb, 1))
752 atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
753 }
754 }
755
756 /* take into account pcbs that are still in time_wait_slots */
757 atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);
758
759 lck_rw_done(ipi->ipi_lock);
760
761 /* Clean up the socache while we are here */
762 if (so_cache_timer())
763 atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
764
765 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
766 cur_tw_slot, 0, 0, 0);
767
768 return;
769 }
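/*
 * Note on tcp_gc() above: it sweeps the regular pcb list with istimewait == 0
 * and then the time-wait queue with istimewait == 1, signalling remaining
 * work through the ipi_gc_req counters so the inpcb gc timer keeps firing
 * while anything is left to reclaim.
 */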
770
771 /*
772 * Cancel all timers for TCP tp.
773 */
774 void
775 tcp_canceltimers(struct tcpcb *tp)
776 {
777 int i;
778
779 tcp_remove_timer(tp);
780 for (i = 0; i < TCPT_NTIMERS; i++)
781 tp->t_timer[i] = 0;
782 tp->tentry.timer_start = tcp_now;
783 tp->tentry.index = TCPT_NONE;
784 }
785
786 int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
787 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
788
789 int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
790 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
791
792 static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */
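/*
 * Sanity check: 1 + 2 + 4 + 8 + 16 + 32 + 7 * 64 = 511, matching
 * tcp_totbackoff above; it caps how long the persist timer keeps probing a
 * zero window before the connection is dropped (see TCPT_PERSIST below).
 */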
793
794 void
795 tcp_rexmt_save_state(struct tcpcb *tp)
796 {
797 u_int32_t fsize;
798 if (TSTMP_SUPPORTED(tp)) {
799 /*
800 * Since timestamps are supported on the connection,
801 * we can do recovery as described in rfc 4015.
802 */
803 fsize = tp->snd_max - tp->snd_una;
804 tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
805 tp->snd_recover_prev = tp->snd_recover;
806 } else {
807 /*
808 * Timestamp option is not supported on this connection.
809 * Record ssthresh and cwnd so they can
810 * be recovered if this turns out to be a "bad" retransmit.
811 * A retransmit is considered "bad" if an ACK for this
812 * segment is received within RTT/2 interval; the assumption
813 * here is that the ACK was already in flight. See
814 * "On Estimating End-to-End Network Path Properties" by
815 * Allman and Paxson for more details.
816 */
817 tp->snd_cwnd_prev = tp->snd_cwnd;
818 tp->snd_ssthresh_prev = tp->snd_ssthresh;
819 tp->snd_recover_prev = tp->snd_recover;
820 if (IN_FASTRECOVERY(tp))
821 tp->t_flags |= TF_WASFRECOVERY;
822 else
823 tp->t_flags &= ~TF_WASFRECOVERY;
824 }
825 tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
826 tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
827 tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
828 }
829
830 /*
831 * Revert to the older segment size if there is an indication that PMTU
832 * blackhole detection was not needed.
833 */
834 void
835 tcp_pmtud_revert_segment_size(struct tcpcb *tp)
836 {
837 int32_t optlen;
838
839 VERIFY(tp->t_pmtud_saved_maxopd > 0);
840 tp->t_flags |= TF_PMTUD;
841 tp->t_flags &= ~TF_BLACKHOLE;
842 optlen = tp->t_maxopd - tp->t_maxseg;
843 tp->t_maxopd = tp->t_pmtud_saved_maxopd;
844 tp->t_maxseg = tp->t_maxopd - optlen;
845 /*
846 * Reset the slow-start flight size as it
847 * may depend on the new MSS
848 */
849 if (CC_ALGO(tp)->cwnd_init != NULL)
850 CC_ALGO(tp)->cwnd_init(tp);
851 tp->t_pmtud_start_ts = 0;
852 tcpstat.tcps_pmtudbh_reverted++;
853
854 /* change MSS according to recommendation, if there was one */
855 tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
856 }
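/*
 * Informal example: if t_pmtud_saved_maxopd was 1460 and the connection
 * carries 12 bytes of TCP options (e.g. timestamps), reverting restores
 * t_maxopd to 1460 and t_maxseg to 1460 - 12 = 1448, undoing the temporary
 * blackhole MSS clamp applied in tcp_timers().
 */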
857
858 /*
859 * TCP timer processing.
860 */
861 struct tcpcb *
862 tcp_timers(struct tcpcb *tp, int timer)
863 {
864 int32_t rexmt, optlen = 0, idle_time = 0;
865 struct socket *so;
866 struct tcptemp *t_template;
867 #if TCPDEBUG
868 int ostate;
869 #endif
870
871 #if INET6
872 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
873 #endif /* INET6 */
874 u_int64_t accsleep_ms;
875 u_int32_t last_sleep_ms = 0;
876
877 so = tp->t_inpcb->inp_socket;
878 idle_time = tcp_now - tp->t_rcvtime;
879
880 switch (timer) {
881
882 /*
883 * 2 MSL timeout in shutdown went off. If we're closed but
884 * still waiting for peer to close and connection has been idle
885 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
886 * delete connection control block.
887  * Otherwise (this case shouldn't happen), check again in a bit;
888  * we keep the socket in the main list in that case.
889 */
890 case TCPT_2MSL:
891 tcp_free_sackholes(tp);
892 if (tp->t_state != TCPS_TIME_WAIT &&
893 tp->t_state != TCPS_FIN_WAIT_2 &&
894 ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
895 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
896 (u_int32_t)TCP_CONN_KEEPINTVL(tp));
897 } else {
898 tp = tcp_close(tp);
899 return(tp);
900 }
901 break;
902
903 /*
904 * Retransmission timer went off. Message has not
905 * been acked within retransmit interval. Back off
906 * to a longer retransmit interval and retransmit one segment.
907 */
908 case TCPT_REXMT:
909 absolutetime_to_nanoseconds(mach_absolutetime_asleep,
910 &accsleep_ms);
911 accsleep_ms = accsleep_ms / 1000000UL;
912 if (accsleep_ms > tp->t_accsleep_ms)
913 last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
914 /*
915 * Drop a connection in the retransmit timer
916 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
917 * times
918 * 2. If the time spent in this retransmission episode is
919 * more than the time limit set with TCP_RXT_CONNDROPTIME
920 * socket option
921 * 3. If TCP_RXT_FINDROP socket option was set and
922 * we have already retransmitted the FIN 3 times without
923 * receiving an ack
924 */
925 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
926 (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
927 (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
928 ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
929 (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
930 (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
931 if (tp->t_state == TCPS_ESTABLISHED &&
932 tp->t_rxt_minimum_timeout > 0) {
933 /*
934 * Avoid dropping a connection if minimum
935 * timeout is set and that time did not
936 * pass. We will retry sending
937 * retransmissions at the maximum interval
938 */
939 if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
940 tp->t_rxt_minimum_timeout))) {
941 tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
942 goto retransmit_packet;
943 }
944 }
945 if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
946 tcpstat.tcps_rxtfindrop++;
947 } else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
948 tcpstat.tcps_drop_after_sleep++;
949 } else {
950 tcpstat.tcps_timeoutdrop++;
951 }
952 if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
953 if (TCP_ECN_ENABLED(tp)) {
954 INP_INC_IFNET_STAT(tp->t_inpcb,
955 ecn_on.rxmit_drop);
956 } else {
957 INP_INC_IFNET_STAT(tp->t_inpcb,
958 ecn_off.rxmit_drop);
959 }
960 }
961 tp->t_rxtshift = TCP_MAXRXTSHIFT;
962 postevent(so, 0, EV_TIMEOUT);
963 soevent(so,
964 (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
965
966 if (TCP_ECN_ENABLED(tp) &&
967 tp->t_state == TCPS_ESTABLISHED)
968 tcp_heuristic_ecn_droprxmt(tp);
969
970 tp = tcp_drop(tp, tp->t_softerror ?
971 tp->t_softerror : ETIMEDOUT);
972
973 break;
974 }
975 retransmit_packet:
976 tcpstat.tcps_rexmttimeo++;
977 tp->t_accsleep_ms = accsleep_ms;
978
979 if (tp->t_rxtshift == 1 &&
980 tp->t_state == TCPS_ESTABLISHED) {
981 /* Set the time at which retransmission started. */
982 tp->t_rxtstart = tcp_now;
983
984 /*
985 * if this is the first retransmit timeout, save
986 * the state so that we can recover if the timeout
987 * is spurious.
988 */
989 tcp_rexmt_save_state(tp);
990 }
991 #if MPTCP
992 if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
993 (tp->t_state == TCPS_ESTABLISHED) &&
994 (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
995 mptcp_act_on_txfail(so);
996
997 }
998
999 if (so->so_flags & SOF_MP_SUBFLOW) {
1000 struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1001
1002 mptcp_check_subflows_and_add(mpte);
1003 }
1004 #endif /* MPTCP */
1005
1006 if (tp->t_adaptive_wtimo > 0 &&
1007 tp->t_rxtshift > tp->t_adaptive_wtimo &&
1008 TCPS_HAVEESTABLISHED(tp->t_state)) {
1009 /* Send an event to the application */
1010 soevent(so,
1011 (SO_FILT_HINT_LOCKED|
1012 SO_FILT_HINT_ADAPTIVE_WTIMO));
1013 }
1014
1015 /*
1016 * If this is a retransmit timeout after PTO, the PTO
1017 * was not effective
1018 */
1019 if (tp->t_flagsext & TF_SENT_TLPROBE) {
1020 tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1021 tcpstat.tcps_rto_after_pto++;
1022 }
1023
1024 if (tp->t_flagsext & TF_DELAY_RECOVERY) {
1025 /*
1026 * Retransmit timer fired before entering recovery
1027 * on a connection with packet re-ordering. This
1028 * suggests that the reordering metrics computed
1029 * are not accurate.
1030 */
1031 tp->t_reorderwin = 0;
1032 tp->t_timer[TCPT_DELAYFR] = 0;
1033 tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
1034 }
1035
1036 if (tp->t_state == TCPS_SYN_RECEIVED)
1037 tcp_disable_tfo(tp);
1038
1039 if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1040 (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
1041 !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
1042 ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
1043 tp->t_rxtshift > 2)) {
1044 /*
1045  * For regular retransmissions, a first one is already being
1046  * done as a tail-loss probe.
1047  * Thus, if rxtshift > 1, this means we have sent the segment
1048  * a total of 3 times.
1049  *
1050  * If we are in SYN-SENT state, then there is no tail-loss
1051  * probe, thus we have to let rxtshift go up to 3.
1052 */
1053 tcp_heuristic_tfo_middlebox(tp);
1054
1055 so->so_error = ENODATA;
1056 sorwakeup(so);
1057 sowwakeup(so);
1058
1059 tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
1060 tcpstat.tcps_tfo_sndblackhole++;
1061 }
1062
1063 if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1064 (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
1065 tp->t_rxtshift > 1) {
1066 if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
1067 tcp_heuristic_tfo_middlebox(tp);
1068
1069 so->so_error = ENODATA;
1070 sorwakeup(so);
1071 sowwakeup(so);
1072 }
1073 }
1074
1075 if (tp->t_state == TCPS_SYN_SENT) {
1076 rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
1077 tp->t_stat.synrxtshift = tp->t_rxtshift;
1078
1079 /* When retransmitting, disable TFO */
1080 if (tfo_enabled(tp) &&
1081 (!(so->so_flags1 & SOF1_DATA_AUTHENTICATED) ||
1082 (tp->t_flagsext & TF_FASTOPEN_HEUR))) {
1083 tp->t_flagsext &= ~TF_FASTOPEN;
1084 tp->t_tfo_flags |= TFO_F_SYN_LOSS;
1085 }
1086 } else {
1087 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
1088 }
1089
1090 TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
1091 TCP_ADD_REXMTSLOP(tp));
1092 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
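 /*
 * Informal note: the retransmit interval scales by tcp_backoff[t_rxtshift]
 * (doubling per attempt, capped at a factor of 64) and is clamped between
 * t_rttmin and TCPTV_REXMTMAX plus TCP_ADD_REXMTSLOP(tp), so repeated
 * timeouts back off quickly without exceeding the global ceiling.
 */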
1093
1094 if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb))
1095 goto fc_output;
1096
1097 tcp_free_sackholes(tp);
1098 /*
1099 * Check for potential Path MTU Discovery Black Hole
1100 */
1101 if (tcp_pmtud_black_hole_detect &&
1102 !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
1103 (tp->t_state == TCPS_ESTABLISHED)) {
1104 if ((tp->t_flags & TF_PMTUD) &&
1105 ((tp->t_flags & TF_MAXSEGSNT)
1106 || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) &&
1107 tp->t_rxtshift == 2) {
1108 /*
1109 * Enter Path MTU Black-hole Detection mechanism:
1110 * - Disable Path MTU Discovery (IP "DF" bit).
1111 * - Reduce MTU to lower value than what we
1112 * negotiated with the peer.
1113 */
1114 /* Disable Path MTU Discovery for now */
1115 tp->t_flags &= ~TF_PMTUD;
1116 /* Record that we may have found a black hole */
1117 tp->t_flags |= TF_BLACKHOLE;
1118 optlen = tp->t_maxopd - tp->t_maxseg;
1119 /* Keep track of previous MSS */
1120 tp->t_pmtud_saved_maxopd = tp->t_maxopd;
1121 tp->t_pmtud_start_ts = tcp_now;
1122 if (tp->t_pmtud_start_ts == 0)
1123 tp->t_pmtud_start_ts++;
1124 /* Reduce the MSS to intermediary value */
1125 if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
1126 tp->t_maxopd = tcp_pmtud_black_hole_mss;
1127 } else {
1128 tp->t_maxopd = /* use the default MSS */
1129 #if INET6
1130 isipv6 ? tcp_v6mssdflt :
1131 #endif /* INET6 */
1132 tcp_mssdflt;
1133 }
1134 tp->t_maxseg = tp->t_maxopd - optlen;
1135
1136 /*
1137 * Reset the slow-start flight size
1138 * as it may depend on the new MSS
1139 */
1140 if (CC_ALGO(tp)->cwnd_init != NULL)
1141 CC_ALGO(tp)->cwnd_init(tp);
1142 tp->snd_cwnd = tp->t_maxseg;
1143 }
1144 /*
1145 * If further retransmissions are still
1146 * unsuccessful with a lowered MTU, maybe this
1147 * isn't a Black Hole and we restore the previous
1148 * MSS and blackhole detection flags.
1149 */
1150 else {
1151
1152 if ((tp->t_flags & TF_BLACKHOLE) &&
1153 (tp->t_rxtshift > 4)) {
1154 tcp_pmtud_revert_segment_size(tp);
1155 tp->snd_cwnd = tp->t_maxseg;
1156 }
1157 }
1158 }
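 /*
 * Informal summary: on the 2nd retransmit of a full-sized segment with PMTUD
 * enabled, the MSS is clamped to tcp_pmtud_black_hole_mss (1200 by default
 * above) and DF-based discovery is turned off; if retransmits still fail past
 * the 4th attempt, the clamp is undone via tcp_pmtud_revert_segment_size().
 */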
1159
1160
1161 /*
1162 * Disable rfc1323 and rfc1644 if we haven't got any
1163 * response to our SYN (after we reach the threshold)
1164 * to work-around some broken terminal servers (most of
1165 * which have hopefully been retired) that have bad VJ
1166 * header compression code which trashes TCP segments
1167 * containing unknown-to-them TCP options.
1168 * Do this only on non-local connections.
1169 */
1170 if (tp->t_state == TCPS_SYN_SENT &&
1171 tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres)
1172 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
1173
1174 /*
1175 * If losing, let the lower level know and try for
1176 * a better route. Also, if we backed off this far,
1177 * our srtt estimate is probably bogus. Clobber it
1178 * so we'll take the next rtt measurement as our srtt;
1179 * move the current srtt into rttvar to keep the current
1180 * retransmit times until then.
1181 */
1182 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
1183 #if INET6
1184 if (isipv6)
1185 in6_losing(tp->t_inpcb);
1186 else
1187 #endif /* INET6 */
1188 in_losing(tp->t_inpcb);
1189 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
1190 tp->t_srtt = 0;
1191 }
1192 tp->snd_nxt = tp->snd_una;
1193 /*
1194 * Note: We overload snd_recover to function also as the
1195 * snd_last variable described in RFC 2582
1196 */
1197 tp->snd_recover = tp->snd_max;
1198 /*
1199 * Force a segment to be sent.
1200 */
1201 tp->t_flags |= TF_ACKNOW;
1202
1203 /* If timing a segment in this window, stop the timer */
1204 tp->t_rtttime = 0;
1205
1206 if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1)
1207 tcpstat.tcps_tailloss_rto++;
1208
1209
1210 /*
1211 * RFC 5681 says: when a TCP sender detects segment loss
1212 * using retransmit timer and the given segment has already
1213 * been retransmitted by way of the retransmission timer at
1214 * least once, the value of ssthresh is held constant
1215 */
1216 if (tp->t_rxtshift == 1 &&
1217 CC_ALGO(tp)->after_timeout != NULL) {
1218 CC_ALGO(tp)->after_timeout(tp);
1219 /*
1220 * CWR notifications are to be sent on new data
1221 * right after Fast Retransmits and ECE
1222 * notification receipts.
1223 */
1224 if (TCP_ECN_ENABLED(tp))
1225 tp->ecn_flags |= TE_SENDCWR;
1226 }
1227
1228 EXIT_FASTRECOVERY(tp);
1229
1230 /* Exit cwnd non validated phase */
1231 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
1232
1233
1234 fc_output:
1235 tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);
1236
1237 (void) tcp_output(tp);
1238 break;
1239
1240 /*
1241  * Persistence timer into zero window.
1242 * Force a byte to be output, if possible.
1243 */
1244 case TCPT_PERSIST:
1245 tcpstat.tcps_persisttimeo++;
1246 /*
1247 * Hack: if the peer is dead/unreachable, we do not
1248 * time out if the window is closed. After a full
1249 * backoff, drop the connection if the idle time
1250 * (no responses to probes) reaches the maximum
1251 * backoff that we would use if retransmitting.
1252 *
1253 * Drop the connection if we reached the maximum allowed time for
1254 * Zero Window Probes without a non-zero update from the peer.
1255 * See rdar://5805356
1256 */
1257 if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
1258 (idle_time >= tcp_maxpersistidle ||
1259 idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
1260 ((tp->t_persist_stop != 0) &&
1261 TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
1262 tcpstat.tcps_persistdrop++;
1263 postevent(so, 0, EV_TIMEOUT);
1264 soevent(so,
1265 (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
1266 tp = tcp_drop(tp, ETIMEDOUT);
1267 break;
1268 }
1269 tcp_setpersist(tp);
1270 tp->t_flagsext |= TF_FORCE;
1271 (void) tcp_output(tp);
1272 tp->t_flagsext &= ~TF_FORCE;
1273 break;
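 /*
 * Note (assumption): t_persist_stop is expected to be armed from the
 * system-wide tcp_max_persist_timeout sysctl / per-socket option described
 * near the top of this file, which is what bounds zero window probing here
 * independently of the backoff-based idle check.
 */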
1274
1275 /*
1276 * Keep-alive timer went off; send something
1277 * or drop connection if idle for too long.
1278 */
1279 case TCPT_KEEP:
1280 tcpstat.tcps_keeptimeo++;
1281 #if MPTCP
1282 /*
1283  * Regular TCP connections do not send keepalives after closing.
1284  * MPTCP must not either, after sending Data FINs.
1285 */
1286 struct mptcb *mp_tp = tptomptp(tp);
1287 if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
1288 (tp->t_state > TCPS_ESTABLISHED)) {
1289 goto dropit;
1290 } else if (mp_tp != NULL) {
1291 if ((mptcp_ok_to_keepalive(mp_tp) == 0))
1292 goto dropit;
1293 }
1294 #endif /* MPTCP */
1295 if (tp->t_state < TCPS_ESTABLISHED)
1296 goto dropit;
1297 if ((always_keepalive ||
1298 (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
1299 (tp->t_flagsext & TF_DETECT_READSTALL) ||
1300 (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
1301 (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
1302 if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp))
1303 goto dropit;
1304 /*
1305 * Send a packet designed to force a response
1306 * if the peer is up and reachable:
1307 * either an ACK if the connection is still alive,
1308 * or an RST if the peer has closed the connection
1309 * due to timeout or reboot.
1310 * Using sequence number tp->snd_una-1
1311 * causes the transmitted zero-length segment
1312 * to lie outside the receive window;
1313 * by the protocol spec, this requires the
1314 * correspondent TCP to respond.
1315 */
1316 tcpstat.tcps_keepprobe++;
1317 t_template = tcp_maketemplate(tp);
1318 if (t_template) {
1319 struct inpcb *inp = tp->t_inpcb;
1320 struct tcp_respond_args tra;
1321
1322 bzero(&tra, sizeof(tra));
1323 tra.nocell = INP_NO_CELLULAR(inp);
1324 tra.noexpensive = INP_NO_EXPENSIVE(inp);
1325 tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
1326 tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
1327 if (tp->t_inpcb->inp_flags & INP_BOUND_IF)
1328 tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
1329 else
1330 tra.ifscope = IFSCOPE_NONE;
1331 tcp_respond(tp, t_template->tt_ipgen,
1332 &t_template->tt_t, (struct mbuf *)NULL,
1333 tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
1334 (void) m_free(dtom(t_template));
1335 if (tp->t_flagsext & TF_DETECT_READSTALL)
1336 tp->t_rtimo_probes++;
1337 }
1338 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1339 TCP_CONN_KEEPINTVL(tp));
1340 } else {
1341 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1342 TCP_CONN_KEEPIDLE(tp));
1343 }
1344 if (tp->t_flagsext & TF_DETECT_READSTALL) {
1345 struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
1346 bool reenable_probe = false;
1347 /*
1348 * The keep alive packets sent to detect a read
1349 * stall did not get a response from the
1350 * peer. Generate more keep-alives to confirm this.
1351 * If the number of probes sent reaches the limit,
1352 * generate an event.
1353 */
1354 if (tp->t_adaptive_rtimo > 0) {
1355 if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
1356 /* Generate an event */
1357 soevent(so,
1358 (SO_FILT_HINT_LOCKED |
1359 SO_FILT_HINT_ADAPTIVE_RTIMO));
1360 tcp_keepalive_reset(tp);
1361 } else {
1362 reenable_probe = true;
1363 }
1364 } else if (outifp != NULL &&
1365 (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
1366 tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
1367 reenable_probe = true;
1368 } else {
1369 tp->t_flagsext &= ~TF_DETECT_READSTALL;
1370 }
1371 if (reenable_probe) {
1372 int ind = min(tp->t_rtimo_probes,
1373 TCP_MAXRXTSHIFT);
1374 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
1375 tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
1376 }
1377 }
1378 if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
1379 int ind;
1380
1381 tp->t_tfo_probes++;
1382 ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);
1383
1384 /*
1385 * We take the minimum among the time set by true
1386 * keepalive (see above) and the backoff'd RTO. That
1387 * way we backoff in case of packet-loss but will never
1388 * timeout slower than regular keepalive due to the
1389 * backing off.
1390 */
1391 tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
1392 tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
1393 tp->t_timer[TCPT_KEEP]);
1394 } else if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1395 tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
1396 /* Still no data! Let's assume a TFO-error and err out... */
1397 tcp_heuristic_tfo_middlebox(tp);
1398
1399 so->so_error = ENODATA;
1400 sorwakeup(so);
1401 tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
1402 tcpstat.tcps_tfo_blackhole++;
1403 }
1404 break;
1405 case TCPT_DELACK:
1406 if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
1407 tp->t_flags &= ~TF_DELACK;
1408 tp->t_timer[TCPT_DELACK] = 0;
1409 tp->t_flags |= TF_ACKNOW;
1410
1411 /*
1412 * If delayed ack timer fired while stretching
1413 * acks, count the number of times the streaming
1414 * detection was not correct. If this exceeds a
1415  * threshold, disable stretch ack on this
1416 * connection
1417 *
1418 * Also, go back to acking every other packet.
1419 */
1420 if ((tp->t_flags & TF_STRETCHACK)) {
1421 if (tp->t_unacksegs > 1 &&
1422 tp->t_unacksegs < maxseg_unacked)
1423 tp->t_stretchack_delayed++;
1424
1425 if (tp->t_stretchack_delayed >
1426 TCP_STRETCHACK_DELAY_THRESHOLD) {
1427 tp->t_flagsext |= TF_DISABLE_STRETCHACK;
1428 /*
1429 * Note the time at which stretch
1430 * ack was disabled automatically
1431 */
1432 tp->rcv_nostrack_ts = tcp_now;
1433 tcpstat.tcps_nostretchack++;
1434 tp->t_stretchack_delayed = 0;
1435 tp->rcv_nostrack_pkts = 0;
1436 }
1437 tcp_reset_stretch_ack(tp);
1438 }
1439
1440 /*
1441 * If we are measuring inter packet arrival jitter
1442 * for throttling a connection, this delayed ack
1443 * might be the reason for accumulating some
1444 * jitter. So let's restart the measurement.
1445 */
1446 CLEAR_IAJ_STATE(tp);
1447
1448 tcpstat.tcps_delack++;
1449 (void) tcp_output(tp);
1450 }
1451 break;
1452
1453 #if MPTCP
1454 case TCPT_JACK_RXMT:
1455 if ((tp->t_state == TCPS_ESTABLISHED) &&
1456 (tp->t_mpflags & TMPF_PREESTABLISHED) &&
1457 (tp->t_mpflags & TMPF_JOINED_FLOW)) {
1458 if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
1459 tcpstat.tcps_timeoutdrop++;
1460 postevent(so, 0, EV_TIMEOUT);
1461 soevent(so,
1462 (SO_FILT_HINT_LOCKED|
1463 SO_FILT_HINT_TIMEOUT));
1464 tp = tcp_drop(tp, tp->t_softerror ?
1465 tp->t_softerror : ETIMEDOUT);
1466 break;
1467 }
1468 tcpstat.tcps_join_rxmts++;
1469 tp->t_mpflags |= TMPF_SND_JACK;
1470 tp->t_flags |= TF_ACKNOW;
1471
1472 /*
1473 * No backoff is implemented for simplicity for this
1474 * corner case.
1475 */
1476 (void) tcp_output(tp);
1477 }
1478 break;
1479 #endif /* MPTCP */
1480
1481 case TCPT_PTO:
1482 {
1483 int32_t snd_len;
1484 tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1485
1486 /*
1487 * Check if the connection is in the right state to
1488 * send a probe
1489 */
1490 if (tp->t_state != TCPS_ESTABLISHED ||
1491 (tp->t_rxtshift > 0 && !(tp->t_flagsext & TF_PROBING))
1492 || tp->snd_max == tp->snd_una ||
1493 !SACK_ENABLED(tp) || !TAILQ_EMPTY(&tp->snd_holes) ||
1494 IN_FASTRECOVERY(tp))
1495 break;
1496
1497 /*
1498 * If there is no new data to send or if the
1499 * connection is limited by receive window then
1500 * retransmit the last segment, otherwise send
1501 * new data.
1502 */
1503 snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
1504 - (tp->snd_max - tp->snd_una);
1505 if (snd_len > 0) {
1506 tp->snd_nxt = tp->snd_max;
1507 } else {
1508 snd_len = min((tp->snd_max - tp->snd_una),
1509 tp->t_maxseg);
1510 tp->snd_nxt = tp->snd_max - snd_len;
1511 }
1512
1513 tcpstat.tcps_pto++;
1514 if (tp->t_flagsext & TF_PROBING)
1515 tcpstat.tcps_probe_if++;
1516
1517 /* If timing a segment in this window, stop the timer */
1518 tp->t_rtttime = 0;
1519 /* Note that tail loss probe is being sent */
1520 tp->t_flagsext |= TF_SENT_TLPROBE;
1521 tp->t_tlpstart = tcp_now;
1522
1523 tp->snd_cwnd += tp->t_maxseg;
1524 (void )tcp_output(tp);
1525 tp->snd_cwnd -= tp->t_maxseg;
1526
1527 tp->t_tlphighrxt = tp->snd_nxt;
1528 break;
1529 }
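 /*
 * Note on the TCPT_PTO case above: snd_cwnd is inflated by one t_maxseg only
 * around tcp_output() so the probe can leave even with an exhausted window,
 * and t_tlphighrxt records snd_nxt so that (presumably) the ACK-processing
 * path can later tell whether the probe itself repaired a tail loss.
 */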
1530 case TCPT_DELAYFR:
1531 tp->t_flagsext &= ~TF_DELAY_RECOVERY;
1532
1533 /*
1534 * Don't do anything if one of the following is true:
1535 * - the connection is already in recovery
1536  * - everything up to snd_recover has been acknowledged
1537 * - retransmit timeout has fired
1538 */
1539 if (IN_FASTRECOVERY(tp) ||
1540 SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
1541 tp->t_rxtshift > 0)
1542 break;
1543
1544 VERIFY(SACK_ENABLED(tp));
1545 tcp_rexmt_save_state(tp);
1546 if (CC_ALGO(tp)->pre_fr != NULL) {
1547 CC_ALGO(tp)->pre_fr(tp);
1548 if (TCP_ECN_ENABLED(tp))
1549 tp->ecn_flags |= TE_SENDCWR;
1550 }
1551 ENTER_FASTRECOVERY(tp);
1552
1553 tp->t_timer[TCPT_REXMT] = 0;
1554 tcpstat.tcps_sack_recovery_episode++;
1555 tp->t_sack_recovery_episode++;
1556 tp->sack_newdata = tp->snd_nxt;
1557 tp->snd_cwnd = tp->t_maxseg;
1558 tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
1559 (void) tcp_output(tp);
1560 break;
1561 dropit:
1562 tcpstat.tcps_keepdrops++;
1563 postevent(so, 0, EV_TIMEOUT);
1564 soevent(so,
1565 (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
1566 tp = tcp_drop(tp, ETIMEDOUT);
1567 break;
1568 }
1569 #if TCPDEBUG
1570 if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
1571 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
1572 PRU_SLOWTIMO);
1573 #endif
1574 return (tp);
1575 }
1576
1577 /* Remove a timer entry from timer list */
1578 void
1579 tcp_remove_timer(struct tcpcb *tp)
1580 {
1581 struct tcptimerlist *listp = &tcp_timer_list;
1582
1583 socket_lock_assert_owned(tp->t_inpcb->inp_socket);
1584 if (!(TIMER_IS_ON_LIST(tp))) {
1585 return;
1586 }
1587 lck_mtx_lock(listp->mtx);
1588
1589 /* Check if pcb is on timer list again after acquiring the lock */
1590 if (!(TIMER_IS_ON_LIST(tp))) {
1591 lck_mtx_unlock(listp->mtx);
1592 return;
1593 }
1594
1595 if (listp->next_te != NULL && listp->next_te == &tp->tentry)
1596 listp->next_te = LIST_NEXT(&tp->tentry, le);
1597
1598 LIST_REMOVE(&tp->tentry, le);
1599 tp->t_flags &= ~(TF_TIMER_ONLIST);
1600
1601 listp->entries--;
1602
1603 tp->tentry.le.le_next = NULL;
1604 tp->tentry.le.le_prev = NULL;
1605 lck_mtx_unlock(listp->mtx);
1606 }
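/*
 * Note on tcp_remove_timer() above: TIMER_IS_ON_LIST is checked once without
 * the list lock as a fast path and again after acquiring it, since another
 * thread may have removed the entry in between.
 */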
1607
1608 /*
1609 * Function to check if the timerlist needs to be rescheduled to run
1610 * the timer entry correctly. Basically, this is to check if we can avoid
1611 * taking the list lock.
1612 */
1613
1614 static boolean_t
1615 need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
1616 {
1617 struct tcptimerlist *listp = &tcp_timer_list;
1618 int32_t diff;
1619
1620 /*
1621 * If the list is being processed then the state of the list is
1622 * in flux. In this case always acquire the lock and set the state
1623 * correctly.
1624 */
1625 if (listp->running)
1626 return (TRUE);
1627
1628 if (!listp->scheduled)
1629 return (TRUE);
1630
1631 diff = timer_diff(listp->runtime, 0, runtime, 0);
1632 if (diff <= 0) {
1633 /* The list is going to run before this timer */
1634 return (FALSE);
1635 } else {
1636 if (mode & TCP_TIMERLIST_10MS_MODE) {
1637 if (diff <= TCP_TIMER_10MS_QUANTUM)
1638 return (FALSE);
1639 } else if (mode & TCP_TIMERLIST_100MS_MODE) {
1640 if (diff <= TCP_TIMER_100MS_QUANTUM)
1641 return (FALSE);
1642 } else {
1643 if (diff <= TCP_TIMER_500MS_QUANTUM)
1644 return (FALSE);
1645 }
1646 }
1647 return (TRUE);
1648 }
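/*
 * Note on need_to_resched_timerlist() above: rescheduling is skipped when the
 * list is already due to run earlier than, or within one quantum of, the
 * requested runtime for the given mode; this is what lets most packets avoid
 * taking the timer-list lock.
 */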
1649
1650 void
1651 tcp_sched_timerlist(uint32_t offset)
1652 {
1653 uint64_t deadline = 0;
1654 struct tcptimerlist *listp = &tcp_timer_list;
1655
1656 LCK_MTX_ASSERT(listp->mtx, LCK_MTX_ASSERT_OWNED);
1657
1658 offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
1659 listp->runtime = tcp_now + offset;
1660 listp->schedtime = tcp_now;
1661 if (listp->runtime == 0) {
1662 listp->runtime++;
1663 offset++;
1664 }
1665
1666 clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
1667
1668 thread_call_enter_delayed(listp->call, deadline);
1669 listp->scheduled = TRUE;
1670 }
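/*
 * Note on tcp_sched_timerlist() above: "offset" is in TCP timer ticks, which
 * this file treats as milliseconds (see the tcp_delack comment); since
 * USEC_PER_SEC (1,000,000) equals the number of nanoseconds in a millisecond,
 * clock_interval_to_deadline() produces a deadline roughly "offset" ms out,
 * capped at TCP_TIMERLIST_MAX_OFFSET (one hour).
 */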
1671
1672 /*
1673 * Function to run the timers for a connection.
1674 *
1675 * Returns the offset of next timer to be run for this connection which
1676 * can be used to reschedule the timerlist.
1677 *
1678 * te_mode is an out parameter that indicates the modes of active
1679 * timers for this connection.
1680 */
1681 u_int32_t
1682 tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
1683 u_int16_t probe_if_index)
1684 {
1685 struct socket *so;
1686 u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
1687 u_int32_t timer_val, offset = 0, lo_timer = 0;
1688 int32_t diff;
1689 boolean_t needtorun[TCPT_NTIMERS];
1690 int count = 0;
1691
1692 VERIFY(tp != NULL);
1693 bzero(needtorun, sizeof(needtorun));
1694 *te_mode = 0;
1695
1696 socket_lock(tp->t_inpcb->inp_socket, 1);
1697
1698 so = tp->t_inpcb->inp_socket;
1699 /* Release the want count on inp */
1700 if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
1701 == WNT_STOPUSING) {
1702 if (TIMER_IS_ON_LIST(tp)) {
1703 tcp_remove_timer(tp);
1704 }
1705
1706 /* Looks like the TCP connection got closed while we
1707  * were waiting for the lock. Done.
1708 */
1709 goto done;
1710 }
1711
1712 /*
1713 * If this connection is over an interface that needs to
1714 * be probed, send probe packets to reinitiate communication.
1715 */
1716 if (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL &&
1717 tp->t_inpcb->inp_last_outifp->if_index == probe_if_index) {
1718 tp->t_flagsext |= TF_PROBING;
1719 tcp_timers(tp, TCPT_PTO);
1720 tp->t_timer[TCPT_PTO] = 0;
1721 tp->t_flagsext &= ~TF_PROBING;
1722 }
1723
1724 /*
1725 * Since the timer thread needs to wait for tcp lock, it may race
1726 * with another thread that can cancel or reschedule the timer
1727 * that is about to run. Check if we need to run anything.
1728 */
1729 if ((index = tp->tentry.index) == TCPT_NONE)
1730 goto done;
1731
1732 timer_val = tp->t_timer[index];
1733
1734 diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
1735 if (diff > 0) {
1736 if (tp->tentry.index != TCPT_NONE) {
1737 offset = diff;
1738 *(te_mode) = tp->tentry.mode;
1739 }
1740 goto done;
1741 }
1742
1743 tp->t_timer[index] = 0;
1744 if (timer_val > 0) {
1745 tp = tcp_timers(tp, index);
1746 if (tp == NULL)
1747 goto done;
1748 }
1749
1750 /*
1751 * Check if there are any other timers that need to be run.
1752 * While doing it, adjust the timer values with respect to tcp_now.
1753 */
1754 tp->tentry.mode = 0;
1755 for (i = 0; i < TCPT_NTIMERS; ++i) {
1756 if (tp->t_timer[i] != 0) {
1757 diff = timer_diff(tp->tentry.timer_start,
1758 tp->t_timer[i], tcp_now, 0);
1759 if (diff <= 0) {
1760 needtorun[i] = TRUE;
1761 count++;
1762 } else {
1763 tp->t_timer[i] = diff;
1764 needtorun[i] = FALSE;
1765 if (lo_timer == 0 || diff < lo_timer) {
1766 lo_timer = diff;
1767 lo_index = i;
1768 }
1769 TCP_SET_TIMER_MODE(tp->tentry.mode, i);
1770 }
1771 }
1772 }
1773
1774 tp->tentry.timer_start = tcp_now;
1775 tp->tentry.index = lo_index;
1776 VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
1777
1778 if (tp->tentry.index != TCPT_NONE) {
1779 tp->tentry.runtime = tp->tentry.timer_start +
1780 tp->t_timer[tp->tentry.index];
1781 if (tp->tentry.runtime == 0)
1782 tp->tentry.runtime++;
1783 }
1784
1785 if (count > 0) {
1786 /* run any other timers outstanding at this time. */
1787 for (i = 0; i < TCPT_NTIMERS; ++i) {
1788 if (needtorun[i]) {
1789 tp->t_timer[i] = 0;
1790 tp = tcp_timers(tp, i);
1791 if (tp == NULL) {
1792 offset = 0;
1793 *(te_mode) = 0;
1794 goto done;
1795 }
1796 }
1797 }
1798 tcp_set_lotimer_index(tp);
1799 }
1800
1801 if (tp->tentry.index < TCPT_NONE) {
1802 offset = tp->t_timer[tp->tentry.index];
1803 *(te_mode) = tp->tentry.mode;
1804 }
1805
1806 done:
1807 if (tp != NULL && tp->tentry.index == TCPT_NONE) {
1808 tcp_remove_timer(tp);
1809 offset = 0;
1810 }
1811
1812 socket_unlock(so, 1);
1813 return (offset);
1814 }
1815
1816 void
1817 tcp_run_timerlist(void * arg1, void * arg2)
1818 {
1819 #pragma unused(arg1, arg2)
1820 struct tcptimerentry *te, *next_te;
1821 struct tcptimerlist *listp = &tcp_timer_list;
1822 struct tcpcb *tp;
1823 uint32_t next_timer = 0; /* offset of the next timer on the list */
1824 u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */
1825 u_int16_t list_mode = 0; /* cumulative modes of all tcpcbs */
1826 uint32_t active_count = 0;
1827
1828 calculate_tcp_clock();
1829
1830 lck_mtx_lock(listp->mtx);
1831
1832 int32_t drift = tcp_now - listp->runtime;
1833 if (drift <= 1) {
1834 tcpstat.tcps_timer_drift_le_1_ms++;
1835 } else if (drift <= 10) {
1836 tcpstat.tcps_timer_drift_le_10_ms++;
1837 } else if (drift <= 20) {
1838 tcpstat.tcps_timer_drift_le_20_ms++;
1839 } else if (drift <= 50) {
1840 tcpstat.tcps_timer_drift_le_50_ms++;
1841 } else if (drift <= 100) {
1842 tcpstat.tcps_timer_drift_le_100_ms++;
1843 } else if (drift <= 200) {
1844 tcpstat.tcps_timer_drift_le_200_ms++;
1845 } else if (drift <= 500) {
1846 tcpstat.tcps_timer_drift_le_500_ms++;
1847 } else if (drift <= 1000) {
1848 tcpstat.tcps_timer_drift_le_1000_ms++;
1849 } else {
1850 tcpstat.tcps_timer_drift_gt_1000_ms++;
1851 }
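/*
 * drift is how late (in tcp_now ticks, nominally milliseconds) this pass
 * is running relative to the runtime recorded by tcp_sched_timerlist();
 * the buckets above only feed the tcps_timer_drift_* counters.
 */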
1852
1853 listp->running = TRUE;
1854
1855 LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
1856 uint32_t offset = 0;
1857 uint32_t runtime = te->runtime;
1858 if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
1859 offset = timer_diff(runtime, 0, tcp_now, 0);
1860 if (next_timer == 0 || offset < next_timer) {
1861 next_timer = offset;
1862 }
1863 list_mode |= te->mode;
1864 continue;
1865 }
1866
1867 tp = TIMERENTRY_TO_TP(te);
1868
1869 /*
1870 * Acquire an inp wantcnt on the inpcb so that the socket
1871 * won't get detached even if tcp_close is called
1872 */
1873 if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
1874 == WNT_STOPUSING) {
1875 /*
1876 * Somehow this pcb went into dead state while
1877 * on the timer list; just take it off the list.
1878 * Since the timer list entry pointers are
1879 * protected by the timer list lock, we can
1880 * do it here without the socket lock.
1881 */
1882 if (TIMER_IS_ON_LIST(tp)) {
1883 tp->t_flags &= ~(TF_TIMER_ONLIST);
1884 LIST_REMOVE(&tp->tentry, le);
1885 listp->entries--;
1886
1887 tp->tentry.le.le_next = NULL;
1888 tp->tentry.le.le_prev = NULL;
1889 }
1890 continue;
1891 }
1892 active_count++;
1893
1894 /*
1895 * Store the next timerentry pointer before releasing the
1896 * list lock. If that entry has to be removed when we
1897 * release the lock, this pointer will be updated to the
1898 * element after that.
1899 */
1900 listp->next_te = next_te;
1901
1902 VERIFY_NEXT_LINK(&tp->tentry, le);
1903 VERIFY_PREV_LINK(&tp->tentry, le);
1904
1905 lck_mtx_unlock(listp->mtx);
1906
1907 offset = tcp_run_conn_timer(tp, &te_mode,
1908 listp->probe_if_index);
1909
1910 lck_mtx_lock(listp->mtx);
1911
1912 next_te = listp->next_te;
1913 listp->next_te = NULL;
1914
1915 if (offset > 0 && te_mode != 0) {
1916 list_mode |= te_mode;
1917
1918 if (next_timer == 0 || offset < next_timer)
1919 next_timer = offset;
1920 }
1921 }
1922
1923 if (!LIST_EMPTY(&listp->lhead)) {
1924 u_int16_t next_mode = 0;
1925 if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
1926 (listp->pref_mode & TCP_TIMERLIST_10MS_MODE))
1927 next_mode = TCP_TIMERLIST_10MS_MODE;
1928 else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
1929 (listp->pref_mode & TCP_TIMERLIST_100MS_MODE))
1930 next_mode = TCP_TIMERLIST_100MS_MODE;
1931 else
1932 next_mode = TCP_TIMERLIST_500MS_MODE;
1933
1934 if (next_mode != TCP_TIMERLIST_500MS_MODE) {
1935 listp->idleruns = 0;
1936 } else {
1937 /*
1938 * the next required mode is slow mode, but if
1939 * the last one was a faster mode and we did not
1940 * have enough idle runs, repeat the last mode.
1941 *
1942 * We try to keep the timer list in fast mode for
1943 * some idle time in expectation of new data.
1944 */
1945 if (listp->mode != next_mode &&
1946 listp->idleruns < timer_fastmode_idlemax) {
1947 listp->idleruns++;
1948 next_mode = listp->mode;
1949 next_timer = TCP_TIMER_100MS_QUANTUM;
1950 } else {
1951 listp->idleruns = 0;
1952 }
1953 }
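/*
 * For example, if the previous pass ran in 100ms mode and this pass
 * computed 500ms mode, the list keeps the faster mode for up to
 * timer_fastmode_idlemax consecutive idle passes, rescheduling roughly
 * one 100ms quantum ahead each time, before dropping to 500ms mode.
 */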
1954 listp->mode = next_mode;
1955 if (listp->pref_offset != 0)
1956 next_timer = min(listp->pref_offset, next_timer);
1957
1958 if (listp->mode == TCP_TIMERLIST_500MS_MODE)
1959 next_timer = max(next_timer,
1960 TCP_TIMER_500MS_QUANTUM);
1961
1962 tcp_sched_timerlist(next_timer);
1963 } else {
1964 /*
1965 * No connection timer needs the list right now, but keep it
1966 * running periodically at a much coarser interval.
1967 */
1968 tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
1969 }
1970
1971 listp->running = FALSE;
1972 listp->pref_mode = 0;
1973 listp->pref_offset = 0;
1974 listp->probe_if_index = 0;
1975
1976 lck_mtx_unlock(listp->mtx);
1977 }
1978
1979 /*
1980 * Function to check if the timerlist needs to be rescheduled to run this
1981 * connection's timers correctly.
1982 */
1983 void
1984 tcp_sched_timers(struct tcpcb *tp)
1985 {
1986 struct tcptimerentry *te = &tp->tentry;
1987 u_int16_t index = te->index;
1988 u_int16_t mode = te->mode;
1989 struct tcptimerlist *listp = &tcp_timer_list;
1990 int32_t offset = 0;
1991 boolean_t list_locked = FALSE;
1992
1993 if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
1994 /* Just return without adding the dead pcb to the list */
1995 if (TIMER_IS_ON_LIST(tp)) {
1996 tcp_remove_timer(tp);
1997 }
1998 return;
1999 }
2000
2001 if (index == TCPT_NONE) {
2002 /* Nothing to run */
2003 tcp_remove_timer(tp);
2004 return;
2005 }
2006
2007 /*
2008 * compute the offset at which the next timer for this connection
2009 * has to run.
2010 */
2011 offset = timer_diff(te->runtime, 0, tcp_now, 0);
2012 if (offset <= 0) {
2013 offset = 1;
2014 tcp_timer_advanced++;
2015 }
2016
2017 if (!TIMER_IS_ON_LIST(tp)) {
2018 if (!list_locked) {
2019 lck_mtx_lock(listp->mtx);
2020 list_locked = TRUE;
2021 }
2022
2023 if (!TIMER_IS_ON_LIST(tp)) {
2024 LIST_INSERT_HEAD(&listp->lhead, te, le);
2025 tp->t_flags |= TF_TIMER_ONLIST;
2026
2027 listp->entries++;
2028 if (listp->entries > listp->maxentries)
2029 listp->maxentries = listp->entries;
2030
2031 /* if the list is not scheduled, just schedule it */
2032 if (!listp->scheduled)
2033 goto schedule;
2034 }
2035 }
2036
2037 /*
2038 * Timer entry is currently on the list; check if the list needs
2039 * to be rescheduled.
2040 */
2041 if (need_to_resched_timerlist(te->runtime, mode)) {
2042 tcp_resched_timerlist++;
2043
2044 if (!list_locked) {
2045 lck_mtx_lock(listp->mtx);
2046 list_locked = TRUE;
2047 }
2048
2049 VERIFY_NEXT_LINK(te, le);
2050 VERIFY_PREV_LINK(te, le);
2051
2052 if (listp->running) {
2053 listp->pref_mode |= mode;
2054 if (listp->pref_offset == 0 ||
2055 offset < listp->pref_offset) {
2056 listp->pref_offset = offset;
2057 }
2058 } else {
2059 /*
2060 * The list could have been rescheduled while
2061 * this thread was waiting for the lock.
2062 */
2063 if (listp->scheduled) {
2064 int32_t diff;
2065 diff = timer_diff(listp->runtime, 0,
2066 tcp_now, offset);
2067 if (diff <= 0)
2068 goto done;
2069 else
2070 goto schedule;
2071 } else {
2072 goto schedule;
2073 }
2074 }
2075 }
2076 goto done;
2077
2078 schedule:
2079 /*
2080 * Since a connection with timers is getting scheduled, the timer
2081 * list moves from idle to active state, which is why idleruns is
2082 * reset here.
2083 */
2084 if (mode & TCP_TIMERLIST_10MS_MODE) {
2085 listp->mode = TCP_TIMERLIST_10MS_MODE;
2086 listp->idleruns = 0;
2087 offset = min(offset, TCP_TIMER_10MS_QUANTUM);
2088 } else if (mode & TCP_TIMERLIST_100MS_MODE) {
2089 if (listp->mode > TCP_TIMERLIST_100MS_MODE)
2090 listp->mode = TCP_TIMERLIST_100MS_MODE;
2091 listp->idleruns = 0;
2092 offset = min(offset, TCP_TIMER_100MS_QUANTUM);
2093 }
2094 tcp_sched_timerlist(offset);
2095
2096 done:
2097 if (list_locked)
2098 lck_mtx_unlock(listp->mtx);
2099
2100 return;
2101 }
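/*
 * tcp_sched_timers() is expected to be called with the socket lock held
 * (tcp_check_timer_state() asserts it before calling here).  The timer
 * list lock is taken only when the entry must be inserted or the list
 * rescheduled; if the list is currently running, only pref_mode and
 * pref_offset are recorded and tcp_run_timerlist() folds them into the
 * next reschedule at the end of its pass.
 */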
2102
2103 static inline void
2104 tcp_set_lotimer_index(struct tcpcb *tp)
2105 {
2106 uint16_t i, lo_index = TCPT_NONE, mode = 0;
2107 uint32_t lo_timer = 0;
2108 for (i = 0; i < TCPT_NTIMERS; ++i) {
2109 if (tp->t_timer[i] != 0) {
2110 TCP_SET_TIMER_MODE(mode, i);
2111 if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
2112 lo_timer = tp->t_timer[i];
2113 lo_index = i;
2114 }
2115 }
2116 }
2117 tp->tentry.index = lo_index;
2118 tp->tentry.mode = mode;
2119 VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2120
2121 if (tp->tentry.index != TCPT_NONE) {
2122 tp->tentry.runtime = tp->tentry.timer_start
2123 + tp->t_timer[tp->tentry.index];
2124 if (tp->tentry.runtime == 0)
2125 tp->tentry.runtime++;
2126 }
2127 }
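/*
 * tcp_set_lotimer_index() scans t_timer[] for the smallest pending
 * offset and records its index, the union of the pending timers' modes,
 * and the matching absolute runtime.  For example, with
 * t_timer[TCPT_REXMT] = 300 and t_timer[TCPT_KEEP] = 7200000 (sample
 * values), the entry ends up with index TCPT_REXMT and
 * runtime = timer_start + 300.
 */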
2128
2129 void
2130 tcp_check_timer_state(struct tcpcb *tp)
2131 {
2132 socket_lock_assert_owned(tp->t_inpcb->inp_socket);
2133
2134 if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
2135 return;
2136
2137 tcp_set_lotimer_index(tp);
2138
2139 tcp_sched_timers(tp);
2140 return;
2141 }
2142
2143 static inline void
2144 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
2145 {
2146 /* handle wrap around */
2147 int32_t diff = (int32_t) (cur - *prev);
2148 if (diff > 0)
2149 *dest = diff;
2150 else
2151 *dest = 0;
2152 *prev = cur;
2153 return;
2154 }
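/*
 * The unsigned subtraction keeps the delta correct across a 32-bit
 * counter wrap: for example, with *prev == 0xfffffffeU and cur == 5,
 * (int32_t)(cur - *prev) evaluates to 7, so seven new events are
 * reported even though the raw counter value went "backwards".
 */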
2155
2156 static inline void
2157 tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
2158 {
2159 /* handle wrap around */
2160 int64_t diff = (int64_t) (cur - *prev);
2161 if (diff > 0)
2162 *dest = diff;
2163 else
2164 *dest = 0;
2165 *prev = cur;
2166 return;
2167 }
2168
2169 __private_extern__ void
2170 tcp_report_stats(void)
2171 {
2172 struct nstat_sysinfo_data data;
2173 struct sockaddr_in dst;
2174 struct sockaddr_in6 dst6;
2175 struct rtentry *rt = NULL;
2176 static struct tcp_last_report_stats prev;
2177 u_int64_t var, uptime;
2178
2179 #define stat data.u.tcp_stats
2180 if (((uptime = net_uptime()) - tcp_last_report_time) <
2181 tcp_report_stats_interval)
2182 return;
2183
2184 tcp_last_report_time = uptime;
2185
2186 bzero(&data, sizeof(data));
2187 data.flags = NSTAT_SYSINFO_TCP_STATS;
2188
2189 bzero(&dst, sizeof(dst));
2190 dst.sin_len = sizeof(dst);
2191 dst.sin_family = AF_INET;
2192
2193 /* ipv4 avg rtt */
2194 lck_mtx_lock(rnh_lock);
2195 rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
2196 rt_tables[AF_INET], IFSCOPE_NONE);
2197 lck_mtx_unlock(rnh_lock);
2198 if (rt != NULL) {
2199 RT_LOCK(rt);
2200 if (rt_primary_default(rt, rt_key(rt)) &&
2201 rt->rt_stats != NULL) {
2202 stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
2203 }
2204 RT_UNLOCK(rt);
2205 rtfree(rt);
2206 rt = NULL;
2207 }
2208
2209 /* ipv6 avg rtt */
2210 bzero(&dst6, sizeof(dst6));
2211 dst6.sin6_len = sizeof(dst6);
2212 dst6.sin6_family = AF_INET6;
2213
2214 lck_mtx_lock(rnh_lock);
2215 rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
2216 rt_tables[AF_INET6], IFSCOPE_NONE);
2217 lck_mtx_unlock(rnh_lock);
2218 if (rt != NULL) {
2219 RT_LOCK(rt);
2220 if (rt_primary_default(rt, rt_key(rt)) &&
2221 rt->rt_stats != NULL) {
2222 stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
2223 }
2224 RT_UNLOCK(rt);
2225 rtfree(rt);
2226 rt = NULL;
2227 }
2228
2229 /* send packet loss rate, shift by 10 for precision */
2230 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
2231 var = tcpstat.tcps_sndrexmitpack << 10;
2232 stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
2233 }
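/*
 * This and the following rates are fixed-point percentages where a
 * value of 1024 corresponds to 1%.  For example, 5 retransmitted out of
 * 1000 sent packets gives send_plr = (5 << 10) * 100 / 1000 = 512,
 * i.e. 0.5%.
 */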
2234
2235 /* recv packet loss rate, shift by 10 for precision */
2236 if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2237 var = tcpstat.tcps_recovered_pkts << 10;
2238 stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
2239 }
2240
2241 /* RTO after tail loss, shift by 10 for precision */
2242 if (tcpstat.tcps_sndrexmitpack > 0
2243 && tcpstat.tcps_tailloss_rto > 0) {
2244 var = tcpstat.tcps_tailloss_rto << 10;
2245 stat.send_tlrto_rate =
2246 (var * 100) / tcpstat.tcps_sndrexmitpack;
2247 }
2248
2249 /* packet reordering */
2250 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2251 var = tcpstat.tcps_reordered_pkts << 10;
2252 stat.send_reorder_rate =
2253 (var * 100) / tcpstat.tcps_sndpack;
2254 }
2255
2256 if (tcp_ecn_outbound == 1)
2257 stat.ecn_client_enabled = 1;
2258 if (tcp_ecn_inbound == 1)
2259 stat.ecn_server_enabled = 1;
2260 tcp_cumulative_stat(tcpstat.tcps_connattempt,
2261 &prev.tcps_connattempt, &stat.connection_attempts);
2262 tcp_cumulative_stat(tcpstat.tcps_accepts,
2263 &prev.tcps_accepts, &stat.connection_accepts);
2264 tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2265 &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2266 tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2267 &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2268 tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2269 &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2270 tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2271 &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2272 tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2273 &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2274 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2275 &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2276 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2277 &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2278 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2279 &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2280 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2281 &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2284 tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2285 &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2288 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2289 &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2290 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2291 &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2292 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2293 &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2294 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2295 &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2296 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2297 &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2298 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2299 &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2300 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2301 &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2302 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2303 &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2304 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2305 &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2306 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2307 &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2308 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2309 &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2310 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2311 &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2312 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2313 &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2314 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2315 &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2316 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2317 &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2318 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2319 &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2320 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2321 &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2322 tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2323 &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2324 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
2325 &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
2326 tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
2327 &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
2328 tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
2329 &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
2330 tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
2331 &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
2332
2333
2334 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
2335 &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
2336 tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
2337 &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
2338 tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
2339 &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
2340 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
2341 &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
2342 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
2343 &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
2344 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
2345 &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
2346 tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
2347 &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
2348 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
2349 &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
2350 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
2351 &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
2352 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
2353 &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
2354 tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
2355 &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
2356 tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
2357 &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
2358 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
2359 &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
2360 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
2361 &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
2362 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
2363 &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
2364 tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
2365 &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
2366 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
2367 &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
2368 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
2369 &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
2370 tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
2371 &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
2372 tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
2373 &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
2374 tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
2375 &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
2376 tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
2377 &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
2378 tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
2379 &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
2380 tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
2381 &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
2382 tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
2383 &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
2384 tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
2385 &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
2386 tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
2387 &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
2388 tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
2389 &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
2390
2391
2392 nstat_sysinfo_send_data(&data);
2393
2394 #undef stat
2395 }
2396
2397 void
2398 tcp_interface_send_probe(u_int16_t probe_if_index)
2399 {
2400 int32_t offset = 0;
2401 struct tcptimerlist *listp = &tcp_timer_list;
2402
2403 /* Make sure TCP clock is up to date */
2404 calculate_tcp_clock();
2405
2406 lck_mtx_lock(listp->mtx);
2407 if (listp->probe_if_index > 0) {
2408 tcpstat.tcps_probe_if_conflict++;
2409 goto done;
2410 }
2411
2412 listp->probe_if_index = probe_if_index;
2413 if (listp->running)
2414 goto done;
2415
2416 /*
2417 * Reschedule the timerlist to run within the next 10ms, which is
2418 * the fastest that we can do.
2419 */
2420 offset = TCP_TIMER_10MS_QUANTUM;
2421 if (listp->scheduled) {
2422 int32_t diff;
2423 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2424 if (diff <= 0) {
2425 /* The timer will fire sooner than what's needed */
2426 goto done;
2427 }
2428 }
2429 listp->mode = TCP_TIMERLIST_10MS_MODE;
2430 listp->idleruns = 0;
2431
2432 tcp_sched_timerlist(offset);
2433
2434 done:
2435 lck_mtx_unlock(listp->mtx);
2436 return;
2437 }
2438
2439 /*
2440 * Enable read probes on this connection, if:
2441 * - it is in the ESTABLISHED state
2442 * - it does not have any outstanding data
2443 * - the outgoing ifp matches
2444 * - we have not already sent any read probes
2445 */
2446 static void
2447 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2448 {
2449 if (tp->t_state == TCPS_ESTABLISHED &&
2450 tp->snd_max == tp->snd_una &&
2451 tp->t_inpcb->inp_last_outifp == ifp &&
2452 !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2453 tp->t_rtimo_probes == 0) {
2454 tp->t_flagsext |= TF_DETECT_READSTALL;
2455 tp->t_rtimo_probes = 0;
2456 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2457 TCP_TIMER_10MS_QUANTUM);
2458 if (tp->tentry.index == TCPT_NONE) {
2459 tp->tentry.index = TCPT_KEEP;
2460 tp->tentry.runtime = tcp_now +
2461 TCP_TIMER_10MS_QUANTUM;
2462 } else {
2463 int32_t diff = 0;
2464
2465 /* Reset runtime to be in next 10ms */
2466 diff = timer_diff(tp->tentry.runtime, 0,
2467 tcp_now, TCP_TIMER_10MS_QUANTUM);
2468 if (diff > 0) {
2469 tp->tentry.index = TCPT_KEEP;
2470 tp->tentry.runtime = tcp_now +
2471 TCP_TIMER_10MS_QUANTUM;
2472 if (tp->tentry.runtime == 0)
2473 tp->tentry.runtime++;
2474 }
2475 }
2476 }
2477 }
2478
2479 /*
2480 * Disable read probe and reset the keep alive timer
2481 */
2482 static void
2483 tcp_disable_read_probe(struct tcpcb *tp)
2484 {
2485 if (tp->t_adaptive_rtimo == 0 &&
2486 ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2487 tp->t_rtimo_probes > 0)) {
2488 tcp_keepalive_reset(tp);
2489
2490 if (tp->t_mpsub)
2491 mptcp_reset_keepalive(tp);
2492 }
2493 }
2494
2495 /*
2496 * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
2497 * probes on connections going over a particular interface.
2498 */
2499 void
2500 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2501 {
2502 int32_t offset;
2503 struct tcptimerlist *listp = &tcp_timer_list;
2504 struct inpcbinfo *pcbinfo = &tcbinfo;
2505 struct inpcb *inp, *nxt;
2506
2507 if (ifp == NULL)
2508 return;
2509
2510 /* update clock */
2511 calculate_tcp_clock();
2512
2513 /*
2514 * Enable (or disable) the keepalive-based read probes on all
2515 * connections that are active/established on this interface.
2516 */
2517 lck_rw_lock_shared(pcbinfo->ipi_lock);
2518
2519 LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2520 struct tcpcb *tp = NULL;
2521 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2522 WNT_STOPUSING)
2523 continue;
2524
2525 /* Acquire lock to look at the state of the connection */
2526 socket_lock(inp->inp_socket, 1);
2527
2528 /* Release the want count */
2529 if (inp->inp_ppcb == NULL ||
2530 (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
2531 socket_unlock(inp->inp_socket, 1);
2532 continue;
2533 }
2534 tp = intotcpcb(inp);
2535 if (enable)
2536 tcp_enable_read_probe(tp, ifp);
2537 else
2538 tcp_disable_read_probe(tp);
2539
2540 socket_unlock(inp->inp_socket, 1);
2541 }
2542 lck_rw_done(pcbinfo->ipi_lock);
2543
2544 lck_mtx_lock(listp->mtx);
2545 if (listp->running) {
2546 listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2547 goto done;
2548 }
2549
2550 /* Reschedule within the next 10ms */
2551 offset = TCP_TIMER_10MS_QUANTUM;
2552 if (listp->scheduled) {
2553 int32_t diff;
2554 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2555 if (diff <= 0) {
2556 /* The timer will fire sooner than what's needed */
2557 goto done;
2558 }
2559 }
2560 listp->mode = TCP_TIMERLIST_10MS_MODE;
2561 listp->idleruns = 0;
2562
2563 tcp_sched_timerlist(offset);
2564 done:
2565 lck_mtx_unlock(listp->mtx);
2566 return;
2567 }
2568
2569 inline void
2570 tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
2571 {
2572 struct if_cellular_status_v1 *ifsr;
2573 u_int32_t optlen;
2574 ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2575 if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2576 optlen = tp->t_maxopd - tp->t_maxseg;
2577
2578 if (ifsr->mss_recommended ==
2579 IF_CELL_UL_MSS_RECOMMENDED_NONE &&
2580 tp->t_cached_maxopd > 0 &&
2581 tp->t_maxopd < tp->t_cached_maxopd) {
2582 tp->t_maxopd = tp->t_cached_maxopd;
2583 tcpstat.tcps_mss_to_default++;
2584 } else if (ifsr->mss_recommended ==
2585 IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
2586 tp->t_maxopd > tcp_mss_rec_medium) {
2587 tp->t_cached_maxopd = tp->t_maxopd;
2588 tp->t_maxopd = tcp_mss_rec_medium;
2589 tcpstat.tcps_mss_to_medium++;
2590 } else if (ifsr->mss_recommended ==
2591 IF_CELL_UL_MSS_RECOMMENDED_LOW &&
2592 tp->t_maxopd > tcp_mss_rec_low) {
2593 tp->t_cached_maxopd = tp->t_maxopd;
2594 tp->t_maxopd = tcp_mss_rec_low;
2595 tcpstat.tcps_mss_to_low++;
2596 }
2597 tp->t_maxseg = tp->t_maxopd - optlen;
2598
2599 /*
2600 * Clear the cached value if it is the same as the current one.
2601 */
2602 if (tp->t_maxopd == tp->t_cached_maxopd)
2603 tp->t_cached_maxopd = 0;
2604 }
2605 }
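/*
 * A worked example, assuming 12 bytes of TCP options and that
 * tcp_mss_rec_medium is smaller than the current t_maxopd of 1440
 * (sample values): a RECOMMENDED_MEDIUM report caches 1440 in
 * t_cached_maxopd, lowers t_maxopd to tcp_mss_rec_medium and sets
 * t_maxseg to that value minus 12; a later RECOMMENDED_NONE report
 * restores the cached 1440, after which the cache is cleared.
 */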
2606
2607 void
2608 tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
2609 {
2610 struct inpcb *inp = sotoinpcb(so);
2611 struct tcpcb *tp = intotcpcb(inp);
2612
2613 if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL)
2614 return;
2615
2616 if (!IFNET_IS_CELLULAR(ifp)) {
2617 /*
2618 * This optimization is implemented for cellular
2619 * networks only
2620 */
2621 return;
2622 }
2623 if (tp->t_state <= TCPS_CLOSE_WAIT) {
2624 /*
2625 * If the connection is currently doing or has done PMTU
2626 * blackhole detection, do not change the MSS
2627 */
2628 if (tp->t_flags & TF_BLACKHOLE)
2629 return;
2630 if (ifp->if_link_status == NULL)
2631 return;
2632 tcp_update_mss_core(tp, ifp);
2633 }
2634 }
2635
2636 void
2637 tcp_itimer(struct inpcbinfo *ipi)
2638 {
2639 struct inpcb *inp, *nxt;
2640
2641 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2642 if (tcp_itimer_done == TRUE) {
2643 tcp_itimer_done = FALSE;
2644 atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
2645 return;
2646 }
2647 /* The try-lock failed; block until the lock can be taken exclusive */
2648 lck_rw_lock_exclusive(ipi->ipi_lock);
2649 }
2650 tcp_itimer_done = TRUE;
2651
2652 LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
2653 struct socket *so;
2654 struct ifnet *ifp;
2655
2656 if (inp->inp_ppcb == NULL ||
2657 in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
2658 continue;
2659 so = inp->inp_socket;
2660 ifp = inp->inp_last_outifp;
2661 socket_lock(so, 1);
2662 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2663 socket_unlock(so, 1);
2664 continue;
2665 }
2666 so_check_extended_bk_idle_time(so);
2667 if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
2668 tcp_update_mss_locked(so, NULL);
2669 }
2670 socket_unlock(so, 1);
2671
2672 /*
2673 * Defunct all system-initiated background sockets if the
2674 * socket is using the cellular interface and the interface
2675 * has its LQM set to abort.
2676 */
2677 if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
2678 IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
2679 ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
2680 (ifp->if_interface_state.valid_bitmask &
2681 IF_INTERFACE_STATE_LQM_STATE_VALID) &&
2682 ifp->if_interface_state.lqm_state ==
2683 IFNET_LQM_THRESH_ABORT) {
2684 socket_defunct(current_proc(), so,
2685 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
2686 }
2687 }
2688
2689 ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
2690 lck_rw_done(ipi->ipi_lock);
2691 }