1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
61 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
62 */
63
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/mbuf.h>
69 #include <sys/sysctl.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/protosw.h>
73 #include <sys/domain.h>
74 #include <sys/mcache.h>
75 #include <sys/queue.h>
76 #include <kern/locks.h>
77 #include <kern/cpu_number.h> /* before tcp_seq.h, for tcp_random18() */
78 #include <mach/boolean.h>
79
80 #include <net/route.h>
81 #include <net/if_var.h>
82 #include <net/ntstat.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/in_pcb.h>
87 #if INET6
88 #include <netinet6/in6_pcb.h>
89 #endif
90 #include <netinet/ip_var.h>
91 #include <netinet/tcp.h>
92 #include <netinet/tcp_cache.h>
93 #include <netinet/tcp_fsm.h>
94 #include <netinet/tcp_seq.h>
95 #include <netinet/tcp_timer.h>
96 #include <netinet/tcp_var.h>
97 #include <netinet/tcp_cc.h>
98 #if INET6
99 #include <netinet6/tcp6_var.h>
100 #endif
101 #include <netinet/tcpip.h>
102 #if TCPDEBUG
103 #include <netinet/tcp_debug.h>
104 #endif
105 #include <sys/kdebug.h>
106 #include <mach/sdt.h>
107 #include <netinet/mptcp_var.h>
108
109 /* Max number of times a stretch ack can be delayed on a connection */
110 #define TCP_STRETCHACK_DELAY_THRESHOLD 5
111
112 /*
113 * If the host processor has been sleeping for too long, this is the threshold
114 * used to avoid sending stale retransmissions.
115 */
116 #define TCP_SLEEP_TOO_LONG (10 * 60 * 1000) /* 10 minutes in ms */
117
118 /* tcp timer list */
119 struct tcptimerlist tcp_timer_list;
120
121 /* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
122 struct tcptailq tcp_tw_tailq;
123
124 static int
125 sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
126 {
127 #pragma unused(arg2)
128 int error, s, tt;
129
130 tt = *(int *)arg1;
131 s = tt * 1000 / TCP_RETRANSHZ;
132
133 error = sysctl_handle_int(oidp, &s, 0, req);
134 if (error || !req->newptr)
135 return (error);
136
137 tt = s * TCP_RETRANSHZ / 1000;
138 if (tt < 1)
139 return (EINVAL);
140
141 *(int *)arg1 = tt;
142 SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
143 return (0);
144 }
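/*
 * For illustration: the handler above exposes timer values to sysctl
 * in milliseconds while storing them internally in TCP_RETRANSHZ
 * ticks.  With TCP_RETRANSHZ at 1000 ticks per second (1 ms per tick,
 * as implied by the tcp_delack comment below), the conversion is
 * effectively an identity:
 *
 *     s  = tt * 1000 / TCP_RETRANSHZ;    75000 ticks -> 75000 ms
 *     tt = s  * TCP_RETRANSHZ / 1000;    75000 ms    -> 75000 ticks
 *
 * Writes that would round down to less than one tick return EINVAL.
 */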
145
146 #if SYSCTL_SKMEM
147 int tcp_keepinit = TCPTV_KEEP_INIT;
148 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
149 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
150 &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
151 sysctl_msec_to_ticks, "I", "");
152
153 int tcp_keepidle = TCPTV_KEEP_IDLE;
154 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
155 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
156 &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
157 sysctl_msec_to_ticks, "I", "");
158
159 int tcp_keepintvl = TCPTV_KEEPINTVL;
160 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
161 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
162 &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
163 sysctl_msec_to_ticks, "I", "");
164
165 SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
166 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
167 int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");
168
169 int tcp_msl = TCPTV_MSL;
170 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
171 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
172 &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
173 sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
174 #else /* SYSCTL_SKMEM */
175 int tcp_keepinit;
176 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
177 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
178 &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
179
180 int tcp_keepidle;
181 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
182 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
183 &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
184
185 int tcp_keepintvl;
186 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
187 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
188 &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
189
190 int tcp_keepcnt;
191 SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
192 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
193 &tcp_keepcnt, 0, "number of times to repeat keepalive");
194
195 int tcp_msl;
196 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
197 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
198 &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
199 #endif /* SYSCTL_SKMEM */
200
201 /*
202 * Avoid DoS via TCP Robustness in Persist Condition
203 * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
204 * by allowing a system wide maximum persistence timeout value when in
205 * Zero Window Probe mode.
206 *
207 * Expressed in milliseconds to be consistent with other timeout-related
208 * values; the TCP socket option is in seconds.
209 */
210 #if SYSCTL_SKMEM
211 u_int32_t tcp_max_persist_timeout = 0;
212 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
213 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
214 &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
215 sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
216 #else /* SYSCTL_SKMEM */
217 u_int32_t tcp_max_persist_timeout = 0;
218 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
219 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
220 &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
221 "Maximum persistence timeout for ZWP");
222 #endif /* SYSCTL_SKMEM */
223
224 SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
225 CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
226 "Assume SO_KEEPALIVE on all TCP connections");
227
228 /*
229 * This parameter determines how long the timer list will stay in fast or
230 * quick mode even when all connections are idle. In this state, the
231 * timer will run more frequently, anticipating new data.
232 */
233 SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
234 CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
235 TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");
236
237 /*
238 * See tcp_syn_backoff[] for interval values between SYN retransmits;
239 * the value set below defines the number of retransmits before we
240 * disable the timestamp and window scaling options during subsequent
241 * SYN retransmits. Setting it to 0 disables dropping those
242 * two options.
243 */
244 SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
245 CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
246 10, "Number of retransmitted SYNs before disabling RFC 1323 "
247 "options on local connections");
248
249 static int tcp_timer_advanced = 0;
250 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
251 CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
252 "Number of times one of the timers was advanced");
253
254 static int tcp_resched_timerlist = 0;
255 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
256 CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
257 "Number of times timer list was rescheduled as part of processing a packet");
258
259 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
260 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
261 "Path MTU Discovery Black Hole Detection");
262
263 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
264 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
265 "Path MTU Discovery Black Hole Detection lowered MSS");
266
267 static u_int32_t tcp_mss_rec_medium = 1200;
268 static u_int32_t tcp_mss_rec_low = 512;
269
270 #define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */
271 int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;
272
273 /* performed garbage collection of "used" sockets */
274 static boolean_t tcp_gc_done = FALSE;
275
276 /* max idle probes */
277 int tcp_maxpersistidle = TCPTV_KEEP_IDLE;
278
279 /*
280 * TCP delack timer is set to 100 ms. Since the processing of timer list
281 * in fast mode will happen no faster than 100 ms, the delayed ack timer
282 * will fire somewhere between 100 and 200 ms.
283 */
284 int tcp_delack = TCP_RETRANSHZ / 10;
285
286 #if MPTCP
287 /*
288 * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
289 */
290 int tcp_jack_rxmt = TCP_RETRANSHZ / 2;
291 #endif /* MPTCP */
292
293 static boolean_t tcp_itimer_done = FALSE;
294
295 static void tcp_remove_timer(struct tcpcb *tp);
296 static void tcp_sched_timerlist(uint32_t offset);
297 static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
298 u_int16_t probe_if_index);
299 static void tcp_sched_timers(struct tcpcb *tp);
300 static inline void tcp_set_lotimer_index(struct tcpcb *);
301 __private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
302 static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
303 __private_extern__ void tcp_report_stats(void);
304
305 static u_int64_t tcp_last_report_time;
306
307 /*
308 * Structure to store previously reported stats so that we can send
309 * incremental changes in each report interval.
310 */
311 struct tcp_last_report_stats {
312 u_int32_t tcps_connattempt;
313 u_int32_t tcps_accepts;
314 u_int32_t tcps_ecn_client_setup;
315 u_int32_t tcps_ecn_server_setup;
316 u_int32_t tcps_ecn_client_success;
317 u_int32_t tcps_ecn_server_success;
318 u_int32_t tcps_ecn_not_supported;
319 u_int32_t tcps_ecn_lost_syn;
320 u_int32_t tcps_ecn_lost_synack;
321 u_int32_t tcps_ecn_recv_ce;
322 u_int32_t tcps_ecn_recv_ece;
323 u_int32_t tcps_ecn_sent_ece;
324 u_int32_t tcps_ecn_conn_recv_ce;
325 u_int32_t tcps_ecn_conn_recv_ece;
326 u_int32_t tcps_ecn_conn_plnoce;
327 u_int32_t tcps_ecn_conn_pl_ce;
328 u_int32_t tcps_ecn_conn_nopl_ce;
329 u_int32_t tcps_ecn_fallback_synloss;
330 u_int32_t tcps_ecn_fallback_reorder;
331 u_int32_t tcps_ecn_fallback_ce;
332
333 /* TFO-related statistics */
334 u_int32_t tcps_tfo_syn_data_rcv;
335 u_int32_t tcps_tfo_cookie_req_rcv;
336 u_int32_t tcps_tfo_cookie_sent;
337 u_int32_t tcps_tfo_cookie_invalid;
338 u_int32_t tcps_tfo_cookie_req;
339 u_int32_t tcps_tfo_cookie_rcv;
340 u_int32_t tcps_tfo_syn_data_sent;
341 u_int32_t tcps_tfo_syn_data_acked;
342 u_int32_t tcps_tfo_syn_loss;
343 u_int32_t tcps_tfo_blackhole;
344 u_int32_t tcps_tfo_cookie_wrong;
345 u_int32_t tcps_tfo_no_cookie_rcv;
346 u_int32_t tcps_tfo_heuristics_disable;
347 u_int32_t tcps_tfo_sndblackhole;
348
349 /* MPTCP-related statistics */
350 u_int32_t tcps_mptcp_handover_attempt;
351 u_int32_t tcps_mptcp_interactive_attempt;
352 u_int32_t tcps_mptcp_aggregate_attempt;
353 u_int32_t tcps_mptcp_fp_handover_attempt;
354 u_int32_t tcps_mptcp_fp_interactive_attempt;
355 u_int32_t tcps_mptcp_fp_aggregate_attempt;
356 u_int32_t tcps_mptcp_heuristic_fallback;
357 u_int32_t tcps_mptcp_fp_heuristic_fallback;
358 u_int32_t tcps_mptcp_handover_success_wifi;
359 u_int32_t tcps_mptcp_handover_success_cell;
360 u_int32_t tcps_mptcp_interactive_success;
361 u_int32_t tcps_mptcp_aggregate_success;
362 u_int32_t tcps_mptcp_fp_handover_success_wifi;
363 u_int32_t tcps_mptcp_fp_handover_success_cell;
364 u_int32_t tcps_mptcp_fp_interactive_success;
365 u_int32_t tcps_mptcp_fp_aggregate_success;
366 u_int32_t tcps_mptcp_handover_cell_from_wifi;
367 u_int32_t tcps_mptcp_handover_wifi_from_cell;
368 u_int32_t tcps_mptcp_interactive_cell_from_wifi;
369 u_int64_t tcps_mptcp_handover_cell_bytes;
370 u_int64_t tcps_mptcp_interactive_cell_bytes;
371 u_int64_t tcps_mptcp_aggregate_cell_bytes;
372 u_int64_t tcps_mptcp_handover_all_bytes;
373 u_int64_t tcps_mptcp_interactive_all_bytes;
374 u_int64_t tcps_mptcp_aggregate_all_bytes;
375 u_int32_t tcps_mptcp_back_to_wifi;
376 u_int32_t tcps_mptcp_wifi_proxy;
377 u_int32_t tcps_mptcp_cell_proxy;
378 };
379
380
381 /* Returns true if the timer is on the timer list */
382 #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
383
384 /* Run the TCP timerlist at least once every hour */
385 #define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
386
387
388 static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
389 static boolean_t tcp_garbage_collect(struct inpcb *, int);
390
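/*
 * Map a timer-list entry back to its owning tcpcb (a container-of
 * style conversion): the timer list links the tcptimerentry embedded
 * in each tcpcb, so subtracting the offset of the embedded linkage
 * from the entry pointer recovers the enclosing tcpcb.
 */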
391 #define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))
392
393 #define VERIFY_NEXT_LINK(elm,field) do { \
394 if (LIST_NEXT((elm),field) != NULL && \
395 LIST_NEXT((elm),field)->field.le_prev != \
396 &((elm)->field.le_next)) \
397 panic("Bad link elm %p next->prev != elm", (elm)); \
398 } while(0)
399
400 #define VERIFY_PREV_LINK(elm,field) do { \
401 if (*(elm)->field.le_prev != (elm)) \
402 panic("Bad link elm %p prev->next != elm", (elm)); \
403 } while(0)
404
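/*
 * Fold a timer's granularity into the cumulative timer-list mode.
 * The timer list runs on a 10 ms, 100 ms or 500 ms quantum and is
 * scheduled at the finest granularity requested by any active timer,
 * so fine-grained timers are not delayed by coarse ones.
 */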
405 #define TCP_SET_TIMER_MODE(mode, i) do { \
406 if (IS_TIMER_HZ_10MS(i)) \
407 (mode) |= TCP_TIMERLIST_10MS_MODE; \
408 else if (IS_TIMER_HZ_100MS(i)) \
409 (mode) |= TCP_TIMERLIST_100MS_MODE; \
410 else \
411 (mode) |= TCP_TIMERLIST_500MS_MODE; \
412 } while(0)
413
414 #if (DEVELOPMENT || DEBUG)
415 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
416 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
417 "Medium MSS based on recommendation in link status report");
418 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
419 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
420 "Low MSS based on recommendation in link status report");
421
422 static int32_t tcp_change_mss_recommended = 0;
423 static int
424 sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
425 {
426 #pragma unused(oidp, arg1, arg2)
427 int i, err = 0, changed = 0;
428 struct ifnet *ifp;
429 struct if_link_status ifsr;
430 struct if_cellular_status_v1 *new_cell_sr;
431 err = sysctl_io_number(req, tcp_change_mss_recommended,
432 sizeof (int32_t), &i, &changed);
433 if (changed) {
434 ifnet_head_lock_shared();
435 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
436 if (IFNET_IS_CELLULAR(ifp)) {
437 bzero(&ifsr, sizeof (ifsr));
438 new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
439 ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
440 ifsr.ifsr_len = sizeof(*new_cell_sr);
441
442 /* Set MSS recommended */
443 new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
444 new_cell_sr->mss_recommended = i;
445 err = ifnet_link_status_report(ifp, new_cell_sr, sizeof (new_cell_sr));
446 if (err == 0) {
447 tcp_change_mss_recommended = i;
448 } else {
449 break;
450 }
451 }
452 }
453 ifnet_head_done();
454 }
455 return (err);
456 }
457
458 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
459 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
460 0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");
461
462 SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
463 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
464 "Report stats interval");
465 #endif /* (DEVELOPMENT || DEBUG) */
466
467 /*
468 * Compare two timers. If there is a reset of the sign bit,
469 * it is safe to assume that the timer has wrapped around. By doing
470 * signed comparison, we take care of wraparound such that the value
471 * with the sign bit reset is actually ahead of the other.
472 */
473 inline int32_t
474 timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) {
475 return (int32_t)((t1 + toff1) - (t2 + toff2));
476 };
477
478 /*
479 * Add to tcp timewait list, delay is given in milliseconds.
480 */
481 static void
482 add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
483 {
484 struct inpcbinfo *pcbinfo = &tcbinfo;
485 struct inpcb *inp = tp->t_inpcb;
486 uint32_t timer;
487
488 /* pcb list should be locked when we get here */
489 LCK_RW_ASSERT(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
490
491 /* We may get here multiple times, so check */
492 if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
493 pcbinfo->ipi_twcount++;
494 inp->inp_flags2 |= INP2_TIMEWAIT;
495
496 /* Remove from global inp list */
497 LIST_REMOVE(inp, inp_list);
498 } else {
499 TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
500 }
501
502 /* Compute the time at which this socket can be closed */
503 timer = tcp_now + delay;
504
505 /* We will use the TCPT_2MSL timer for tracking this delay */
506
507 if (TIMER_IS_ON_LIST(tp))
508 tcp_remove_timer(tp);
509 tp->t_timer[TCPT_2MSL] = timer;
510
511 TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
512 }
513
514 void
515 add_to_time_wait(struct tcpcb *tp, uint32_t delay)
516 {
517 struct inpcbinfo *pcbinfo = &tcbinfo;
518 if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP)
519 socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
520
521 /* 19182803: Notify nstat that connection is closing before waiting. */
522 nstat_pcb_detach(tp->t_inpcb);
523
524 if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
525 socket_unlock(tp->t_inpcb->inp_socket, 0);
526 lck_rw_lock_exclusive(pcbinfo->ipi_lock);
527 socket_lock(tp->t_inpcb->inp_socket, 0);
528 }
529 add_to_time_wait_locked(tp, delay);
530 lck_rw_done(pcbinfo->ipi_lock);
531
532 inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
533 }
534
535 /* If this is on time wait queue, remove it. */
536 void
537 tcp_remove_from_time_wait(struct inpcb *inp)
538 {
539 struct tcpcb *tp = intotcpcb(inp);
540 if (inp->inp_flags2 & INP2_TIMEWAIT)
541 TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
542 }
543
544 static boolean_t
545 tcp_garbage_collect(struct inpcb *inp, int istimewait)
546 {
547 boolean_t active = FALSE;
548 struct socket *so, *mp_so = NULL;
549 struct tcpcb *tp;
550
551 so = inp->inp_socket;
552 tp = intotcpcb(inp);
553
554 if (so->so_flags & SOF_MP_SUBFLOW) {
555 mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
556 if (!socket_try_lock(mp_so)) {
557 mp_so = NULL;
558 active = TRUE;
559 goto out;
560 }
561 mp_so->so_usecount++;
562 }
563
564 /*
565 * Skip if still in use or busy; it would have been more efficient
566 * if we were to test so_usecount against 0, but this isn't possible
567 * due to the current implementation of tcp_dropdropablreq() where
568 * overflow sockets that are eligible for garbage collection have
569 * their usecounts set to 1.
570 */
571 if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
572 active = TRUE;
573 goto out;
574 }
575
576 /* Check again under the lock */
577 if (so->so_usecount > 1) {
578 if (inp->inp_wantcnt == WNT_STOPUSING)
579 active = TRUE;
580 lck_mtx_unlock(&inp->inpcb_mtx);
581 goto out;
582 }
583
584 if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
585 tp->t_state != TCPS_CLOSED) {
586 /* Become a regular mutex */
587 lck_mtx_convert_spin(&inp->inpcb_mtx);
588 tcp_close(tp);
589 }
590
591 /*
592 * Overflowed socket dropped from the listening queue? Do this
593 * only if we are called to clean up the time wait slots, since
594 * tcp_dropdropablreq() considers a socket to have been fully
595 * dropped after add_to_time_wait() is finished.
596 * Also handle the case of connections getting closed by the peer
597 * while in the queue as seen with rdar://6422317
598 *
599 */
600 if (so->so_usecount == 1 &&
601 ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
602 ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
603 (so->so_head != NULL) &&
604 ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
605 (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) {
606
607 if (inp->inp_state != INPCB_STATE_DEAD) {
608 /* Become a regular mutex */
609 lck_mtx_convert_spin(&inp->inpcb_mtx);
610 #if INET6
611 if (SOCK_CHECK_DOM(so, PF_INET6))
612 in6_pcbdetach(inp);
613 else
614 #endif /* INET6 */
615 in_pcbdetach(inp);
616 }
617 VERIFY(so->so_usecount > 0);
618 so->so_usecount--;
619 if (inp->inp_wantcnt == WNT_STOPUSING)
620 active = TRUE;
621 lck_mtx_unlock(&inp->inpcb_mtx);
622 goto out;
623 } else if (inp->inp_wantcnt != WNT_STOPUSING) {
624 lck_mtx_unlock(&inp->inpcb_mtx);
625 active = FALSE;
626 goto out;
627 }
628
629 /*
630 * We get here because the PCB is no longer searchable
631 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
632 * (usecount is 0). This covers all cases, including overflow
633 * sockets and those that are considered as "embryonic",
634 * i.e. created by sonewconn() in TCP input path, and have
635 * not yet been committed. For the former, we reduce the usecount
636 * to 0 as done by the code above. For the latter, the usecount
637 * would have been reduced to 0 as part of calling soabort() when the
638 * socket is dropped at the end of tcp_input().
639 */
640 if (so->so_usecount == 0) {
641 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
642 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
643 /* Become a regular mutex */
644 lck_mtx_convert_spin(&inp->inpcb_mtx);
645
646 /*
647 * If this tp still happens to be on the timer list,
648 * take it out
649 */
650 if (TIMER_IS_ON_LIST(tp)) {
651 tcp_remove_timer(tp);
652 }
653
654 if (inp->inp_state != INPCB_STATE_DEAD) {
655 #if INET6
656 if (SOCK_CHECK_DOM(so, PF_INET6))
657 in6_pcbdetach(inp);
658 else
659 #endif /* INET6 */
660 in_pcbdetach(inp);
661 }
662
663 if (mp_so) {
664 mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);
665
666 /* so is now unlinked from mp_so - let's drop the lock */
667 socket_unlock(mp_so, 1);
668 mp_so = NULL;
669 }
670
671 in_pcbdispose(inp);
672 active = FALSE;
673 goto out;
674 }
675
676 lck_mtx_unlock(&inp->inpcb_mtx);
677 active = TRUE;
678
679 out:
680 if (mp_so)
681 socket_unlock(mp_so, 1);
682
683 return (active);
684 }
685
686 /*
687 * TCP garbage collector callback (inpcb_timer_func_t).
688 *
689 * Returns the number of pcbs that will need to be gc-ed soon,
690 * returning > 0 will keep the timer active.
691 */
692 void
693 tcp_gc(struct inpcbinfo *ipi)
694 {
695 struct inpcb *inp, *nxt;
696 struct tcpcb *tw_tp, *tw_ntp;
697 #if TCPDEBUG
698 int ostate;
699 #endif
700 #if KDEBUG
701 static int tws_checked = 0;
702 #endif
703
704 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);
705
706 /*
707 * Update tcp_now here as it may get used while
708 * processing the slow timer.
709 */
710 calculate_tcp_clock();
711
712 /*
713 * Garbage collect socket/tcpcb: We need to acquire the list lock
714 * exclusively to do this
715 */
716
717 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
718 /* don't sweat it this time; cleanup was done last time */
719 if (tcp_gc_done == TRUE) {
720 tcp_gc_done = FALSE;
721 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
722 tws_checked, cur_tw_slot, 0, 0, 0);
723 /* Lock upgrade failed, give up this round */
724 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
725 return;
726 }
727 /* Upgrade failed, lost the lock; now take it again exclusive */
728 lck_rw_lock_exclusive(ipi->ipi_lock);
729 }
730 tcp_gc_done = TRUE;
731
732 LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
733 if (tcp_garbage_collect(inp, 0))
734 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
735 }
736
737 /* Now cleanup the time wait ones */
738 TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
739 /*
740 * We check the timestamp here without holding the
741 * socket lock for better performance. If there are
742 * any pcbs in time-wait, the timer will get rescheduled.
743 * Hence some error in this check can be tolerated.
744 *
745 * Sometimes a socket on the time-wait queue can be closed if
746 * the 2MSL timer expired but the application still has a
747 * usecount on it.
748 */
749 if (tw_tp->t_state == TCPS_CLOSED ||
750 TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
751 if (tcp_garbage_collect(tw_tp->t_inpcb, 1))
752 atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
753 }
754 }
755
756 /* take into account pcbs that are still in time_wait_slots */
757 atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);
758
759 lck_rw_done(ipi->ipi_lock);
760
761 /* Clean up the socache while we are here */
762 if (so_cache_timer())
763 atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
764
765 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
766 cur_tw_slot, 0, 0, 0);
767
768 return;
769 }
770
771 /*
772 * Cancel all timers for TCP tp.
773 */
774 void
775 tcp_canceltimers(struct tcpcb *tp)
776 {
777 int i;
778
779 tcp_remove_timer(tp);
780 for (i = 0; i < TCPT_NTIMERS; i++)
781 tp->t_timer[i] = 0;
782 tp->tentry.timer_start = tcp_now;
783 tp->tentry.index = TCPT_NONE;
784 }
785
786 int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
787 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
788
789 int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
790 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
791
792 static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */
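/*
 * Sketch of how the tables above are used by the retransmit timer
 * (see the TCPT_REXMT case below): the timeout is the base RTO
 * scaled by the backoff table and then clamped,
 *
 *     rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
 *     TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin,
 *         TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp));
 *
 * so a 200 ms RTO backs off through 200, 400, 800, ... ms up to the
 * 64x cap.  tcp_totbackoff is the sum of the tcp_backoff[] entries
 * (1+2+4+8+16+32 + 7*64 = 511) and bounds the total zero-window-probe
 * idle time in the persist timer.
 */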
793
794 void
795 tcp_rexmt_save_state(struct tcpcb *tp)
796 {
797 u_int32_t fsize;
798 if (TSTMP_SUPPORTED(tp)) {
799 /*
800 * Since timestamps are supported on the connection,
801 * we can do recovery as described in rfc 4015.
802 */
803 fsize = tp->snd_max - tp->snd_una;
804 tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
805 tp->snd_recover_prev = tp->snd_recover;
806 } else {
807 /*
808 * Timestamp option is not supported on this connection.
809 * Record ssthresh and cwnd so they can
810 * be recovered if this turns out to be a "bad" retransmit.
811 * A retransmit is considered "bad" if an ACK for this
812 * segment is received within RTT/2 interval; the assumption
813 * here is that the ACK was already in flight. See
814 * "On Estimating End-to-End Network Path Properties" by
815 * Allman and Paxson for more details.
816 */
817 tp->snd_cwnd_prev = tp->snd_cwnd;
818 tp->snd_ssthresh_prev = tp->snd_ssthresh;
819 tp->snd_recover_prev = tp->snd_recover;
820 if (IN_FASTRECOVERY(tp))
821 tp->t_flags |= TF_WASFRECOVERY;
822 else
823 tp->t_flags &= ~TF_WASFRECOVERY;
824 }
825 tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
826 tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
827 tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
828 }
829
830 /*
831 * Revert to the older segment size if there is an indication that PMTU
832 * blackhole detection was not needed.
833 */
834 void
835 tcp_pmtud_revert_segment_size(struct tcpcb *tp)
836 {
837 int32_t optlen;
838
839 VERIFY(tp->t_pmtud_saved_maxopd > 0);
840 tp->t_flags |= TF_PMTUD;
841 tp->t_flags &= ~TF_BLACKHOLE;
842 optlen = tp->t_maxopd - tp->t_maxseg;
843 tp->t_maxopd = tp->t_pmtud_saved_maxopd;
844 tp->t_maxseg = tp->t_maxopd - optlen;
845
846 /*
847 * Reset the slow-start flight size as it
848 * may depend on the new MSS
849 */
850 if (CC_ALGO(tp)->cwnd_init != NULL)
851 CC_ALGO(tp)->cwnd_init(tp);
852 tp->t_pmtud_start_ts = 0;
853 tcpstat.tcps_pmtudbh_reverted++;
854
855 /* change MSS according to recommendation, if there was one */
856 tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
857 }
858
859 /*
860 * TCP timer processing.
861 */
862 struct tcpcb *
863 tcp_timers(struct tcpcb *tp, int timer)
864 {
865 int32_t rexmt, optlen = 0, idle_time = 0;
866 struct socket *so;
867 struct tcptemp *t_template;
868 #if TCPDEBUG
869 int ostate;
870 #endif
871
872 #if INET6
873 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
874 #endif /* INET6 */
875 u_int64_t accsleep_ms;
876 u_int32_t last_sleep_ms = 0;
877
878 so = tp->t_inpcb->inp_socket;
879 idle_time = tcp_now - tp->t_rcvtime;
880
881 switch (timer) {
882
883 /*
884 * 2 MSL timeout in shutdown went off. If we're closed but
885 * still waiting for peer to close and connection has been idle
886 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
887 * delete connection control block.
888 * Otherwise (this case shouldn't happen), check again in a bit;
889 * we keep the socket in the main list in that case.
890 */
891 case TCPT_2MSL:
892 tcp_free_sackholes(tp);
893 if (tp->t_state != TCPS_TIME_WAIT &&
894 tp->t_state != TCPS_FIN_WAIT_2 &&
895 ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
896 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
897 (u_int32_t)TCP_CONN_KEEPINTVL(tp));
898 } else {
899 tp = tcp_close(tp);
900 return(tp);
901 }
902 break;
903
904 /*
905 * Retransmission timer went off. Message has not
906 * been acked within retransmit interval. Back off
907 * to a longer retransmit interval and retransmit one segment.
908 */
909 case TCPT_REXMT:
910 absolutetime_to_nanoseconds(mach_absolutetime_asleep,
911 &accsleep_ms);
912 accsleep_ms = accsleep_ms / 1000000UL;
913 if (accsleep_ms > tp->t_accsleep_ms)
914 last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
915 /*
916 * Drop a connection in the retransmit timer
917 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
918 * times
919 * 2. If the time spent in this retransmission episode is
920 * more than the time limit set with TCP_RXT_CONNDROPTIME
921 * socket option
922 * 3. If TCP_RXT_FINDROP socket option was set and
923 * we have already retransmitted the FIN 3 times without
924 * receiving an ack
925 */
926 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
927 (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
928 (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
929 ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
930 (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
931 (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
932 if (tp->t_state == TCPS_ESTABLISHED &&
933 tp->t_rxt_minimum_timeout > 0) {
934 /*
935 * Avoid dropping the connection if a minimum
936 * timeout is set and that time has not yet
937 * passed. We will keep retrying
938 * retransmissions at the maximum interval.
939 */
940 if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
941 tp->t_rxt_minimum_timeout))) {
942 tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
943 goto retransmit_packet;
944 }
945 }
946 if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
947 tcpstat.tcps_rxtfindrop++;
948 } else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
949 tcpstat.tcps_drop_after_sleep++;
950 } else {
951 tcpstat.tcps_timeoutdrop++;
952 }
953 if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
954 if (TCP_ECN_ENABLED(tp)) {
955 INP_INC_IFNET_STAT(tp->t_inpcb,
956 ecn_on.rxmit_drop);
957 } else {
958 INP_INC_IFNET_STAT(tp->t_inpcb,
959 ecn_off.rxmit_drop);
960 }
961 }
962 tp->t_rxtshift = TCP_MAXRXTSHIFT;
963 postevent(so, 0, EV_TIMEOUT);
964 soevent(so,
965 (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
966
967 if (TCP_ECN_ENABLED(tp) &&
968 tp->t_state == TCPS_ESTABLISHED)
969 tcp_heuristic_ecn_droprxmt(tp);
970
971 tp = tcp_drop(tp, tp->t_softerror ?
972 tp->t_softerror : ETIMEDOUT);
973
974 break;
975 }
976 retransmit_packet:
977 tcpstat.tcps_rexmttimeo++;
978 tp->t_accsleep_ms = accsleep_ms;
979
980 if (tp->t_rxtshift == 1 &&
981 tp->t_state == TCPS_ESTABLISHED) {
982 /* Set the time at which retransmission started. */
983 tp->t_rxtstart = tcp_now;
984
985 /*
986 * if this is the first retransmit timeout, save
987 * the state so that we can recover if the timeout
988 * is spurious.
989 */
990 tcp_rexmt_save_state(tp);
991 }
992 #if MPTCP
993 if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
994 (tp->t_state == TCPS_ESTABLISHED) &&
995 (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
996 mptcp_act_on_txfail(so);
997
998 }
999
1000 if (so->so_flags & SOF_MP_SUBFLOW) {
1001 struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1002
1003 mptcp_check_subflows_and_add(mpte);
1004 }
1005 #endif /* MPTCP */
1006
1007 if (tp->t_adaptive_wtimo > 0 &&
1008 tp->t_rxtshift > tp->t_adaptive_wtimo &&
1009 TCPS_HAVEESTABLISHED(tp->t_state)) {
1010 /* Send an event to the application */
1011 soevent(so,
1012 (SO_FILT_HINT_LOCKED|
1013 SO_FILT_HINT_ADAPTIVE_WTIMO));
1014 }
1015
1016 /*
1017 * If this is a retransmit timeout after PTO, the PTO
1018 * was not effective
1019 */
1020 if (tp->t_flagsext & TF_SENT_TLPROBE) {
1021 tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1022 tcpstat.tcps_rto_after_pto++;
1023 }
1024
1025 if (tp->t_flagsext & TF_DELAY_RECOVERY) {
1026 /*
1027 * Retransmit timer fired before entering recovery
1028 * on a connection with packet re-ordering. This
1029 * suggests that the reordering metrics computed
1030 * are not accurate.
1031 */
1032 tp->t_reorderwin = 0;
1033 tp->t_timer[TCPT_DELAYFR] = 0;
1034 tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
1035 }
1036
1037 if (tp->t_state == TCPS_SYN_RECEIVED)
1038 tcp_disable_tfo(tp);
1039
1040 if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1041 (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
1042 !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
1043 ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
1044 tp->t_rxtshift > 4)) {
1045 /*
1046 * For regular retransmissions, the first one is sent
1047 * as a tail-loss probe.
1048 * Thus, if rxtshift > 1, we have sent the segment
1049 * a total of 3 times.
1050 *
1051 * If we are in SYN-SENT state, there is no tail-loss
1052 * probe, so we have to let rxtshift go up to 3.
1053 */
1054 tcp_heuristic_tfo_middlebox(tp);
1055
1056 so->so_error = ENODATA;
1057 sorwakeup(so);
1058 sowwakeup(so);
1059
1060 tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
1061 tcpstat.tcps_tfo_sndblackhole++;
1062 }
1063
1064 if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1065 (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
1066 tp->t_rxtshift > 3) {
1067 if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
1068 tcp_heuristic_tfo_middlebox(tp);
1069
1070 so->so_error = ENODATA;
1071 sorwakeup(so);
1072 sowwakeup(so);
1073 }
1074 }
1075
1076 if (tp->t_state == TCPS_SYN_SENT) {
1077 rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
1078 tp->t_stat.synrxtshift = tp->t_rxtshift;
1079
1080 /* When retransmitting, disable TFO */
1081 if (tfo_enabled(tp) &&
1082 (!(so->so_flags1 & SOF1_DATA_AUTHENTICATED) ||
1083 (tp->t_flagsext & TF_FASTOPEN_HEUR))) {
1084 tp->t_flagsext &= ~TF_FASTOPEN;
1085 tp->t_tfo_flags |= TFO_F_SYN_LOSS;
1086 }
1087 } else {
1088 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
1089 }
1090
1091 TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
1092 TCP_ADD_REXMTSLOP(tp));
1093 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
1094
1095 if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb))
1096 goto fc_output;
1097
1098 tcp_free_sackholes(tp);
1099 /*
1100 * Check for potential Path MTU Discovery Black Hole
1101 */
1102 if (tcp_pmtud_black_hole_detect &&
1103 !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
1104 (tp->t_state == TCPS_ESTABLISHED)) {
1105 if ((tp->t_flags & TF_PMTUD) &&
1106 ((tp->t_flags & TF_MAXSEGSNT)
1107 || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) &&
1108 tp->t_rxtshift == 2) {
1109 /*
1110 * Enter Path MTU Black-hole Detection mechanism:
1111 * - Disable Path MTU Discovery (IP "DF" bit).
1112 * - Reduce MTU to lower value than what we
1113 * negotiated with the peer.
1114 */
1115 /* Disable Path MTU Discovery for now */
1116 tp->t_flags &= ~TF_PMTUD;
1117 /* Record that we may have found a black hole */
1118 tp->t_flags |= TF_BLACKHOLE;
1119 optlen = tp->t_maxopd - tp->t_maxseg;
1120 /* Keep track of previous MSS */
1121 tp->t_pmtud_saved_maxopd = tp->t_maxopd;
1122 tp->t_pmtud_start_ts = tcp_now;
1123 if (tp->t_pmtud_start_ts == 0)
1124 tp->t_pmtud_start_ts++;
1125 /* Reduce the MSS to intermediary value */
1126 if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
1127 tp->t_maxopd = tcp_pmtud_black_hole_mss;
1128 } else {
1129 tp->t_maxopd = /* use the default MSS */
1130 #if INET6
1131 isipv6 ? tcp_v6mssdflt :
1132 #endif /* INET6 */
1133 tcp_mssdflt;
1134 }
1135 tp->t_maxseg = tp->t_maxopd - optlen;
1136
1137 /*
1138 * Reset the slow-start flight size
1139 * as it may depend on the new MSS
1140 */
1141 if (CC_ALGO(tp)->cwnd_init != NULL)
1142 CC_ALGO(tp)->cwnd_init(tp);
1143 tp->snd_cwnd = tp->t_maxseg;
1144 }
1145 /*
1146 * If further retransmissions are still
1147 * unsuccessful with a lowered MTU, maybe this
1148 * isn't a Black Hole and we restore the previous
1149 * MSS and blackhole detection flags.
1150 */
1151 else {
1152
1153 if ((tp->t_flags & TF_BLACKHOLE) &&
1154 (tp->t_rxtshift > 4)) {
1155 tcp_pmtud_revert_segment_size(tp);
1156 tp->snd_cwnd = tp->t_maxseg;
1157 }
1158 }
1159 }
1160
1161
1162 /*
1163 * Disable rfc1323 and rfc1644 if we haven't got any
1164 * response to our SYN (after we reach the threshold)
1165 * to work-around some broken terminal servers (most of
1166 * which have hopefully been retired) that have bad VJ
1167 * header compression code which trashes TCP segments
1168 * containing unknown-to-them TCP options.
1169 * Do this only on non-local connections.
1170 */
1171 if (tp->t_state == TCPS_SYN_SENT &&
1172 tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres)
1173 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
1174
1175 /*
1176 * If losing, let the lower level know and try for
1177 * a better route. Also, if we backed off this far,
1178 * our srtt estimate is probably bogus. Clobber it
1179 * so we'll take the next rtt measurement as our srtt;
1180 * move the current srtt into rttvar to keep the current
1181 * retransmit times until then.
1182 */
1183 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
1184 #if INET6
1185 if (isipv6)
1186 in6_losing(tp->t_inpcb);
1187 else
1188 #endif /* INET6 */
1189 in_losing(tp->t_inpcb);
1190 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
1191 tp->t_srtt = 0;
1192 }
1193 tp->snd_nxt = tp->snd_una;
1194 /*
1195 * Note: We overload snd_recover to function also as the
1196 * snd_last variable described in RFC 2582
1197 */
1198 tp->snd_recover = tp->snd_max;
1199 /*
1200 * Force a segment to be sent.
1201 */
1202 tp->t_flags |= TF_ACKNOW;
1203
1204 /* If timing a segment in this window, stop the timer */
1205 tp->t_rtttime = 0;
1206
1207 if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1)
1208 tcpstat.tcps_tailloss_rto++;
1209
1210
1211 /*
1212 * RFC 5681 says: when a TCP sender detects segment loss
1213 * using retransmit timer and the given segment has already
1214 * been retransmitted by way of the retransmission timer at
1215 * least once, the value of ssthresh is held constant
1216 */
1217 if (tp->t_rxtshift == 1 &&
1218 CC_ALGO(tp)->after_timeout != NULL) {
1219 CC_ALGO(tp)->after_timeout(tp);
1220 /*
1221 * CWR notifications are to be sent on new data
1222 * right after Fast Retransmits and ECE
1223 * notification receipts.
1224 */
1225 if (TCP_ECN_ENABLED(tp))
1226 tp->ecn_flags |= TE_SENDCWR;
1227 }
1228
1229 EXIT_FASTRECOVERY(tp);
1230
1231 /* Exit cwnd non validated phase */
1232 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
1233
1234
1235 fc_output:
1236 tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);
1237
1238 (void) tcp_output(tp);
1239 break;
1240
1241 /*
1242 * Persistence timer into zero window.
1243 * Force a byte to be output, if possible.
1244 */
1245 case TCPT_PERSIST:
1246 tcpstat.tcps_persisttimeo++;
1247 /*
1248 * Hack: if the peer is dead/unreachable, we do not
1249 * time out if the window is closed. After a full
1250 * backoff, drop the connection if the idle time
1251 * (no responses to probes) reaches the maximum
1252 * backoff that we would use if retransmitting.
1253 *
1254 * Drop the connection if we reached the maximum allowed time for
1255 * Zero Window Probes without a non-zero update from the peer.
1256 * See rdar://5805356
1257 */
1258 if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
1259 (idle_time >= tcp_maxpersistidle ||
1260 idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
1261 ((tp->t_persist_stop != 0) &&
1262 TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
1263 tcpstat.tcps_persistdrop++;
1264 postevent(so, 0, EV_TIMEOUT);
1265 soevent(so,
1266 (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
1267 tp = tcp_drop(tp, ETIMEDOUT);
1268 break;
1269 }
1270 tcp_setpersist(tp);
1271 tp->t_flagsext |= TF_FORCE;
1272 (void) tcp_output(tp);
1273 tp->t_flagsext &= ~TF_FORCE;
1274 break;
1275
1276 /*
1277 * Keep-alive timer went off; send something
1278 * or drop connection if idle for too long.
1279 */
1280 case TCPT_KEEP:
1281 tcpstat.tcps_keeptimeo++;
1282 #if MPTCP
1283 /*
1284 * Regular TCP connections do not send keepalives after closing.
1285 * MPTCP must not either, after sending Data FINs.
1286 */
1287 struct mptcb *mp_tp = tptomptp(tp);
1288 if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
1289 (tp->t_state > TCPS_ESTABLISHED)) {
1290 goto dropit;
1291 } else if (mp_tp != NULL) {
1292 if ((mptcp_ok_to_keepalive(mp_tp) == 0))
1293 goto dropit;
1294 }
1295 #endif /* MPTCP */
1296 if (tp->t_state < TCPS_ESTABLISHED)
1297 goto dropit;
1298 if ((always_keepalive ||
1299 (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
1300 (tp->t_flagsext & TF_DETECT_READSTALL) ||
1301 (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
1302 (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
1303 if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp))
1304 goto dropit;
1305 /*
1306 * Send a packet designed to force a response
1307 * if the peer is up and reachable:
1308 * either an ACK if the connection is still alive,
1309 * or an RST if the peer has closed the connection
1310 * due to timeout or reboot.
1311 * Using sequence number tp->snd_una-1
1312 * causes the transmitted zero-length segment
1313 * to lie outside the receive window;
1314 * by the protocol spec, this requires the
1315 * correspondent TCP to respond.
1316 */
1317 tcpstat.tcps_keepprobe++;
1318 t_template = tcp_maketemplate(tp);
1319 if (t_template) {
1320 struct inpcb *inp = tp->t_inpcb;
1321 struct tcp_respond_args tra;
1322
1323 bzero(&tra, sizeof(tra));
1324 tra.nocell = INP_NO_CELLULAR(inp);
1325 tra.noexpensive = INP_NO_EXPENSIVE(inp);
1326 tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
1327 tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
1328 if (tp->t_inpcb->inp_flags & INP_BOUND_IF)
1329 tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
1330 else
1331 tra.ifscope = IFSCOPE_NONE;
1332 tcp_respond(tp, t_template->tt_ipgen,
1333 &t_template->tt_t, (struct mbuf *)NULL,
1334 tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
1335 (void) m_free(dtom(t_template));
1336 if (tp->t_flagsext & TF_DETECT_READSTALL)
1337 tp->t_rtimo_probes++;
1338 }
1339 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1340 TCP_CONN_KEEPINTVL(tp));
1341 } else {
1342 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1343 TCP_CONN_KEEPIDLE(tp));
1344 }
1345 if (tp->t_flagsext & TF_DETECT_READSTALL) {
1346 struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
1347 bool reenable_probe = false;
1348 /*
1349 * The keep alive packets sent to detect a read
1350 * stall did not get a response from the
1351 * peer. Generate more keep-alives to confirm this.
1352 * If the number of probes sent reaches the limit,
1353 * generate an event.
1354 */
1355 if (tp->t_adaptive_rtimo > 0) {
1356 if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
1357 /* Generate an event */
1358 soevent(so,
1359 (SO_FILT_HINT_LOCKED |
1360 SO_FILT_HINT_ADAPTIVE_RTIMO));
1361 tcp_keepalive_reset(tp);
1362 } else {
1363 reenable_probe = true;
1364 }
1365 } else if (outifp != NULL &&
1366 (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
1367 tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
1368 reenable_probe = true;
1369 } else {
1370 tp->t_flagsext &= ~TF_DETECT_READSTALL;
1371 }
1372 if (reenable_probe) {
1373 int ind = min(tp->t_rtimo_probes,
1374 TCP_MAXRXTSHIFT);
1375 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
1376 tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
1377 }
1378 }
1379 if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
1380 int ind;
1381
1382 tp->t_tfo_probes++;
1383 ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);
1384
1385 /*
1386 * We take the minimum of the time set by true
1387 * keepalive (see above) and the backed-off RTO. That
1388 * way we back off in case of packet loss but will never
1389 * time out more slowly than regular keepalive due to the
1390 * backing off.
1391 */
1392 tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
1393 tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
1394 tp->t_timer[TCPT_KEEP]);
1395 } else if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1396 tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
1397 /* Still no data! Let's assume a TFO-error and err out... */
1398 tcp_heuristic_tfo_middlebox(tp);
1399
1400 so->so_error = ENODATA;
1401 sorwakeup(so);
1402 tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
1403 tcpstat.tcps_tfo_blackhole++;
1404 }
1405 break;
1406 case TCPT_DELACK:
1407 if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
1408 tp->t_flags &= ~TF_DELACK;
1409 tp->t_timer[TCPT_DELACK] = 0;
1410 tp->t_flags |= TF_ACKNOW;
1411
1412 /*
1413 * If the delayed ack timer fired while stretching
1414 * acks, count the number of times the streaming
1415 * detection was not correct. If this exceeds a
1416 * threshold, disable stretch ack on this
1417 * connection.
1418 *
1419 * Also, go back to acking every other packet.
1420 */
1421 if ((tp->t_flags & TF_STRETCHACK)) {
1422 if (tp->t_unacksegs > 1 &&
1423 tp->t_unacksegs < maxseg_unacked)
1424 tp->t_stretchack_delayed++;
1425
1426 if (tp->t_stretchack_delayed >
1427 TCP_STRETCHACK_DELAY_THRESHOLD) {
1428 tp->t_flagsext |= TF_DISABLE_STRETCHACK;
1429 /*
1430 * Note the time at which stretch
1431 * ack was disabled automatically
1432 */
1433 tp->rcv_nostrack_ts = tcp_now;
1434 tcpstat.tcps_nostretchack++;
1435 tp->t_stretchack_delayed = 0;
1436 tp->rcv_nostrack_pkts = 0;
1437 }
1438 tcp_reset_stretch_ack(tp);
1439 }
1440
1441 /*
1442 * If we are measuring inter packet arrival jitter
1443 * for throttling a connection, this delayed ack
1444 * might be the reason for accumulating some
1445 * jitter. So let's restart the measurement.
1446 */
1447 CLEAR_IAJ_STATE(tp);
1448
1449 tcpstat.tcps_delack++;
1450 (void) tcp_output(tp);
1451 }
1452 break;
1453
1454 #if MPTCP
1455 case TCPT_JACK_RXMT:
1456 if ((tp->t_state == TCPS_ESTABLISHED) &&
1457 (tp->t_mpflags & TMPF_PREESTABLISHED) &&
1458 (tp->t_mpflags & TMPF_JOINED_FLOW)) {
1459 if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
1460 tcpstat.tcps_timeoutdrop++;
1461 postevent(so, 0, EV_TIMEOUT);
1462 soevent(so,
1463 (SO_FILT_HINT_LOCKED|
1464 SO_FILT_HINT_TIMEOUT));
1465 tp = tcp_drop(tp, tp->t_softerror ?
1466 tp->t_softerror : ETIMEDOUT);
1467 break;
1468 }
1469 tcpstat.tcps_join_rxmts++;
1470 tp->t_mpflags |= TMPF_SND_JACK;
1471 tp->t_flags |= TF_ACKNOW;
1472
1473 /*
1474 * No backoff is implemented for simplicity for this
1475 * corner case.
1476 */
1477 (void) tcp_output(tp);
1478 }
1479 break;
1480 #endif /* MPTCP */
1481
1482 case TCPT_PTO:
1483 {
1484 int32_t snd_len;
1485 tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1486
1487 /*
1488 * Check if the connection is in the right state to
1489 * send a probe
1490 */
1491 if (tp->t_state != TCPS_ESTABLISHED ||
1492 (tp->t_rxtshift > 0 && !(tp->t_flagsext & TF_PROBING))
1493 || tp->snd_max == tp->snd_una ||
1494 !SACK_ENABLED(tp) || !TAILQ_EMPTY(&tp->snd_holes) ||
1495 IN_FASTRECOVERY(tp))
1496 break;
1497
1498 /*
1499 * If there is no new data to send or if the
1500 * connection is limited by receive window then
1501 * retransmit the last segment, otherwise send
1502 * new data.
1503 */
1504 snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
1505 - (tp->snd_max - tp->snd_una);
1506 if (snd_len > 0) {
1507 tp->snd_nxt = tp->snd_max;
1508 } else {
1509 snd_len = min((tp->snd_max - tp->snd_una),
1510 tp->t_maxseg);
1511 tp->snd_nxt = tp->snd_max - snd_len;
1512 }
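/*
 * For illustration: with 3000 bytes in the send buffer,
 * snd_wnd = 10000 and 2000 bytes outstanding (snd_max - snd_una),
 * snd_len = 3000 - 2000 = 1000 > 0, so the probe sends new data
 * from snd_max.  If the buffer held only the 2000 outstanding
 * bytes, snd_len would be 0 and the probe would instead
 * retransmit the last (at most t_maxseg-sized) segment ending at
 * snd_max.
 */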
1513
1514 tcpstat.tcps_pto++;
1515 if (tp->t_flagsext & TF_PROBING)
1516 tcpstat.tcps_probe_if++;
1517
1518 /* If timing a segment in this window, stop the timer */
1519 tp->t_rtttime = 0;
1520 /* Note that tail loss probe is being sent */
1521 tp->t_flagsext |= TF_SENT_TLPROBE;
1522 tp->t_tlpstart = tcp_now;
1523
1524 tp->snd_cwnd += tp->t_maxseg;
1525 (void )tcp_output(tp);
1526 tp->snd_cwnd -= tp->t_maxseg;
1527
1528 tp->t_tlphighrxt = tp->snd_nxt;
1529 break;
1530 }
1531 case TCPT_DELAYFR:
1532 tp->t_flagsext &= ~TF_DELAY_RECOVERY;
1533
1534 /*
1535 * Don't do anything if one of the following is true:
1536 * - the connection is already in recovery
1537 * - the sequence up to snd_recover has been acknowledged
1538 * - the retransmit timeout has fired
1539 */
1540 if (IN_FASTRECOVERY(tp) ||
1541 SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
1542 tp->t_rxtshift > 0)
1543 break;
1544
1545 VERIFY(SACK_ENABLED(tp));
1546 tcp_rexmt_save_state(tp);
1547 if (CC_ALGO(tp)->pre_fr != NULL) {
1548 CC_ALGO(tp)->pre_fr(tp);
1549 if (TCP_ECN_ENABLED(tp))
1550 tp->ecn_flags |= TE_SENDCWR;
1551 }
1552 ENTER_FASTRECOVERY(tp);
1553
1554 tp->t_timer[TCPT_REXMT] = 0;
1555 tcpstat.tcps_sack_recovery_episode++;
1556 tp->t_sack_recovery_episode++;
1557 tp->sack_newdata = tp->snd_nxt;
1558 tp->snd_cwnd = tp->t_maxseg;
1559 tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
1560 (void) tcp_output(tp);
1561 break;
1562 dropit:
1563 tcpstat.tcps_keepdrops++;
1564 postevent(so, 0, EV_TIMEOUT);
1565 soevent(so,
1566 (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
1567 tp = tcp_drop(tp, ETIMEDOUT);
1568 break;
1569 }
1570 #if TCPDEBUG
1571 if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
1572 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
1573 PRU_SLOWTIMO);
1574 #endif
1575 return (tp);
1576 }
1577
1578 /* Remove a timer entry from timer list */
1579 void
1580 tcp_remove_timer(struct tcpcb *tp)
1581 {
1582 struct tcptimerlist *listp = &tcp_timer_list;
1583
1584 socket_lock_assert_owned(tp->t_inpcb->inp_socket);
1585 if (!(TIMER_IS_ON_LIST(tp))) {
1586 return;
1587 }
1588 lck_mtx_lock(listp->mtx);
1589
1590 /* Check if pcb is on timer list again after acquiring the lock */
1591 if (!(TIMER_IS_ON_LIST(tp))) {
1592 lck_mtx_unlock(listp->mtx);
1593 return;
1594 }
1595
1596 if (listp->next_te != NULL && listp->next_te == &tp->tentry)
1597 listp->next_te = LIST_NEXT(&tp->tentry, le);
1598
1599 LIST_REMOVE(&tp->tentry, le);
1600 tp->t_flags &= ~(TF_TIMER_ONLIST);
1601
1602 listp->entries--;
1603
1604 tp->tentry.le.le_next = NULL;
1605 tp->tentry.le.le_prev = NULL;
1606 lck_mtx_unlock(listp->mtx);
1607 }
1608
1609 /*
1610 * Function to check if the timerlist needs to be rescheduled to run
1611 * the timer entry correctly. Basically, this is to check if we can avoid
1612 * taking the list lock.
1613 */
1614
1615 static boolean_t
1616 need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
1617 {
1618 struct tcptimerlist *listp = &tcp_timer_list;
1619 int32_t diff;
1620
1621 /*
1622 * If the list is being processed then the state of the list is
1623 * in flux. In this case always acquire the lock and set the state
1624 * correctly.
1625 */
1626 if (listp->running)
1627 return (TRUE);
1628
1629 if (!listp->scheduled)
1630 return (TRUE);
1631
1632 diff = timer_diff(listp->runtime, 0, runtime, 0);
1633 if (diff <= 0) {
1634 /* The list is going to run before this timer */
1635 return (FALSE);
1636 } else {
1637 if (mode & TCP_TIMERLIST_10MS_MODE) {
1638 if (diff <= TCP_TIMER_10MS_QUANTUM)
1639 return (FALSE);
1640 } else if (mode & TCP_TIMERLIST_100MS_MODE) {
1641 if (diff <= TCP_TIMER_100MS_QUANTUM)
1642 return (FALSE);
1643 } else {
1644 if (diff <= TCP_TIMER_500MS_QUANTUM)
1645 return (FALSE);
1646 }
1647 }
1648 return (TRUE);
1649 }
1650
1651 void
1652 tcp_sched_timerlist(uint32_t offset)
1653 {
1654 uint64_t deadline = 0;
1655 struct tcptimerlist *listp = &tcp_timer_list;
1656
1657 LCK_MTX_ASSERT(listp->mtx, LCK_MTX_ASSERT_OWNED);
1658
1659 offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
1660 listp->runtime = tcp_now + offset;
1661 listp->schedtime = tcp_now;
1662 if (listp->runtime == 0) {
1663 listp->runtime++;
1664 offset++;
1665 }
1666
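/*
 * offset is in TCP_RETRANSHZ ticks (milliseconds); the USEC_PER_SEC
 * scale factor (1,000,000 ns) converts each tick to nanoseconds for
 * the thread-call deadline computed below.
 */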
1667 clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
1668
1669 thread_call_enter_delayed(listp->call, deadline);
1670 listp->scheduled = TRUE;
1671 }
1672
1673 /*
1674 * Function to run the timers for a connection.
1675 *
1676 * Returns the offset of next timer to be run for this connection which
1677 * can be used to reschedule the timerlist.
1678 *
1679 * te_mode is an out parameter that indicates the modes of active
1680 * timers for this connection.
1681 */
1682 u_int32_t
1683 tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
1684 u_int16_t probe_if_index)
1685 {
1686 struct socket *so;
1687 u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
1688 u_int32_t timer_val, offset = 0, lo_timer = 0;
1689 int32_t diff;
1690 boolean_t needtorun[TCPT_NTIMERS];
1691 int count = 0;
1692
1693 VERIFY(tp != NULL);
1694 bzero(needtorun, sizeof(needtorun));
1695 *te_mode = 0;
1696
1697 socket_lock(tp->t_inpcb->inp_socket, 1);
1698
1699 so = tp->t_inpcb->inp_socket;
1700 /* Release the want count on inp */
1701 if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
1702 == WNT_STOPUSING) {
1703 if (TIMER_IS_ON_LIST(tp)) {
1704 tcp_remove_timer(tp);
1705 }
1706
1707 /* Looks like the TCP connection got closed while we
1708 * were waiting for the lock. Done
1709 */
1710 goto done;
1711 }
1712
1713 /*
1714 * If this connection is over an interface that needs to
1715 * be probed, send probe packets to reinitiate communication.
1716 */
1717 if (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL &&
1718 tp->t_inpcb->inp_last_outifp->if_index == probe_if_index) {
1719 tp->t_flagsext |= TF_PROBING;
1720 tcp_timers(tp, TCPT_PTO);
1721 tp->t_timer[TCPT_PTO] = 0;
1722 tp->t_flagsext &= ~TF_PROBING;
1723 }
1724
1725 /*
1726 * Since the timer thread needs to wait for tcp lock, it may race
1727 * with another thread that can cancel or reschedule the timer
1728 * that is about to run. Check if we need to run anything.
1729 */
1730 if ((index = tp->tentry.index) == TCPT_NONE)
1731 goto done;
1732
1733 timer_val = tp->t_timer[index];
1734
1735 diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
1736 if (diff > 0) {
1737 if (tp->tentry.index != TCPT_NONE) {
1738 offset = diff;
1739 *(te_mode) = tp->tentry.mode;
1740 }
1741 goto done;
1742 }
1743
1744 tp->t_timer[index] = 0;
1745 if (timer_val > 0) {
1746 tp = tcp_timers(tp, index);
1747 if (tp == NULL)
1748 goto done;
1749 }
1750
1751 /*
1752 * Check if there are any other timers that need to be run.
 1753          * While doing so, adjust the timer values with respect to tcp_now.
1754 */
1755 tp->tentry.mode = 0;
1756 for (i = 0; i < TCPT_NTIMERS; ++i) {
1757 if (tp->t_timer[i] != 0) {
1758 diff = timer_diff(tp->tentry.timer_start,
1759 tp->t_timer[i], tcp_now, 0);
1760 if (diff <= 0) {
1761 needtorun[i] = TRUE;
1762 count++;
1763 } else {
1764 tp->t_timer[i] = diff;
1765 needtorun[i] = FALSE;
1766 if (lo_timer == 0 || diff < lo_timer) {
1767 lo_timer = diff;
1768 lo_index = i;
1769 }
1770 TCP_SET_TIMER_MODE(tp->tentry.mode, i);
1771 }
1772 }
1773 }
1774
1775 tp->tentry.timer_start = tcp_now;
1776 tp->tentry.index = lo_index;
1777 VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
1778
1779 if (tp->tentry.index != TCPT_NONE) {
1780 tp->tentry.runtime = tp->tentry.timer_start +
1781 tp->t_timer[tp->tentry.index];
1782 if (tp->tentry.runtime == 0)
1783 tp->tentry.runtime++;
1784 }
1785
1786 if (count > 0) {
 1787                 /* Run any other timers that are due at this time. */
1788 for (i = 0; i < TCPT_NTIMERS; ++i) {
1789 if (needtorun[i]) {
1790 tp->t_timer[i] = 0;
1791 tp = tcp_timers(tp, i);
1792 if (tp == NULL) {
1793 offset = 0;
1794 *(te_mode) = 0;
1795 goto done;
1796 }
1797 }
1798 }
1799 tcp_set_lotimer_index(tp);
1800 }
1801
1802 if (tp->tentry.index < TCPT_NONE) {
1803 offset = tp->t_timer[tp->tentry.index];
1804 *(te_mode) = tp->tentry.mode;
1805 }
1806
1807 done:
1808 if (tp != NULL && tp->tentry.index == TCPT_NONE) {
1809 tcp_remove_timer(tp);
1810 offset = 0;
1811 }
1812
1813 socket_unlock(so, 1);
 1814         return (offset);
1815 }
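/*
 * A minimal sketch of how a caller is expected to use this routine
 * (illustrative only; tcp_run_timerlist() below is the real caller, and the
 * caller must already hold an inp want count, which is released in here):
 *
 *	u_int16_t mode = 0;
 *	u_int32_t off = tcp_run_conn_timer(tp, &mode, 0);
 *	if (off > 0 && mode != 0) {
 *		// "off" ticks remain until this connection's next timer and
 *		// "mode" carries its TCP_TIMERLIST_*_MODE bits, so both can
 *		// be folded into the timer list's next schedule.
 *	} else {
 *		// no timer remains armed for this connection
 *	}
 */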
1816
1817 void
1818 tcp_run_timerlist(void * arg1, void * arg2)
1819 {
1820 #pragma unused(arg1, arg2)
1821 struct tcptimerentry *te, *next_te;
1822 struct tcptimerlist *listp = &tcp_timer_list;
1823 struct tcpcb *tp;
1824 uint32_t next_timer = 0; /* offset of the next timer on the list */
1825 u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */
1826 u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
1827 uint32_t active_count = 0;
1828
1829 calculate_tcp_clock();
1830
1831 lck_mtx_lock(listp->mtx);
1832
1833 int32_t drift = tcp_now - listp->runtime;
1834 if (drift <= 1) {
1835 tcpstat.tcps_timer_drift_le_1_ms++;
1836 } else if (drift <= 10) {
1837 tcpstat.tcps_timer_drift_le_10_ms++;
1838 } else if (drift <= 20) {
1839 tcpstat.tcps_timer_drift_le_20_ms++;
1840 } else if (drift <= 50) {
1841 tcpstat.tcps_timer_drift_le_50_ms++;
1842 } else if (drift <= 100) {
1843 tcpstat.tcps_timer_drift_le_100_ms++;
1844 } else if (drift <= 200) {
1845 tcpstat.tcps_timer_drift_le_200_ms++;
1846 } else if (drift <= 500) {
1847 tcpstat.tcps_timer_drift_le_500_ms++;
1848 } else if (drift <= 1000) {
1849 tcpstat.tcps_timer_drift_le_1000_ms++;
1850 } else {
1851 tcpstat.tcps_timer_drift_gt_1000_ms++;
1852 }
1853
1854 listp->running = TRUE;
1855
1856 LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
1857 uint32_t offset = 0;
1858 uint32_t runtime = te->runtime;
1859 if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
1860 offset = timer_diff(runtime, 0, tcp_now, 0);
1861 if (next_timer == 0 || offset < next_timer) {
1862 next_timer = offset;
1863 }
1864 list_mode |= te->mode;
1865 continue;
1866 }
1867
1868 tp = TIMERENTRY_TO_TP(te);
1869
1870 /*
1871 * Acquire an inp wantcnt on the inpcb so that the socket
1872 * won't get detached even if tcp_close is called
1873 */
1874 if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
1875 == WNT_STOPUSING) {
1876 /*
 1877                          * Somehow this pcb went into the dead state while
 1878                          * on the timer list; just take it off the list.
1879 * Since the timer list entry pointers are
1880 * protected by the timer list lock, we can
1881 * do it here without the socket lock.
1882 */
1883 if (TIMER_IS_ON_LIST(tp)) {
1884 tp->t_flags &= ~(TF_TIMER_ONLIST);
1885 LIST_REMOVE(&tp->tentry, le);
1886 listp->entries--;
1887
1888 tp->tentry.le.le_next = NULL;
1889 tp->tentry.le.le_prev = NULL;
1890 }
1891 continue;
1892 }
1893 active_count++;
1894
1895 /*
 1896                  * Store the next timerentry pointer before releasing the
 1897                  * list lock. If that entry gets removed while the lock is
 1898                  * dropped, this pointer will be updated to point to the
 1899                  * element that follows it.
1900 */
1901 listp->next_te = next_te;
1902
1903 VERIFY_NEXT_LINK(&tp->tentry, le);
1904 VERIFY_PREV_LINK(&tp->tentry, le);
1905
1906 lck_mtx_unlock(listp->mtx);
1907
1908 offset = tcp_run_conn_timer(tp, &te_mode,
1909 listp->probe_if_index);
1910
1911 lck_mtx_lock(listp->mtx);
1912
1913 next_te = listp->next_te;
1914 listp->next_te = NULL;
1915
1916 if (offset > 0 && te_mode != 0) {
1917 list_mode |= te_mode;
1918
1919 if (next_timer == 0 || offset < next_timer)
1920 next_timer = offset;
1921 }
1922 }
1923
1924 if (!LIST_EMPTY(&listp->lhead)) {
1925 u_int16_t next_mode = 0;
1926 if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
1927 (listp->pref_mode & TCP_TIMERLIST_10MS_MODE))
1928 next_mode = TCP_TIMERLIST_10MS_MODE;
1929 else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
1930 (listp->pref_mode & TCP_TIMERLIST_100MS_MODE))
1931 next_mode = TCP_TIMERLIST_100MS_MODE;
1932 else
1933 next_mode = TCP_TIMERLIST_500MS_MODE;
1934
1935 if (next_mode != TCP_TIMERLIST_500MS_MODE) {
1936 listp->idleruns = 0;
1937 } else {
1938 /*
 1939                          * The next required mode is the slow mode, but if
 1940                          * the previous one was a faster mode and we have not
 1941                          * had enough idle runs yet, repeat the previous mode.
1942 *
1943 * We try to keep the timer list in fast mode for
1944 * some idle time in expectation of new data.
1945 */
1946 if (listp->mode != next_mode &&
1947 listp->idleruns < timer_fastmode_idlemax) {
1948 listp->idleruns++;
1949 next_mode = listp->mode;
1950 next_timer = TCP_TIMER_100MS_QUANTUM;
1951 } else {
1952 listp->idleruns = 0;
1953 }
1954 }
1955 listp->mode = next_mode;
1956 if (listp->pref_offset != 0)
1957 next_timer = min(listp->pref_offset, next_timer);
1958
1959 if (listp->mode == TCP_TIMERLIST_500MS_MODE)
1960 next_timer = max(next_timer,
1961 TCP_TIMER_500MS_QUANTUM);
1962
1963 tcp_sched_timerlist(next_timer);
1964 } else {
1965 /*
 1966                  * The list is empty, so nothing needs a precise reschedule;
 1967                  * still run the list periodically, at a much coarser interval.
1968 */
1969 tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
1970 }
1971
1972 listp->running = FALSE;
1973 listp->pref_mode = 0;
1974 listp->pref_offset = 0;
1975 listp->probe_if_index = 0;
1976
1977 lck_mtx_unlock(listp->mtx);
1978 }
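/*
 * Illustration of the fast-mode hold-down above (timer_fastmode_idlemax and
 * the quantum values assumed to be their defaults): if the previous pass ran
 * the list in 100 ms mode and this pass only finds 500 ms work, the list
 * keeps firing at the 100 ms quantum for up to timer_fastmode_idlemax idle
 * passes before dropping back to 500 ms mode, so a connection that becomes
 * active again shortly after going idle still sees fine-grained timers.
 */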
1979
1980 /*
1981 * Function to check if the timerlist needs to be rescheduled to run this
1982 * connection's timers correctly.
1983 */
1984 void
1985 tcp_sched_timers(struct tcpcb *tp)
1986 {
1987 struct tcptimerentry *te = &tp->tentry;
1988 u_int16_t index = te->index;
1989 u_int16_t mode = te->mode;
1990 struct tcptimerlist *listp = &tcp_timer_list;
1991 int32_t offset = 0;
1992 boolean_t list_locked = FALSE;
1993
1994 if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
1995 /* Just return without adding the dead pcb to the list */
1996 if (TIMER_IS_ON_LIST(tp)) {
1997 tcp_remove_timer(tp);
1998 }
1999 return;
2000 }
2001
2002 if (index == TCPT_NONE) {
2003 /* Nothing to run */
2004 tcp_remove_timer(tp);
2005 return;
2006 }
2007
2008 /*
2009 * compute the offset at which the next timer for this connection
2010 * has to run.
2011 */
2012 offset = timer_diff(te->runtime, 0, tcp_now, 0);
2013 if (offset <= 0) {
2014 offset = 1;
2015 tcp_timer_advanced++;
2016 }
2017
2018 if (!TIMER_IS_ON_LIST(tp)) {
2019 if (!list_locked) {
2020 lck_mtx_lock(listp->mtx);
2021 list_locked = TRUE;
2022 }
2023
2024 if (!TIMER_IS_ON_LIST(tp)) {
2025 LIST_INSERT_HEAD(&listp->lhead, te, le);
2026 tp->t_flags |= TF_TIMER_ONLIST;
2027
2028 listp->entries++;
2029 if (listp->entries > listp->maxentries)
2030 listp->maxentries = listp->entries;
2031
2032 /* if the list is not scheduled, just schedule it */
2033 if (!listp->scheduled)
2034 goto schedule;
2035 }
2036 }
2037
2038 /*
2039 * Timer entry is currently on the list, check if the list needs
2040 * to be rescheduled.
2041 */
2042 if (need_to_resched_timerlist(te->runtime, mode)) {
2043 tcp_resched_timerlist++;
2044
2045 if (!list_locked) {
2046 lck_mtx_lock(listp->mtx);
2047 list_locked = TRUE;
2048 }
2049
2050 VERIFY_NEXT_LINK(te, le);
2051 VERIFY_PREV_LINK(te, le);
2052
2053 if (listp->running) {
2054 listp->pref_mode |= mode;
2055 if (listp->pref_offset == 0 ||
2056 offset < listp->pref_offset) {
2057 listp->pref_offset = offset;
2058 }
2059 } else {
2060 /*
 2061                          * The list could have been rescheduled while
 2062                          * this thread was waiting for the lock.
2063 */
2064 if (listp->scheduled) {
2065 int32_t diff;
2066 diff = timer_diff(listp->runtime, 0,
2067 tcp_now, offset);
2068 if (diff <= 0)
2069 goto done;
2070 else
2071 goto schedule;
2072 } else {
2073 goto schedule;
2074 }
2075 }
2076 }
2077 goto done;
2078
2079 schedule:
2080 /*
 2081          * Since a connection with timers is getting scheduled, the timer
 2082          * list moves from the idle to the active state, which is why
 2083          * idleruns is reset.
2084 */
2085 if (mode & TCP_TIMERLIST_10MS_MODE) {
2086 listp->mode = TCP_TIMERLIST_10MS_MODE;
2087 listp->idleruns = 0;
2088 offset = min(offset, TCP_TIMER_10MS_QUANTUM);
2089 } else if (mode & TCP_TIMERLIST_100MS_MODE) {
2090 if (listp->mode > TCP_TIMERLIST_100MS_MODE)
2091 listp->mode = TCP_TIMERLIST_100MS_MODE;
2092 listp->idleruns = 0;
2093 offset = min(offset, TCP_TIMER_100MS_QUANTUM);
2094 }
2095 tcp_sched_timerlist(offset);
2096
2097 done:
2098 if (list_locked)
2099 lck_mtx_unlock(listp->mtx);
2100
2101 return;
2102 }
2103
2104 static inline void
2105 tcp_set_lotimer_index(struct tcpcb *tp)
2106 {
2107 uint16_t i, lo_index = TCPT_NONE, mode = 0;
2108 uint32_t lo_timer = 0;
2109 for (i = 0; i < TCPT_NTIMERS; ++i) {
2110 if (tp->t_timer[i] != 0) {
2111 TCP_SET_TIMER_MODE(mode, i);
2112 if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
2113 lo_timer = tp->t_timer[i];
2114 lo_index = i;
2115 }
2116 }
2117 }
2118 tp->tentry.index = lo_index;
2119 tp->tentry.mode = mode;
2120 VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2121
2122 if (tp->tentry.index != TCPT_NONE) {
2123 tp->tentry.runtime = tp->tentry.timer_start
2124 + tp->t_timer[tp->tentry.index];
2125 if (tp->tentry.runtime == 0)
2126 tp->tentry.runtime++;
2127 }
2128 }
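/*
 * TCP_SET_TIMER_MODE() is assumed to OR into "mode" the
 * TCP_TIMERLIST_{10MS,100MS,500MS}_MODE class associated with timer index i;
 * tentry.mode therefore describes how precisely the timer list must run to
 * honor every armed timer, while tentry.index and tentry.runtime only track
 * the earliest one.
 */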
2129
2130 void
2131 tcp_check_timer_state(struct tcpcb *tp)
2132 {
2133 socket_lock_assert_owned(tp->t_inpcb->inp_socket);
2134
2135 if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
2136 return;
2137
2138 tcp_set_lotimer_index(tp);
2139
2140 tcp_sched_timers(tp);
2141 return;
2142 }
2143
2144 static inline void
2145 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
2146 {
2147 /* handle wrap around */
2148 int32_t diff = (int32_t) (cur - *prev);
2149 if (diff > 0)
2150 *dest = diff;
2151 else
2152 *dest = 0;
2153 *prev = cur;
2154 return;
2155 }
2156
2157 static inline void
2158 tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
2159 {
2160 /* handle wrap around */
2161 int64_t diff = (int64_t) (cur - *prev);
2162 if (diff > 0)
2163 *dest = diff;
2164 else
2165 *dest = 0;
2166 *prev = cur;
2167 return;
2168 }
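/*
 * Worked example of the wrap-around handling (values illustrative): with
 * *prev == 0xfffffff0 and cur == 0x10, the unsigned subtraction yields 0x20,
 * which stays positive after the cast, so *dest becomes 32 even though the
 * counter wrapped. A counter that was reset instead (cur far below *prev)
 * produces a negative difference and is reported as 0.
 */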
2169
2170 __private_extern__ void
2171 tcp_report_stats(void)
2172 {
2173 struct nstat_sysinfo_data data;
2174 struct sockaddr_in dst;
2175 struct sockaddr_in6 dst6;
2176 struct rtentry *rt = NULL;
2177 static struct tcp_last_report_stats prev;
2178 u_int64_t var, uptime;
2179
2180 #define stat data.u.tcp_stats
2181 if (((uptime = net_uptime()) - tcp_last_report_time) <
2182 tcp_report_stats_interval)
2183 return;
2184
2185 tcp_last_report_time = uptime;
2186
2187 bzero(&data, sizeof(data));
2188 data.flags = NSTAT_SYSINFO_TCP_STATS;
2189
2190 bzero(&dst, sizeof(dst));
2191 dst.sin_len = sizeof(dst);
2192 dst.sin_family = AF_INET;
2193
2194 /* ipv4 avg rtt */
2195 lck_mtx_lock(rnh_lock);
2196 rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
2197 rt_tables[AF_INET], IFSCOPE_NONE);
2198 lck_mtx_unlock(rnh_lock);
2199 if (rt != NULL) {
2200 RT_LOCK(rt);
2201 if (rt_primary_default(rt, rt_key(rt)) &&
2202 rt->rt_stats != NULL) {
2203 stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
2204 }
2205 RT_UNLOCK(rt);
2206 rtfree(rt);
2207 rt = NULL;
2208 }
2209
2210 /* ipv6 avg rtt */
2211 bzero(&dst6, sizeof(dst6));
2212 dst6.sin6_len = sizeof(dst6);
2213 dst6.sin6_family = AF_INET6;
2214
2215 lck_mtx_lock(rnh_lock);
 2216         rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
2217 rt_tables[AF_INET6], IFSCOPE_NONE);
2218 lck_mtx_unlock(rnh_lock);
2219 if (rt != NULL) {
2220 RT_LOCK(rt);
2221 if (rt_primary_default(rt, rt_key(rt)) &&
2222 rt->rt_stats != NULL) {
2223 stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
2224 }
2225 RT_UNLOCK(rt);
2226 rtfree(rt);
2227 rt = NULL;
2228 }
2229
2230 /* send packet loss rate, shift by 10 for precision */
2231 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
2232 var = tcpstat.tcps_sndrexmitpack << 10;
2233 stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
2234 }
2235
2236 /* recv packet loss rate, shift by 10 for precision */
2237 if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2238 var = tcpstat.tcps_recovered_pkts << 10;
2239 stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
2240 }
2241
2242 /* RTO after tail loss, shift by 10 for precision */
2243 if (tcpstat.tcps_sndrexmitpack > 0
2244 && tcpstat.tcps_tailloss_rto > 0) {
2245 var = tcpstat.tcps_tailloss_rto << 10;
2246 stat.send_tlrto_rate =
2247 (var * 100) / tcpstat.tcps_sndrexmitpack;
2248 }
2249
2250 /* packet reordering */
2251 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2252 var = tcpstat.tcps_reordered_pkts << 10;
2253 stat.send_reorder_rate =
2254 (var * 100) / tcpstat.tcps_sndpack;
2255 }
2256
2257 if (tcp_ecn_outbound == 1)
2258 stat.ecn_client_enabled = 1;
2259 if (tcp_ecn_inbound == 1)
2260 stat.ecn_server_enabled = 1;
2261 tcp_cumulative_stat(tcpstat.tcps_connattempt,
2262 &prev.tcps_connattempt, &stat.connection_attempts);
2263 tcp_cumulative_stat(tcpstat.tcps_accepts,
2264 &prev.tcps_accepts, &stat.connection_accepts);
2265 tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2266 &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2267 tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2268 &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2269 tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2270 &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2271 tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2272 &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2273 tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2274 &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2275 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2276 &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2277 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2278 &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2279 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2280 &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2281 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2282 &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2285 tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2286 &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2289 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2290 &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2291 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2292 &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2293 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2294 &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2295 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2296 &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2297 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2298 &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2299 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2300 &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2301 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2302 &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2303 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2304 &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2305 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2306 &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2307 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2308 &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2309 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2310 &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2311 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2312 &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2313 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2314 &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2315 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2316 &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2317 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2318 &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2319 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2320 &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2321 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2322 &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2323 tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2324 &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2325 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
2326 &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
2327 tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
2328 &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
2329 tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
2330 &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
2331 tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
2332 &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
2333
2334
2335 tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
 2336             &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
 2337         tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
 2338             &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
 2339         tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
 2340             &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
 2341         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
 2342             &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
 2343         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
 2344             &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
 2345         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
 2346             &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
 2347         tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
 2348             &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
 2349         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
 2350             &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
 2351         tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
 2352             &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
 2353         tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
 2354             &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
 2355         tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
 2356             &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
 2357         tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
 2358             &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
 2359         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
 2360             &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
 2361         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
 2362             &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
 2363         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
 2364             &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
 2365         tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
 2366             &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
 2367         tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
 2368             &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
 2369         tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
 2370             &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
 2371         tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
 2372             &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
 2373         tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
 2374             &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
 2375         tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
 2376             &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
 2377         tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
 2378             &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
 2379         tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
 2380             &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
 2381         tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
 2382             &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
 2383         tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
 2384             &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
 2385         tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
 2386             &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
 2387         tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
 2388             &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
 2389         tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
 2390             &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
2391
2392
2393 nstat_sysinfo_send_data(&data);
2394
2395 #undef stat
2396 }
2397
2398 void
2399 tcp_interface_send_probe(u_int16_t probe_if_index)
2400 {
2401 int32_t offset = 0;
2402 struct tcptimerlist *listp = &tcp_timer_list;
2403
2404 /* Make sure TCP clock is up to date */
2405 calculate_tcp_clock();
2406
2407 lck_mtx_lock(listp->mtx);
2408 if (listp->probe_if_index > 0) {
2409 tcpstat.tcps_probe_if_conflict++;
2410 goto done;
2411 }
2412
2413 listp->probe_if_index = probe_if_index;
2414 if (listp->running)
2415 goto done;
2416
2417 /*
2418 * Reschedule the timerlist to run within the next 10ms, which is
2419 * the fastest that we can do.
2420 */
2421 offset = TCP_TIMER_10MS_QUANTUM;
2422 if (listp->scheduled) {
2423 int32_t diff;
2424 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2425 if (diff <= 0) {
2426 /* The timer will fire sooner than what's needed */
2427 goto done;
2428 }
2429 }
2430 listp->mode = TCP_TIMERLIST_10MS_MODE;
2431 listp->idleruns = 0;
2432
2433 tcp_sched_timerlist(offset);
2434
2435 done:
2436 lck_mtx_unlock(listp->mtx);
2437 return;
2438 }
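/*
 * Once the rescheduled list fires, tcp_run_timerlist() hands
 * listp->probe_if_index down to tcp_run_conn_timer(), which (as seen above)
 * temporarily sets TF_PROBING and runs TCPT_PTO on every connection whose
 * last outgoing interface matches, so the probes are sent from the next
 * timer-list run rather than from this context.
 */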
2439
2440 /*
 2441  * Enable read probes on this connection if:
 2442  * - it is in the ESTABLISHED state
 2443  * - it has no outstanding data (snd_max == snd_una)
 2444  * - its last outgoing interface matches the given ifp
 2445  * - no read probes have been sent on it already
2446 */
2447 static void
2448 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2449 {
2450 if (tp->t_state == TCPS_ESTABLISHED &&
2451 tp->snd_max == tp->snd_una &&
2452 tp->t_inpcb->inp_last_outifp == ifp &&
2453 !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2454 tp->t_rtimo_probes == 0) {
2455 tp->t_flagsext |= TF_DETECT_READSTALL;
2456 tp->t_rtimo_probes = 0;
2457 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2458 TCP_TIMER_10MS_QUANTUM);
2459 if (tp->tentry.index == TCPT_NONE) {
2460 tp->tentry.index = TCPT_KEEP;
2461 tp->tentry.runtime = tcp_now +
2462 TCP_TIMER_10MS_QUANTUM;
2463 } else {
2464 int32_t diff = 0;
2465
2466 /* Reset runtime to be in next 10ms */
2467 diff = timer_diff(tp->tentry.runtime, 0,
2468 tcp_now, TCP_TIMER_10MS_QUANTUM);
2469 if (diff > 0) {
2470 tp->tentry.index = TCPT_KEEP;
2471 tp->tentry.runtime = tcp_now +
2472 TCP_TIMER_10MS_QUANTUM;
2473 if (tp->tentry.runtime == 0)
2474 tp->tentry.runtime++;
2475 }
2476 }
2477 }
2478 }
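/*
 * The timer entry is adjusted in place here instead of going through
 * tcp_sched_timers(); the apparent expectation (see tcp_probe_connectivity()
 * below) is that the caller walks many PCBs under the pcbinfo lock and
 * reschedules the timer list only once afterwards.
 */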
2479
2480 /*
2481 * Disable read probe and reset the keep alive timer
2482 */
2483 static void
2484 tcp_disable_read_probe(struct tcpcb *tp)
2485 {
2486 if (tp->t_adaptive_rtimo == 0 &&
2487 ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2488 tp->t_rtimo_probes > 0)) {
2489 tcp_keepalive_reset(tp);
2490
2491 if (tp->t_mpsub)
2492 mptcp_reset_keepalive(tp);
2493 }
2494 }
2495
2496 /*
 2497  * Enable or disable read probes on connections going over a particular
 2498  * interface, and reschedule the TCP timerlist to run within the next 10ms.
2499 */
2500 void
2501 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2502 {
2503 int32_t offset;
2504 struct tcptimerlist *listp = &tcp_timer_list;
2505 struct inpcbinfo *pcbinfo = &tcbinfo;
2506 struct inpcb *inp, *nxt;
2507
2508 if (ifp == NULL)
2509 return;
2510
2511 /* update clock */
2512 calculate_tcp_clock();
2513
2514 /*
 2515          * Enable or disable read probes on all connections that are
 2516          * active/established on this interface.
2517 */
2518 lck_rw_lock_shared(pcbinfo->ipi_lock);
2519
2520 LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2521 struct tcpcb *tp = NULL;
2522 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2523 WNT_STOPUSING)
2524 continue;
2525
2526 /* Acquire lock to look at the state of the connection */
2527 socket_lock(inp->inp_socket, 1);
2528
2529 /* Release the want count */
2530 if (inp->inp_ppcb == NULL ||
2531 (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
2532 socket_unlock(inp->inp_socket, 1);
2533 continue;
2534 }
2535 tp = intotcpcb(inp);
2536 if (enable)
2537 tcp_enable_read_probe(tp, ifp);
2538 else
2539 tcp_disable_read_probe(tp);
2540
2541 socket_unlock(inp->inp_socket, 1);
2542 }
2543 lck_rw_done(pcbinfo->ipi_lock);
2544
2545 lck_mtx_lock(listp->mtx);
2546 if (listp->running) {
2547 listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2548 goto done;
2549 }
2550
2551 /* Reschedule within the next 10ms */
2552 offset = TCP_TIMER_10MS_QUANTUM;
2553 if (listp->scheduled) {
2554 int32_t diff;
2555 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2556 if (diff <= 0) {
2557 /* The timer will fire sooner than what's needed */
2558 goto done;
2559 }
2560 }
2561 listp->mode = TCP_TIMERLIST_10MS_MODE;
2562 listp->idleruns = 0;
2563
2564 tcp_sched_timerlist(offset);
2565 done:
2566 lck_mtx_unlock(listp->mtx);
2567 return;
2568 }
2569
2570 inline void
2571 tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
2572 {
2573 struct if_cellular_status_v1 *ifsr;
2574 u_int32_t optlen;
2575 ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2576 if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2577 optlen = tp->t_maxopd - tp->t_maxseg;
2578
2579 if (ifsr->mss_recommended ==
2580 IF_CELL_UL_MSS_RECOMMENDED_NONE &&
2581 tp->t_cached_maxopd > 0 &&
2582 tp->t_maxopd < tp->t_cached_maxopd) {
2583 tp->t_maxopd = tp->t_cached_maxopd;
2584 tcpstat.tcps_mss_to_default++;
2585 } else if (ifsr->mss_recommended ==
2586 IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
2587 tp->t_maxopd > tcp_mss_rec_medium) {
2588 tp->t_cached_maxopd = tp->t_maxopd;
2589 tp->t_maxopd = tcp_mss_rec_medium;
2590 tcpstat.tcps_mss_to_medium++;
2591 } else if (ifsr->mss_recommended ==
2592 IF_CELL_UL_MSS_RECOMMENDED_LOW &&
2593 tp->t_maxopd > tcp_mss_rec_low) {
2594 tp->t_cached_maxopd = tp->t_maxopd;
2595 tp->t_maxopd = tcp_mss_rec_low;
2596 tcpstat.tcps_mss_to_low++;
2597 }
2598 tp->t_maxseg = tp->t_maxopd - optlen;
2599
2600 /*
 2601                  * Clear the cached value if it is the same as the current one.
2602 */
2603 if (tp->t_maxopd == tp->t_cached_maxopd)
2604 tp->t_cached_maxopd = 0;
2605 }
2606 }
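/*
 * A small worked example (all numbers assumed, including the sysctl
 * defaults): with t_maxopd == 1440 and 12 bytes of options in use, optlen is
 * 12 and t_maxseg is 1428. If the driver then reports
 * IF_CELL_UL_MSS_RECOMMENDED_LOW and tcp_mss_rec_low were 1240, the current
 * t_maxopd (1440) is cached and clamped to 1240, giving t_maxseg = 1228;
 * once the recommendation returns to NONE, the cached 1440 is restored.
 */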
2607
2608 void
2609 tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
2610 {
2611 struct inpcb *inp = sotoinpcb(so);
2612 struct tcpcb *tp = intotcpcb(inp);
2613
2614 if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL)
2615 return;
2616
2617 if (!IFNET_IS_CELLULAR(ifp)) {
2618 /*
2619 * This optimization is implemented for cellular
2620 * networks only
2621 */
2622 return;
2623 }
 2624         if (tp->t_state <= TCPS_CLOSE_WAIT) {
2625 /*
2626 * If the connection is currently doing or has done PMTU
2627 * blackhole detection, do not change the MSS
2628 */
2629 if (tp->t_flags & TF_BLACKHOLE)
2630 return;
2631 if (ifp->if_link_status == NULL)
2632 return;
2633 tcp_update_mss_core(tp, ifp);
2634 }
2635 }
2636
2637 void
2638 tcp_itimer(struct inpcbinfo *ipi)
2639 {
2640 struct inpcb *inp, *nxt;
2641
2642 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2643 if (tcp_itimer_done == TRUE) {
2644 tcp_itimer_done = FALSE;
2645 atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
2646 return;
2647 }
 2648                 /* The try-lock failed; block and take the lock exclusively */
2649 lck_rw_lock_exclusive(ipi->ipi_lock);
2650 }
2651 tcp_itimer_done = TRUE;
2652
2653 LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
2654 struct socket *so;
2655 struct ifnet *ifp;
2656
2657 if (inp->inp_ppcb == NULL ||
2658 in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
2659 continue;
2660 so = inp->inp_socket;
2661 ifp = inp->inp_last_outifp;
2662 socket_lock(so, 1);
2663 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2664 socket_unlock(so, 1);
2665 continue;
2666 }
2667 so_check_extended_bk_idle_time(so);
2668 if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
2669 tcp_update_mss_locked(so, NULL);
2670 }
2671 socket_unlock(so, 1);
2672
2673 /*
2674 * Defunct all system-initiated background sockets if the
2675 * socket is using the cellular interface and the interface
2676 * has its LQM set to abort.
2677 */
2678 if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
2679 IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
2680 ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
2681 (ifp->if_interface_state.valid_bitmask &
2682 IF_INTERFACE_STATE_LQM_STATE_VALID) &&
2683 ifp->if_interface_state.lqm_state ==
2684 IFNET_LQM_THRESH_ABORT) {
2685 socket_defunct(current_proc(), so,
2686 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
2687 }
2688 }
2689
2690 ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
2691 lck_rw_done(ipi->ipi_lock);
2692 }