/*
 * Copyright (c) 2013-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/kern_control.h>
#include <sys/domain.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>

#include <libkern/OSAtomic.h>
struct tcp_cc_debug_state {
	u_int64_t ccd_tsns;
	char ccd_srcaddr[INET6_ADDRSTRLEN];
	uint16_t ccd_srcport;
	char ccd_destaddr[INET6_ADDRSTRLEN];
	uint16_t ccd_destport;
	uint32_t ccd_snd_cwnd;
	uint32_t ccd_snd_wnd;
	uint32_t ccd_snd_ssthresh;
	uint32_t ccd_pipeack;
	uint32_t ccd_rttcur;
	uint32_t ccd_rxtcur;
	uint32_t ccd_srtt;
	uint32_t ccd_event;
	uint32_t ccd_sndcc;
	uint32_t ccd_sndhiwat;
	uint32_t ccd_bytes_acked;
	union {
		struct {
			uint32_t ccd_last_max;
			uint32_t ccd_tcp_win;
			uint32_t ccd_target_win;
			uint32_t ccd_avg_lastmax;
			uint32_t ccd_mean_deviation;
		} cubic_state;
	} u;
};
int tcp_cc_debug;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, cc_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_cc_debug, 0, "Enable debug data collection");
extern struct tcp_cc_algo tcp_cc_newreno;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno_sockets,
	CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_newreno.num_sockets,
	0, "Number of sockets using newreno");

extern struct tcp_cc_algo tcp_cc_ledbat;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_sockets,
	CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_ledbat.num_sockets,
	0, "Number of sockets using background transport");

extern struct tcp_cc_algo tcp_cc_cubic;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, cubic_sockets,
	CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_cubic.num_sockets,
	0, "Number of sockets using cubic");
int tcp_use_newreno = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, use_newreno,
	CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_use_newreno, 0,
	"Use TCP NewReno by default");
static int tcp_check_cwnd_nonvalidated = 1;
#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, cwnd_nonvalidated,
	CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_check_cwnd_nonvalidated, 0,
	"Check if congestion window is non-validated");
#endif /* (DEBUG || DEVELOPMENT) */
#define SET_SNDSB_IDEAL_SIZE(sndsb, size) \
	sndsb->sb_idealsize = min(max(tcp_sendspace, tp->snd_ssthresh), \
	tcp_autosndbuf_max);
/* Array containing pointers to currently implemented TCP CC algorithms */
struct tcp_cc_algo* tcp_cc_algo_list[TCP_CC_ALGO_COUNT];
struct zone *tcp_cc_zone;
/* Information for collecting TCP debug information using control socket */
#define TCP_CCDEBUG_CONTROL_NAME "com.apple.network.tcp_ccdebug"
#define TCP_CCDBG_NOUNIT 0xffffffff
static kern_ctl_ref tcp_ccdbg_ctlref = NULL;
volatile UInt32 tcp_ccdbg_unit = TCP_CCDBG_NOUNIT;
void tcp_cc_init(void);
static void tcp_cc_control_register(void);
static errno_t tcp_ccdbg_control_connect(kern_ctl_ref kctl,
	struct sockaddr_ctl *sac, void **uinfo);
static errno_t tcp_ccdbg_control_disconnect(kern_ctl_ref kctl,
	u_int32_t unit, void *uinfo);
static struct tcp_cc_algo tcp_cc_algo_none;
/*
 * Initialize TCP congestion control algorithms.
 */
void
tcp_cc_init(void)
{
	bzero(&tcp_cc_algo_list, sizeof(tcp_cc_algo_list));
	bzero(&tcp_cc_algo_none, sizeof(tcp_cc_algo_none));

	tcp_cc_algo_list[TCP_CC_ALGO_NONE] = &tcp_cc_algo_none;
	tcp_cc_algo_list[TCP_CC_ALGO_NEWRENO_INDEX] = &tcp_cc_newreno;
	tcp_cc_algo_list[TCP_CC_ALGO_BACKGROUND_INDEX] = &tcp_cc_ledbat;
	tcp_cc_algo_list[TCP_CC_ALGO_CUBIC_INDEX] = &tcp_cc_cubic;

	tcp_cc_control_register();
}
static void
tcp_cc_control_register(void)
{
	struct kern_ctl_reg ccdbg_control;
	errno_t err;

	bzero(&ccdbg_control, sizeof(ccdbg_control));
	strlcpy(ccdbg_control.ctl_name, TCP_CCDEBUG_CONTROL_NAME,
		sizeof(ccdbg_control.ctl_name));
	ccdbg_control.ctl_connect = tcp_ccdbg_control_connect;
	ccdbg_control.ctl_disconnect = tcp_ccdbg_control_disconnect;

	/* Restrict access to privileged processes and use a stream socket */
	ccdbg_control.ctl_flags |= CTL_FLAG_PRIVILEGED;
	ccdbg_control.ctl_flags |= CTL_FLAG_REG_SOCK_STREAM;

	err = ctl_register(&ccdbg_control, &tcp_ccdbg_ctlref);
	if (err != 0)
		log(LOG_ERR, "failed to register tcp_cc debug control");
}
/* Allow only one socket to connect at any time for debugging */
static errno_t
tcp_ccdbg_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac,
	void **uinfo)
{
#pragma unused(kctl)
#pragma unused(uinfo)
	UInt32 old_value = TCP_CCDBG_NOUNIT;
	UInt32 new_value = sac->sc_unit;

	if (tcp_ccdbg_unit != old_value)
		return (EALREADY);

	if (OSCompareAndSwap(old_value, new_value, &tcp_ccdbg_unit))
		return (0);
	else
		return (EBUSY);
}
static errno_t
tcp_ccdbg_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo)
{
#pragma unused(kctl, unit, uinfo)
	if (unit == tcp_ccdbg_unit) {
		UInt32 old_value = tcp_ccdbg_unit;
		UInt32 new_value = TCP_CCDBG_NOUNIT;
		if (tcp_ccdbg_unit == new_value)
			return (0);

		if (!OSCompareAndSwap(old_value, new_value, &tcp_ccdbg_unit))
			log(LOG_ERR,
			    "failed to disconnect tcp_cc debug control");
	}
	return (0);
}
/*
 * Generate a congestion control debug record for this connection, send it
 * to the debug control socket if a client is connected, and fire the
 * DTrace probe.
 */
void
tcp_ccdbg_trace(struct tcpcb *tp, struct tcphdr *th, int32_t event)
{
#if !CONFIG_DTRACE
#pragma unused(th)
#endif /* !CONFIG_DTRACE */
	struct inpcb *inp = tp->t_inpcb;

	if (tcp_cc_debug && tcp_ccdbg_unit > 0) {
		struct tcp_cc_debug_state dbg_state;
		struct timespec tv;

		bzero(&dbg_state, sizeof(dbg_state));

		nanotime(&tv);
		/* Take the timestamp in nanoseconds */
		dbg_state.ccd_tsns = (tv.tv_sec * 1000000000) + tv.tv_nsec;
		inet_ntop(SOCK_DOM(inp->inp_socket),
			((SOCK_DOM(inp->inp_socket) == PF_INET) ?
			(void *)&inp->inp_laddr.s_addr :
			(void *)&inp->in6p_laddr), dbg_state.ccd_srcaddr,
			sizeof(dbg_state.ccd_srcaddr));
		dbg_state.ccd_srcport = ntohs(inp->inp_lport);
		inet_ntop(SOCK_DOM(inp->inp_socket),
			((SOCK_DOM(inp->inp_socket) == PF_INET) ?
			(void *)&inp->inp_faddr.s_addr :
			(void *)&inp->in6p_faddr), dbg_state.ccd_destaddr,
			sizeof(dbg_state.ccd_destaddr));
		dbg_state.ccd_destport = ntohs(inp->inp_fport);

		dbg_state.ccd_snd_cwnd = tp->snd_cwnd;
		dbg_state.ccd_snd_wnd = tp->snd_wnd;
		dbg_state.ccd_snd_ssthresh = tp->snd_ssthresh;
		dbg_state.ccd_pipeack = tp->t_pipeack;
		dbg_state.ccd_rttcur = tp->t_rttcur;
		dbg_state.ccd_rxtcur = tp->t_rxtcur;
		dbg_state.ccd_srtt = tp->t_srtt >> TCP_RTT_SHIFT;
		dbg_state.ccd_event = event;
		dbg_state.ccd_sndcc = inp->inp_socket->so_snd.sb_cc;
		dbg_state.ccd_sndhiwat = inp->inp_socket->so_snd.sb_hiwat;
		dbg_state.ccd_bytes_acked = tp->t_bytes_acked;
		switch (tp->tcp_cc_index) {
		case TCP_CC_ALGO_CUBIC_INDEX:
			dbg_state.u.cubic_state.ccd_last_max =
				tp->t_ccstate->cub_last_max;
			dbg_state.u.cubic_state.ccd_tcp_win =
				tp->t_ccstate->cub_tcp_win;
			dbg_state.u.cubic_state.ccd_target_win =
				tp->t_ccstate->cub_target_win;
			dbg_state.u.cubic_state.ccd_avg_lastmax =
				tp->t_ccstate->cub_avg_lastmax;
			dbg_state.u.cubic_state.ccd_mean_deviation =
				tp->t_ccstate->cub_mean_dev;
			break;
		default:
			break;
		}

		ctl_enqueuedata(tcp_ccdbg_ctlref, tcp_ccdbg_unit,
			&dbg_state, sizeof(dbg_state), 0);
	}
	DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
		struct tcpcb *, tp, struct tcphdr *, th, int32_t, event);
}
void tcp_cc_resize_sndbuf(struct tcpcb *tp)
{
	struct sockbuf *sb;

	/*
	 * If the send socket buffer size is bigger than ssthresh,
	 * it is time to trim it because we do not want to hold
	 * too many mbufs in the socket buffer.
	 */
	sb = &tp->t_inpcb->inp_socket->so_snd;
	if (sb->sb_hiwat > tp->snd_ssthresh &&
		(sb->sb_flags & SB_AUTOSIZE)) {
		if (sb->sb_idealsize > tp->snd_ssthresh) {
			SET_SNDSB_IDEAL_SIZE(sb, tp->snd_ssthresh);
		}
		sb->sb_flags |= SB_TRIM;
	}
}
void tcp_bad_rexmt_fix_sndbuf(struct tcpcb *tp)
{
	struct sockbuf *sb;

	sb = &tp->t_inpcb->inp_socket->so_snd;
	if ((sb->sb_flags & (SB_TRIM|SB_AUTOSIZE)) == (SB_TRIM|SB_AUTOSIZE)) {
		/*
		 * If there was a retransmission that was not necessary
		 * then the size of the socket buffer can be restored to
		 * what it was before.
		 */
		SET_SNDSB_IDEAL_SIZE(sb, tp->snd_ssthresh);
		if (sb->sb_hiwat <= sb->sb_idealsize) {
			sbreserve(sb, sb->sb_idealsize);
			sb->sb_flags &= ~SB_TRIM;
		}
	}
}
/*
 * Calculate the initial cwnd according to RFC 3390.
 *
 * Keep the old ss_fltsz sysctl for ABI compatibility issues,
 * but it will be overridden by the tcp_do_rfc3390 sysctl when it is set.
 */
void
tcp_cc_cwnd_init_or_reset(struct tcpcb *tp)
{
	if (tp->t_flags & TF_LOCAL) {
		tp->snd_cwnd = tp->t_maxseg * ss_fltsz_local;
	} else {
		/* initial congestion window according to RFC 3390 */
		if (tcp_do_rfc3390)
			tp->snd_cwnd = min(4 * tp->t_maxseg,
				max(2 * tp->t_maxseg, TCP_CC_CWND_INIT_BYTES));
		else
			tp->snd_cwnd = tp->t_maxseg * ss_fltsz;
	}
}
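
/*
 * Worked example of the RFC 3390 initial window above, assuming
 * TCP_CC_CWND_INIT_BYTES is the 4380-byte cap from RFC 3390:
 *  - MSS 1460: min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes (3 segments)
 *  - MSS 536:  min(4 * 536,  max(2 * 536,  4380)) = 2144 bytes (4 segments)
 */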
/*
 * Indicate whether this ack should be delayed.
 * Here is the explanation for different settings of tcp_delack_enabled:
 *  - when set to 1, the behavior is the same as when set to 2. We kept this
 *    for binary compatibility.
 *  - when set to 2, will "ack every other packet":
 *      - if our last ack wasn't a 0-sized window.
 *      - if the peer hasn't sent us a TH_PUSH data packet (radar 3649245).
 *        If TH_PUSH is set, take this as a clue that we need to ACK
 *        with no delay. This helps higher level protocols who
 *        won't send us more data even if the window is open
 *        because their last "segment" hasn't been ACKed.
 *  - when set to 3, will do "streaming detection":
 *      - if we receive more than "maxseg_unacked" full packets
 *      - if the connection is not in slow-start or idle or
 *        loss/recovery states
 *      - if those criteria aren't met, it will ack every other packet.
 */
int
tcp_cc_delay_ack(struct tcpcb *tp, struct tcphdr *th)
{
	/* If any flag other than TH_ACK is set, set "end-of-write" bit */
	if ((th->th_flags & ~TH_ACK))
		tp->t_flagsext |= TF_STREAMEOW;
	else
		tp->t_flagsext &= ~(TF_STREAMEOW);

	switch (tcp_delack_enabled) {
	case 1:
	case 2:
		if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
			(th->th_flags & TH_PUSH) == 0 &&
			(tp->t_unacksegs == 1))
			return (1);
		break;
	case 3:
		if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
			(th->th_flags & TH_PUSH) == 0 &&
			((tp->t_unacksegs == 1) ||
			((tp->t_flags & TF_STRETCHACK) != 0 &&
			tp->t_unacksegs < (maxseg_unacked))))
			return (1);
		break;
	}
	return (0);
}
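
/*
 * For illustration (the values below are assumptions, not taken from this
 * file): with tcp_delack_enabled = 3 and maxseg_unacked = 8, a connection
 * with stretch ACKs enabled (TF_STRETCHACK set) may hold off the ACK until
 * 8 full-sized segments have accumulated, while a connection without
 * stretch ACKs still acks every other packet (only t_unacksegs == 1 delays).
 */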
void
tcp_cc_allocate_state(struct tcpcb *tp)
{
	if (tp->tcp_cc_index == TCP_CC_ALGO_CUBIC_INDEX &&
		tp->t_ccstate == NULL) {
		tp->t_ccstate = (struct tcp_ccstate *)zalloc(tcp_cc_zone);

		/*
		 * If we could not allocate memory for congestion control
		 * state, revert to using TCP NewReno as it does not
		 * require any additional state.
		 */
		if (tp->t_ccstate == NULL)
			tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
		else
			bzero(tp->t_ccstate, sizeof(*tp->t_ccstate));
	}
}
/*
 * If stretch ack was disabled automatically on long standing connections,
 * re-evaluate the situation after 15 minutes to enable it.
 */
#define TCP_STRETCHACK_DISABLE_WIN (15 * 60 * TCP_RETRANSHZ)
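/*
 * For reference: assuming TCP_RETRANSHZ is 1000 (timestamps kept in
 * millisecond ticks), the window above works out to 15 * 60 * 1000 =
 * 900000 ticks, i.e. 15 minutes.
 */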
void
tcp_cc_after_idle_stretchack(struct tcpcb *tp)
{
	int32_t tdiff;

	if (!(tp->t_flagsext & TF_DISABLE_STRETCHACK))
		return;

	tdiff = timer_diff(tcp_now, 0, tp->rcv_nostrack_ts, 0);

	if (tdiff > TCP_STRETCHACK_DISABLE_WIN) {
		tp->t_flagsext &= ~TF_DISABLE_STRETCHACK;
		tp->t_stretchack_delayed = 0;

		tcp_reset_stretch_ack(tp);
	}
}
/*
 * Detect if the congestion window is non-validated according to
 * draft-ietf-tcpm-newcwv-07
 */
uint32_t
tcp_cc_is_cwnd_nonvalidated(struct tcpcb *tp)
{
	if (tp->t_pipeack == 0 || tcp_check_cwnd_nonvalidated == 0) {
		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
		return (0);
	}
	if (tp->t_pipeack >= (tp->snd_cwnd) >> 1)
		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
	else
		tp->t_flagsext |= TF_CWND_NONVALIDATED;

	return (tp->t_flagsext & TF_CWND_NONVALIDATED);
}
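
/*
 * Example of the rule above (numbers are illustrative): if snd_cwnd is
 * 128 KB but the largest recent pipeack sample shows only 32 KB actually
 * in flight, then pipeack < cwnd/2 and the window is marked non-validated;
 * once at least 64 KB is in use, the flag is cleared again.
 */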
/*
 * Adjust congestion window in response to congestion in non-validated
 * phase.
 */
void
tcp_cc_adjust_nonvalidated_cwnd(struct tcpcb *tp)
{
	tp->t_pipeack = tcp_get_max_pipeack(tp);
	tcp_clear_pipeack_state(tp);
	tp->snd_cwnd = (max(tp->t_pipeack, tp->t_lossflightsize) >> 1);
	tp->snd_cwnd = max(tp->snd_cwnd, TCP_CC_CWND_INIT_BYTES);
	tp->snd_cwnd += tp->t_maxseg * tcprexmtthresh;
	tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
}
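
/*
 * Worked example (illustrative values; tcprexmtthresh is assumed to be the
 * usual 3 and TCP_CC_CWND_INIT_BYTES 4380): with a maximum pipeack of
 * 40000 bytes, t_lossflightsize of 60000 bytes and a 1460-byte MSS,
 * snd_cwnd becomes max(60000 >> 1, 4380) + 3 * 1460 = 30000 + 4380
 * = 34380 bytes.
 */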
/*
 * Return the maximum of all the pipeack samples. Since the number of samples
 * TCP_PIPEACK_SAMPLE_COUNT is 3 at this time, it is simpler to do
 * a comparison. We should change this if the number of samples increases.
 */
u_int32_t
tcp_get_max_pipeack(struct tcpcb *tp)
{
	u_int32_t max_pipeack = 0;
	max_pipeack = (tp->t_pipeack_sample[0] > tp->t_pipeack_sample[1]) ?
		tp->t_pipeack_sample[0] : tp->t_pipeack_sample[1];
	max_pipeack = (tp->t_pipeack_sample[2] > max_pipeack) ?
		tp->t_pipeack_sample[2] : max_pipeack;

	return (max_pipeack);
}
void
tcp_clear_pipeack_state(struct tcpcb *tp)
{
	bzero(tp->t_pipeack_sample, sizeof(tp->t_pipeack_sample));
	tp->t_pipeack_ind = 0;
	tp->t_lossflightsize = 0;
}