apple/xnu: bsd/netinet/tcp_usrreq.c
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * From: @(#)tcp_usrreq.c 8.2 (Berkeley) 1/3/94
61 * $FreeBSD: src/sys/netinet/tcp_usrreq.c,v 1.51.2.9 2001/08/22 00:59:12 silby Exp $
62 */
63
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/sysctl.h>
69 #include <sys/mbuf.h>
70 #include <sys/domain.h>
71 #if XNU_TARGET_OS_OSX
72 #include <sys/kasl.h>
73 #endif /* XNU_TARGET_OS_OSX */
74 #include <sys/priv.h>
75 #include <sys/socket.h>
76 #include <sys/socketvar.h>
77 #include <sys/protosw.h>
78 #include <sys/syslog.h>
79
80 #include <net/if.h>
81 #include <net/route.h>
82 #include <net/ntstat.h>
83 #include <net/content_filter.h>
84 #include <net/multi_layer_pkt_log.h>
85
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip6.h>
89 #include <netinet/in_pcb.h>
90 #include <netinet6/in6_pcb.h>
91 #include <netinet/in_var.h>
92 #include <netinet/ip_var.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet/tcp.h>
95 #include <netinet/tcp_fsm.h>
96 #include <netinet/tcp_seq.h>
97 #include <netinet/tcp_timer.h>
98 #include <netinet/tcp_var.h>
99 #include <netinet/tcpip.h>
100 #include <netinet/tcp_cc.h>
101 #include <netinet/tcp_log.h>
102 #include <mach/sdt.h>
103 #if TCPDEBUG
104 #include <netinet/tcp_debug.h>
105 #endif
106 #if MPTCP
107 #include <netinet/mptcp_var.h>
108 #endif /* MPTCP */
109
110 #if IPSEC
111 #include <netinet6/ipsec.h>
112 #endif /*IPSEC*/
113
114 #if FLOW_DIVERT
115 #include <netinet/flow_divert.h>
116 #endif /* FLOW_DIVERT */
117
118 errno_t tcp_fill_info_for_info_tuple(struct info_tuple *, struct tcp_info *);
119
120 int tcp_sysctl_info(struct sysctl_oid *, void *, int, struct sysctl_req *);
121 static void tcp_connection_fill_info(struct tcpcb *tp,
122 struct tcp_connection_info *tci);
123 static int tcp_get_mpkl_send_info(struct mbuf *, struct so_mpkl_send_info *);
124
125 /*
126 * TCP protocol interface to socket abstraction.
127 */
128 static int tcp_attach(struct socket *, struct proc *);
129 static int tcp_connect(struct tcpcb *, struct sockaddr *, struct proc *);
130 static int tcp6_connect(struct tcpcb *, struct sockaddr *, struct proc *);
131 static int tcp6_usr_connect(struct socket *, struct sockaddr *,
132 struct proc *);
133 static struct tcpcb *tcp_disconnect(struct tcpcb *);
134 static struct tcpcb *tcp_usrclosed(struct tcpcb *);
135 extern void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);
136
137 #if TCPDEBUG
138 #define TCPDEBUG0 int ostate = 0
139 #define TCPDEBUG1() ostate = tp ? tp->t_state : 0
140 #define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \
141 tcp_trace(TA_USER, ostate, tp, 0, 0, req)
142 #else
143 #define TCPDEBUG0
144 #define TCPDEBUG1()
145 #define TCPDEBUG2(req)
146 #endif
147
148 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, info,
149 CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_KERN,
150 0, 0, tcp_sysctl_info, "S", "TCP info per tuple");
151
152 /*
153 * TCP attaches to socket via pru_attach(), reserving space,
154 * and an internet control block.
155 *
156 * Returns: 0 Success
157 * EISCONN
158 * tcp_attach:ENOBUFS
159 * tcp_attach:ENOMEM
160 * tcp_attach:??? [IPSEC specific]
161 */
162 static int
163 tcp_usr_attach(struct socket *so, __unused int proto, struct proc *p)
164 {
165 int error;
166 struct inpcb *inp = sotoinpcb(so);
167 struct tcpcb *tp = 0;
168 TCPDEBUG0;
169
170 TCPDEBUG1();
171 if (inp) {
172 error = EISCONN;
173 goto out;
174 }
175
176 error = tcp_attach(so, p);
177 if (error) {
178 goto out;
179 }
180
181 if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
182 so->so_linger = (short)(TCP_LINGERTIME * hz);
183 }
184 tp = sototcpcb(so);
185 out:
186 TCPDEBUG2(PRU_ATTACH);
187 return error;
188 }
189
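/*
 * Illustrative userspace sketch (not part of this file, helper-free):
 * socket(2) on an AF_INET/SOCK_STREAM socket is what reaches
 * tcp_usr_attach() above via pru_attach; the SO_LINGER fixup there only
 * fires when the option is already set with a zero linger time at attach.
 * The snippet below just shows the attach trigger and the usual SO_LINGER
 * usage, assuming standard POSIX socket headers.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <stdio.h>

int
main(void)
{
	/* socket(2) lands in tcp_usr_attach() through pru_attach */
	int s = socket(AF_INET, SOCK_STREAM, 0);
	if (s == -1) {
		perror("socket");
		return 1;
	}
	/* l_onoff set with l_linger == 0 asks for an abortive close (RST) */
	struct linger l = { .l_onoff = 1, .l_linger = 0 };
	if (setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1) {
		perror("setsockopt(SO_LINGER)");
	}
	return 0;
}
#endif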
190 /*
191 * pru_detach() detaches the TCP protocol from the socket.
192 * If the protocol state is non-embryonic, then can't
193 * do this directly: have to initiate a pru_disconnect(),
194 * which may finish later; embryonic TCB's can just
195 * be discarded here.
196 */
197 static int
198 tcp_usr_detach(struct socket *so)
199 {
200 int error = 0;
201 struct inpcb *inp = sotoinpcb(so);
202 struct tcpcb *tp;
203 TCPDEBUG0;
204
205 if (inp == 0 || (inp->inp_state == INPCB_STATE_DEAD)) {
206 return EINVAL; /* XXX */
207 }
208 socket_lock_assert_owned(so);
209 tp = intotcpcb(inp);
210 /* In case we got disconnected from the peer */
211 if (tp == NULL) {
212 goto out;
213 }
214 TCPDEBUG1();
215
216 calculate_tcp_clock();
217
218 tp = tcp_disconnect(tp);
219 out:
220 TCPDEBUG2(PRU_DETACH);
221 return error;
222 }
223
224 #if NECP
225 #define COMMON_START_ALLOW_FLOW_DIVERT(allow) TCPDEBUG0; \
226 do { \
227 if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) \
228 return (EINVAL); \
229 if (!(allow) && necp_socket_should_use_flow_divert(inp)) \
230 return (EPROTOTYPE); \
231 tp = intotcpcb(inp); \
232 TCPDEBUG1(); \
233 calculate_tcp_clock(); \
234 } while (0)
235 #else /* NECP */
236 #define COMMON_START_ALLOW_FLOW_DIVERT(allow) TCPDEBUG0; \
237 do { \
238 if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) \
239 return (EINVAL); \
240 tp = intotcpcb(inp); \
241 TCPDEBUG1(); \
242 calculate_tcp_clock(); \
243 } while (0)
244 #endif /* !NECP */
245
246 #define COMMON_START() COMMON_START_ALLOW_FLOW_DIVERT(false)
247 #define COMMON_END(req) out: TCPDEBUG2(req); return error; goto out
248
249
250 /*
251 * Give the socket an address.
252 *
253 * Returns: 0 Success
254 * EINVAL Invalid argument [COMMON_START]
255 * EAFNOSUPPORT Address family not supported
256 * in_pcbbind:EADDRNOTAVAIL Address not available.
257 * in_pcbbind:EINVAL Invalid argument
258 * in_pcbbind:EAFNOSUPPORT Address family not supported [notdef]
259 * in_pcbbind:EACCES Permission denied
260 * in_pcbbind:EADDRINUSE Address in use
261 * in_pcbbind:EAGAIN Resource unavailable, try again
262 * in_pcbbind:EPERM Operation not permitted
263 */
264 static int
265 tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
266 {
267 int error = 0;
268 struct inpcb *inp = sotoinpcb(so);
269 struct tcpcb *tp;
270 struct sockaddr_in *sinp;
271
272 COMMON_START_ALLOW_FLOW_DIVERT(true);
273
274 if (nam->sa_family != 0 && nam->sa_family != AF_INET) {
275 error = EAFNOSUPPORT;
276 goto out;
277 }
278
279 /*
280 * Must check for multicast addresses and disallow binding
281 * to them.
282 */
283 sinp = (struct sockaddr_in *)(void *)nam;
284 if (sinp->sin_family == AF_INET &&
285 IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
286 error = EAFNOSUPPORT;
287 goto out;
288 }
289 error = in_pcbbind(inp, nam, p);
290 if (error) {
291 goto out;
292 }
293
294 #if NECP
295 /* Update NECP client with bind result if not in middle of connect */
296 if ((inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
297 !uuid_is_null(inp->necp_client_uuid)) {
298 socket_unlock(so, 0);
299 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
300 socket_lock(so, 0);
301 }
302 #endif /* NECP */
303
304 COMMON_END(PRU_BIND);
305 }
306
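/*
 * Illustrative userspace sketch (not part of this file): bind(2) on a TCP
 * socket is serviced by tcp_usr_bind() above, which rejects multicast
 * addresses with EAFNOSUPPORT.  224.0.0.1 below is just an example
 * multicast group; standard POSIX headers assumed.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	int s = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sin = {
		.sin_len = sizeof(sin),
		.sin_family = AF_INET,
		.sin_port = htons(8080),
	};

	/* a multicast destination: tcp_usr_bind() refuses to bind to it */
	inet_pton(AF_INET, "224.0.0.1", &sin.sin_addr);
	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		printf("bind failed: %s (EAFNOSUPPORT expected)\n",
		    strerror(errno));
	}
	return 0;
}
#endif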
307 static int
308 tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
309 {
310 int error = 0;
311 struct inpcb *inp = sotoinpcb(so);
312 struct tcpcb *tp;
313 struct sockaddr_in6 *sin6p;
314
315 COMMON_START_ALLOW_FLOW_DIVERT(true);
316
317 if (nam->sa_family != 0 && nam->sa_family != AF_INET6) {
318 error = EAFNOSUPPORT;
319 goto out;
320 }
321
322 /*
323 * Must check for multicast addresses and disallow binding
324 * to them.
325 */
326 sin6p = (struct sockaddr_in6 *)(void *)nam;
327 if (sin6p->sin6_family == AF_INET6 &&
328 IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
329 error = EAFNOSUPPORT;
330 goto out;
331 }
332 inp->inp_vflag &= ~INP_IPV4;
333 inp->inp_vflag |= INP_IPV6;
334 if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
335 if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr)) {
336 inp->inp_vflag |= INP_IPV4;
337 } else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
338 struct sockaddr_in sin;
339
340 in6_sin6_2_sin(&sin, sin6p);
341 inp->inp_vflag |= INP_IPV4;
342 inp->inp_vflag &= ~INP_IPV6;
343 error = in_pcbbind(inp, (struct sockaddr *)&sin, p);
344 goto out;
345 }
346 }
347 error = in6_pcbbind(inp, nam, p);
348 if (error) {
349 goto out;
350 }
351 COMMON_END(PRU_BIND);
352 }
353
354 /*
355 * Prepare to accept connections.
356 *
357 * Returns: 0 Success
358 * EINVAL [COMMON_START]
359 * in_pcbbind:EADDRNOTAVAIL Address not available.
360 * in_pcbbind:EINVAL Invalid argument
361 * in_pcbbind:EAFNOSUPPORT Address family not supported [notdef]
362 * in_pcbbind:EACCES Permission denied
363 * in_pcbbind:EADDRINUSE Address in use
364 * in_pcbbind:EAGAIN Resource unavailable, try again
365 * in_pcbbind:EPERM Operation not permitted
366 */
367 static int
368 tcp_usr_listen(struct socket *so, struct proc *p)
369 {
370 int error = 0;
371 struct inpcb *inp = sotoinpcb(so);
372 struct tcpcb *tp;
373
374 COMMON_START();
375 if (inp->inp_lport == 0) {
376 error = in_pcbbind(inp, NULL, p);
377 }
378 if (error == 0) {
379 tp->t_state = TCPS_LISTEN;
380 }
381 TCP_LOG_LISTEN(tp, error);
382 COMMON_END(PRU_LISTEN);
383 }
384
385 static int
386 tcp6_usr_listen(struct socket *so, struct proc *p)
387 {
388 int error = 0;
389 struct inpcb *inp = sotoinpcb(so);
390 struct tcpcb *tp;
391
392 COMMON_START();
393 if (inp->inp_lport == 0) {
394 inp->inp_vflag &= ~INP_IPV4;
395 if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
396 inp->inp_vflag |= INP_IPV4;
397 }
398 error = in6_pcbbind(inp, NULL, p);
399 }
400 if (error == 0) {
401 tp->t_state = TCPS_LISTEN;
402 }
403 TCP_LOG_LISTEN(tp, error);
404 COMMON_END(PRU_LISTEN);
405 }
406
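/*
 * Illustrative userspace sketch (not part of this file): calling listen(2)
 * before bind(2) takes the inp_lport == 0 branch in tcp_usr_listen() above,
 * so in_pcbbind() assigns an ephemeral port; getsockname(2) reveals it.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

int
main(void)
{
	int s = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sin;
	socklen_t len = sizeof(sin);

	/* no bind(): the kernel picks an ephemeral local port */
	if (listen(s, 5) == -1) {
		perror("listen");
		return 1;
	}
	getsockname(s, (struct sockaddr *)&sin, &len);
	printf("listening on port %u\n", ntohs(sin.sin_port));
	return 0;
}
#endif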
407 static int
408 tcp_connect_complete(struct socket *so)
409 {
410 struct tcpcb *tp = sototcpcb(so);
411 struct inpcb *inp = sotoinpcb(so);
412 int error = 0;
413
414 /* TFO delays the tcp_output until later, when the app calls write() */
415 if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
416 if (!necp_socket_is_allowed_to_send_recv(sotoinpcb(so), NULL, 0, NULL, NULL, NULL, NULL)) {
417 TCP_LOG_DROP_NECP(NULL, NULL, tp, true);
418 return EHOSTUNREACH;
419 }
420
421 /* Initialize enough state so that we can actually send data */
422 tcp_mss(tp, -1, IFSCOPE_NONE);
423 tp->snd_wnd = tp->t_maxseg;
424 tp->max_sndwnd = tp->snd_wnd;
425 } else {
426 error = tcp_output(tp);
427 }
428
429 #if NECP
430 /* Update NECP client with connected five-tuple */
431 if (error == 0 && !uuid_is_null(inp->necp_client_uuid)) {
432 socket_unlock(so, 0);
433 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
434 socket_lock(so, 0);
435 }
436 #endif /* NECP */
437
438 return error;
439 }
440
441 /*
442 * Initiate connection to peer.
443 * Create a template for use in transmissions on this connection.
444 * Enter SYN_SENT state, and mark socket as connecting.
445 * Start keep-alive timer, and seed output sequence space.
446 * Send initial segment on connection.
447 */
448 static int
449 tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
450 {
451 int error = 0;
452 struct inpcb *inp = sotoinpcb(so);
453 struct tcpcb *tp;
454 struct sockaddr_in *sinp;
455
456 TCPDEBUG0;
457 if (inp == NULL) {
458 return EINVAL;
459 } else if (inp->inp_state == INPCB_STATE_DEAD) {
460 if (so->so_error) {
461 error = so->so_error;
462 so->so_error = 0;
463 return error;
464 } else {
465 return EINVAL;
466 }
467 }
468 #if NECP
469 #if CONTENT_FILTER
470 error = cfil_sock_attach(so, NULL, nam, CFS_CONNECTION_DIR_OUT);
471 if (error != 0) {
472 return error;
473 }
474 #endif /* CONTENT_FILTER */
475 #if FLOW_DIVERT
476 if (necp_socket_should_use_flow_divert(inp)) {
477 error = flow_divert_pcb_init(so);
478 if (error == 0) {
479 error = flow_divert_connect_out(so, nam, p);
480 }
481 return error;
482 }
483 #endif /* FLOW_DIVERT */
484 #endif /* NECP */
485 tp = intotcpcb(inp);
486 TCPDEBUG1();
487
488 calculate_tcp_clock();
489
490 if (nam->sa_family != 0 && nam->sa_family != AF_INET) {
491 error = EAFNOSUPPORT;
492 goto out;
493 }
494 /*
495 * Must disallow TCP ``connections'' to multicast addresses.
496 */
497 sinp = (struct sockaddr_in *)(void *)nam;
498 if (sinp->sin_family == AF_INET
499 && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
500 error = EAFNOSUPPORT;
501 goto out;
502 }
503
504 if ((error = tcp_connect(tp, nam, p)) != 0) {
505 TCP_LOG_CONNECT(tp, true, error);
506 goto out;
507 }
508
509 error = tcp_connect_complete(so);
510
511 TCP_LOG_CONNECT(tp, true, error);
512
513 COMMON_END(PRU_CONNECT);
514 }
515
516 static int
517 tcp_usr_connectx_common(struct socket *so, int af,
518 struct sockaddr *src, struct sockaddr *dst,
519 struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
520 uint32_t flags, void *arg, uint32_t arglen, struct uio *auio,
521 user_ssize_t *bytes_written)
522 {
523 #pragma unused(aid, flags, arg, arglen)
524 struct inpcb *inp = sotoinpcb(so);
525 int error = 0;
526 user_ssize_t datalen = 0;
527
528 if (inp == NULL) {
529 return EINVAL;
530 }
531
532 VERIFY(dst != NULL);
533
534 ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
535 inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;
536
537 #if NECP
538 inp_update_necp_policy(inp, src, dst, ifscope);
539 #endif /* NECP */
540
541 if ((so->so_flags1 & SOF1_DATA_IDEMPOTENT) &&
542 (tcp_fastopen & TCP_FASTOPEN_CLIENT)) {
543 sototcpcb(so)->t_flagsext |= TF_FASTOPEN;
544 }
545
546 /* bind socket to the specified interface, if requested */
547 if (ifscope != IFSCOPE_NONE &&
548 (error = inp_bindif(inp, ifscope, NULL)) != 0) {
549 goto done;
550 }
551
552 /* if source address and/or port is specified, bind to it */
553 if (src != NULL) {
554 error = sobindlock(so, src, 0); /* already locked */
555 if (error != 0) {
556 goto done;
557 }
558 }
559
560 switch (af) {
561 case AF_INET:
562 error = tcp_usr_connect(so, dst, p);
563 break;
564 case AF_INET6:
565 error = tcp6_usr_connect(so, dst, p);
566 break;
567 default:
568 VERIFY(0);
569 /* NOTREACHED */
570 }
571
572 if (error != 0) {
573 goto done;
574 }
575
576 /* if there is data, copy it */
577 if (auio != NULL) {
578 socket_unlock(so, 0);
579
580 VERIFY(bytes_written != NULL);
581
582 datalen = uio_resid(auio);
583 error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
584 (uio_t)auio, NULL, NULL, 0);
585 socket_lock(so, 0);
586
587 if (error == 0 || error == EWOULDBLOCK) {
588 *bytes_written = datalen - uio_resid(auio);
589 }
590
591 /*
592 * sosend returns EWOULDBLOCK if it's a non-blocking
593 				 * socket or a timeout occurred (this allows returning
594 * the amount of queued data through sendit()).
595 *
596 * However, connectx() returns EINPROGRESS in case of a
597 * blocking socket. So we change the return value here.
598 */
599 if (error == EWOULDBLOCK) {
600 error = EINPROGRESS;
601 }
602 }
603
604 if (error == 0 && pcid != NULL) {
605 *pcid = 1; /* there is only one connection in regular TCP */
606 }
607 done:
608 if (error && error != EINPROGRESS) {
609 so->so_flags1 &= ~SOF1_PRECONNECT_DATA;
610 }
611
612 inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
613 return error;
614 }
615
616 static int
617 tcp_usr_connectx(struct socket *so, struct sockaddr *src,
618 struct sockaddr *dst, struct proc *p, uint32_t ifscope,
619 sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
620 uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
621 {
622 return tcp_usr_connectx_common(so, AF_INET, src, dst, p, ifscope, aid,
623 pcid, flags, arg, arglen, uio, bytes_written);
624 }
625
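/*
 * Illustrative userspace sketch (not part of this file): connectx(2) is the
 * call serviced by tcp_usr_connectx_common() above.  With
 * CONNECT_DATA_IDEMPOTENT and an iovec, the payload may ride on the SYN
 * (TCP Fast Open), and *len reports how much was queued.  This assumes the
 * connectx(2) interface exported on recent macOS (sa_endpoints_t,
 * SAE_ASSOCID_ANY, CONNECT_DATA_IDEMPOTENT in <sys/socket.h>); the
 * destination address and request bytes are examples.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

int
main(void)
{
	int s = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in dst = {
		.sin_len = sizeof(dst),
		.sin_family = AF_INET,
		.sin_port = htons(80),
	};
	inet_pton(AF_INET, "192.0.2.10", &dst.sin_addr);	/* example address */

	sa_endpoints_t eps = {
		.sae_dstaddr = (struct sockaddr *)&dst,
		.sae_dstaddrlen = sizeof(dst),
	};
	static const char req[] = "HEAD / HTTP/1.0\r\n\r\n";
	struct iovec iov = { .iov_base = (void *)req, .iov_len = sizeof(req) - 1 };
	size_t sent = 0;
	sae_connid_t cid;

	/* idempotent data: eligible to be carried on the SYN when TFO applies */
	if (connectx(s, &eps, SAE_ASSOCID_ANY, CONNECT_DATA_IDEMPOTENT,
	    &iov, 1, &sent, &cid) == -1) {
		perror("connectx");
	} else {
		printf("queued %zu bytes, connection id %u\n", sent, cid);
	}
	return 0;
}
#endif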
626 static int
627 tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
628 {
629 int error = 0;
630 struct inpcb *inp = sotoinpcb(so);
631 struct tcpcb *tp;
632 struct sockaddr_in6 *sin6p;
633
634 TCPDEBUG0;
635 if (inp == NULL) {
636 return EINVAL;
637 } else if (inp->inp_state == INPCB_STATE_DEAD) {
638 if (so->so_error) {
639 error = so->so_error;
640 so->so_error = 0;
641 return error;
642 } else {
643 return EINVAL;
644 }
645 }
646 #if NECP
647 #if CONTENT_FILTER
648 error = cfil_sock_attach(so, NULL, nam, CFS_CONNECTION_DIR_OUT);
649 if (error != 0) {
650 return error;
651 }
652 #endif /* CONTENT_FILTER */
653 #if FLOW_DIVERT
654 if (necp_socket_should_use_flow_divert(inp)) {
655 error = flow_divert_pcb_init(so);
656 if (error == 0) {
657 error = flow_divert_connect_out(so, nam, p);
658 }
659 return error;
660 }
661 #endif /* FLOW_DIVERT */
662 #endif /* NECP */
663
664 tp = intotcpcb(inp);
665 TCPDEBUG1();
666
667 calculate_tcp_clock();
668
669 if (nam->sa_family != 0 && nam->sa_family != AF_INET6) {
670 error = EAFNOSUPPORT;
671 goto out;
672 }
673
674 /*
675 * Must disallow TCP ``connections'' to multicast addresses.
676 */
677 sin6p = (struct sockaddr_in6 *)(void *)nam;
678 if (sin6p->sin6_family == AF_INET6
679 && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
680 error = EAFNOSUPPORT;
681 goto out;
682 }
683
684 if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
685 struct sockaddr_in sin;
686
687 if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
688 return EINVAL;
689 }
690
691 in6_sin6_2_sin(&sin, sin6p);
692 inp->inp_vflag |= INP_IPV4;
693 inp->inp_vflag &= ~INP_IPV6;
694 if ((error = tcp_connect(tp, (struct sockaddr *)&sin, p)) != 0) {
695 TCP_LOG_CONNECT(tp, true, error);
696 goto out;
697 }
698
699 error = tcp_connect_complete(so);
700 goto out;
701 }
702 inp->inp_vflag &= ~INP_IPV4;
703 inp->inp_vflag |= INP_IPV6;
704 if ((error = tcp6_connect(tp, nam, p)) != 0) {
705 TCP_LOG_CONNECT(tp, true, error);
706 goto out;
707 }
708
709 error = tcp_connect_complete(so);
710
711 TCP_LOG_CONNECT(tp, true, error);
712
713 COMMON_END(PRU_CONNECT);
714 }
715
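/*
 * Illustrative userspace sketch (not part of this file): connecting an
 * AF_INET6 socket to a v4-mapped address takes the IN6_IS_ADDR_V4MAPPED()
 * branch of tcp6_usr_connect() above, so the connection is actually set up
 * by tcp_connect() over IPv4.  Requires IPV6_V6ONLY to be off; the
 * destination below is an example address.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

int
main(void)
{
	int s = socket(AF_INET6, SOCK_STREAM, 0);
	int off = 0;

	/* allow the dual-stack (v4-mapped) path */
	setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));

	struct sockaddr_in6 sin6 = {
		.sin6_len = sizeof(sin6),
		.sin6_family = AF_INET6,
		.sin6_port = htons(80),
	};
	inet_pton(AF_INET6, "::ffff:192.0.2.10", &sin6.sin6_addr);
	if (connect(s, (struct sockaddr *)&sin6, sizeof(sin6)) == -1) {
		perror("connect");
	}
	return 0;
}
#endif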
716 static int
717 tcp6_usr_connectx(struct socket *so, struct sockaddr*src,
718 struct sockaddr *dst, struct proc *p, uint32_t ifscope,
719 sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
720 uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
721 {
722 return tcp_usr_connectx_common(so, AF_INET6, src, dst, p, ifscope, aid,
723 pcid, flags, arg, arglen, uio, bytes_written);
724 }
725
726 /*
727 * Initiate disconnect from peer.
728 * If connection never passed embryonic stage, just drop;
729  * else if we don't need to let data drain, then we can just drop anyway,
730 * else have to begin TCP shutdown process: mark socket disconnecting,
731 * drain unread data, state switch to reflect user close, and
732 * send segment (e.g. FIN) to peer. Socket will be really disconnected
733 * when peer sends FIN and acks ours.
734 *
735 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
736 */
737 static int
738 tcp_usr_disconnect(struct socket *so)
739 {
740 int error = 0;
741 struct inpcb *inp = sotoinpcb(so);
742 struct tcpcb *tp;
743
744 socket_lock_assert_owned(so);
745 COMMON_START();
746 /* In case we got disconnected from the peer */
747 if (tp == NULL) {
748 goto out;
749 }
750 tp = tcp_disconnect(tp);
751 COMMON_END(PRU_DISCONNECT);
752 }
753
754 /*
755 * User-protocol pru_disconnectx callback.
756 */
757 static int
758 tcp_usr_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
759 {
760 #pragma unused(cid)
761 if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
762 return EINVAL;
763 }
764
765 return tcp_usr_disconnect(so);
766 }
767
768 /*
769 * Accept a connection. Essentially all the work is
770 * done at higher levels; just return the address
771 * of the peer, storing through addr.
772 */
773 static int
774 tcp_usr_accept(struct socket *so, struct sockaddr **nam)
775 {
776 int error = 0;
777 struct inpcb *inp = sotoinpcb(so);
778 struct tcpcb *tp = NULL;
779 TCPDEBUG0;
780
781 in_getpeeraddr(so, nam);
782
783 if (so->so_state & SS_ISDISCONNECTED) {
784 error = ECONNABORTED;
785 goto out;
786 }
787 if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
788 return EINVAL;
789 }
790 #if NECP
791 else if (necp_socket_should_use_flow_divert(inp)) {
792 return EPROTOTYPE;
793 }
794
795 #endif /* NECP */
796
797 tp = intotcpcb(inp);
798 TCPDEBUG1();
799
800 TCP_LOG_ACCEPT(tp, 0);
801
802 calculate_tcp_clock();
803
804 COMMON_END(PRU_ACCEPT);
805 }
806
807 static int
808 tcp6_usr_accept(struct socket *so, struct sockaddr **nam)
809 {
810 int error = 0;
811 struct inpcb *inp = sotoinpcb(so);
812 struct tcpcb *tp = NULL;
813 TCPDEBUG0;
814
815 if (so->so_state & SS_ISDISCONNECTED) {
816 error = ECONNABORTED;
817 goto out;
818 }
819 if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
820 return EINVAL;
821 }
822 #if NECP
823 else if (necp_socket_should_use_flow_divert(inp)) {
824 return EPROTOTYPE;
825 }
826
827 #endif /* NECP */
828
829 tp = intotcpcb(inp);
830 TCPDEBUG1();
831
832 TCP_LOG_ACCEPT(tp, 0);
833
834 calculate_tcp_clock();
835
836 in6_mapped_peeraddr(so, nam);
837 COMMON_END(PRU_ACCEPT);
838 }
839
840 /*
841 * Mark the connection as being incapable of further output.
842 *
843 * Returns: 0 Success
844 * EINVAL [COMMON_START]
845 * tcp_output:EADDRNOTAVAIL
846 * tcp_output:ENOBUFS
847 * tcp_output:EMSGSIZE
848 * tcp_output:EHOSTUNREACH
849 * tcp_output:ENETUNREACH
850 * tcp_output:ENETDOWN
851 * tcp_output:ENOMEM
852 * tcp_output:EACCES
853 * tcp_output:EMSGSIZE
854 * tcp_output:ENOBUFS
855 * tcp_output:??? [ignorable: mostly IPSEC/firewall/DLIL]
856 */
857 static int
858 tcp_usr_shutdown(struct socket *so)
859 {
860 int error = 0;
861 struct inpcb *inp = sotoinpcb(so);
862 struct tcpcb *tp;
863
864 TCPDEBUG0;
865 if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
866 return EINVAL;
867 }
868
869 socantsendmore(so);
870
871 /*
872 * In case we got disconnected from the peer, or if this is
873 * a socket that is to be flow-diverted (but not yet).
874 */
875 tp = intotcpcb(inp);
876 TCPDEBUG1();
877
878 if (tp == NULL
879 #if NECP
880 || (necp_socket_should_use_flow_divert(inp))
881 #endif /* NECP */
882 ) {
883 if (tp != NULL) {
884 error = EPROTOTYPE;
885 }
886 goto out;
887 }
888
889 calculate_tcp_clock();
890
891 tp = tcp_usrclosed(tp);
892 #if MPTCP
893 /* A reset has been sent but socket exists, do not send FIN */
894 if ((so->so_flags & SOF_MP_SUBFLOW) &&
895 (tp) && (tp->t_mpflags & TMPF_RESET)) {
896 goto out;
897 }
898 #endif
899 #if CONTENT_FILTER
900 /* Don't send a FIN yet */
901 if (tp && !(so->so_state & SS_ISDISCONNECTED) &&
902 cfil_sock_data_pending(&so->so_snd)) {
903 goto out;
904 }
905 #endif /* CONTENT_FILTER */
906 if (tp) {
907 error = tcp_output(tp);
908 }
909 COMMON_END(PRU_SHUTDOWN);
910 }
911
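/*
 * Illustrative userspace sketch (not part of this file): shutdown(2) with
 * SHUT_WR is the call that reaches tcp_usr_shutdown() above; the send side
 * is marked closed and a FIN goes out via tcp_output(), while the receive
 * side keeps working until the peer closes.  The helper name is made up for
 * the example and `s` is assumed to be a connected TCP socket.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <unistd.h>
#include <stdio.h>

static void
half_close_and_drain(int s)
{
	char buf[4096];

	/* sends our FIN; we can still read whatever the peer has left */
	if (shutdown(s, SHUT_WR) == -1) {
		perror("shutdown(SHUT_WR)");
		return;
	}
	while (read(s, buf, sizeof(buf)) > 0) {
		/* discard until the peer's FIN arrives (read() returns 0) */
	}
}
#endif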
912 /*
913 * After a receive, possibly send window update to peer.
914 */
915 static int
916 tcp_usr_rcvd(struct socket *so, int flags)
917 {
918 int error = 0;
919 struct inpcb *inp = sotoinpcb(so);
920 struct tcpcb *tp;
921
922 COMMON_START();
923 /* In case we got disconnected from the peer */
924 if (tp == NULL) {
925 goto out;
926 }
927 tcp_sbrcv_trim(tp, &so->so_rcv);
928
929 if (flags & MSG_WAITALL) {
930 tp->t_flags |= TF_ACKNOW;
931 }
932
933 /*
934 * This tcp_output is solely there to trigger window-updates.
935 * However, we really do not want these window-updates while we
936 * are still in SYN_SENT or SYN_RECEIVED.
937 */
938 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
939 tcp_output(tp);
940 }
941
942 #if CONTENT_FILTER
943 cfil_sock_buf_update(&so->so_rcv);
944 #endif /* CONTENT_FILTER */
945
946 COMMON_END(PRU_RCVD);
947 }
948
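/*
 * Illustrative userspace sketch (not part of this file): every completed
 * receive ends in tcp_usr_rcvd() above, which trims the receive buffer and,
 * once established, may push a pure window update.  Passing MSG_WAITALL
 * additionally sets TF_ACKNOW so that update is not delayed.  The helper
 * name is made up; `s` is assumed to be a connected TCP socket.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <stdio.h>

static ssize_t
recv_exact(int s, void *buf, size_t len)
{
	/* blocks until `len` bytes arrive (or EOF/error) */
	ssize_t n = recv(s, buf, len, MSG_WAITALL);

	if (n == -1) {
		perror("recv(MSG_WAITALL)");
	}
	return n;
}
#endif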
949 /*
950 * Do a send by putting data in output queue and updating urgent
951 * marker if URG set. Possibly send more data. Unlike the other
952 * pru_*() routines, the mbuf chains are our responsibility. We
953 * must either enqueue them or free them. The other pru_* routines
954 * generally are caller-frees.
955 *
956 * Returns: 0 Success
957 * ECONNRESET
958 * EINVAL
959 * ENOBUFS
960 * tcp_connect:EADDRINUSE Address in use
961 * tcp_connect:EADDRNOTAVAIL Address not available.
962 * tcp_connect:EINVAL Invalid argument
963 * tcp_connect:EAFNOSUPPORT Address family not supported [notdef]
964 * tcp_connect:EACCES Permission denied
965 * tcp_connect:EAGAIN Resource unavailable, try again
966 * tcp_connect:EPERM Operation not permitted
967 * tcp_output:EADDRNOTAVAIL
968 * tcp_output:ENOBUFS
969 * tcp_output:EMSGSIZE
970 * tcp_output:EHOSTUNREACH
971 * tcp_output:ENETUNREACH
972 * tcp_output:ENETDOWN
973 * tcp_output:ENOMEM
974 * tcp_output:EACCES
975 * tcp_output:EMSGSIZE
976 * tcp_output:ENOBUFS
977 * tcp_output:??? [ignorable: mostly IPSEC/firewall/DLIL]
978 * tcp6_connect:??? [IPV6 only]
979 */
980 static int
981 tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
982 struct sockaddr *nam, struct mbuf *control, struct proc *p)
983 {
984 int error = 0;
985 struct inpcb *inp = sotoinpcb(so);
986 struct tcpcb *tp;
987 uint32_t mpkl_len = 0; /* length of mbuf chain */
988 uint32_t mpkl_seq; /* sequence number where new data is added */
989 struct so_mpkl_send_info mpkl_send_info = {};
990
991 int isipv6;
992 TCPDEBUG0;
993
994 if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD
995 #if NECP
996 || (necp_socket_should_use_flow_divert(inp))
997 #endif /* NECP */
998 ) {
999 /*
1000 * OOPS! we lost a race, the TCP session got reset after
1001 			 * we checked SS_CANTSENDMORE, e.g. while doing uiomove or a
1002 * network interrupt in the non-splnet() section of sosend().
1003 */
1004 if (m != NULL) {
1005 m_freem(m);
1006 }
1007 if (control != NULL) {
1008 m_freem(control);
1009 control = NULL;
1010 }
1011
1012 if (inp == NULL) {
1013 error = ECONNRESET; /* XXX EPIPE? */
1014 } else {
1015 error = EPROTOTYPE;
1016 }
1017 tp = NULL;
1018 TCPDEBUG1();
1019 goto out;
1020 }
1021 isipv6 = nam && nam->sa_family == AF_INET6;
1022 tp = intotcpcb(inp);
1023 TCPDEBUG1();
1024
1025 calculate_tcp_clock();
1026
1027 if (net_mpklog_enabled) {
1028 mpkl_seq = tp->snd_una + so->so_snd.sb_cc;
1029 if (m) {
1030 mpkl_len = m_length(m);
1031 }
1032 if (so->so_flags1 & SOF1_MPKL_SEND_INFO) {
1033 uuid_copy(mpkl_send_info.mpkl_uuid, so->so_mpkl_send_uuid);
1034 mpkl_send_info.mpkl_proto = so->so_mpkl_send_proto;
1035 }
1036 }
1037
1038 if (control != NULL) {
1039 if (control->m_len > 0 && net_mpklog_enabled) {
1040 error = tcp_get_mpkl_send_info(control, &mpkl_send_info);
1041 /*
1042 			 * Interpretation of the returned code:
1043 * 0: client wants us to use value passed in SCM_MPKL_SEND_INFO
1044 * 1: SCM_MPKL_SEND_INFO was not present
1045 * other: failure
1046 */
1047 if (error != 0 && error != ENOMSG) {
1048 m_freem(control);
1049 if (m != NULL) {
1050 m_freem(m);
1051 }
1052 control = NULL;
1053 m = NULL;
1054 goto out;
1055 }
1056 }
1057 /*
1058 * Silently drop unsupported ancillary data messages
1059 */
1060 m_freem(control);
1061 control = NULL;
1062 }
1063
1064 	/* MPTCP subflow socket buffers must not be compressed */
1065 VERIFY(!(so->so_flags & SOF_MP_SUBFLOW) ||
1066 (so->so_snd.sb_flags & SB_NOCOMPRESS));
1067
1068 if (!(flags & PRUS_OOB) || (so->so_flags1 & SOF1_PRECONNECT_DATA)) {
1069 sbappendstream(&so->so_snd, m);
1070
1071 if (nam && tp->t_state < TCPS_SYN_SENT) {
1072 /*
1073 * Do implied connect if not yet connected,
1074 * initialize window to default value, and
1075 * initialize maxseg/maxopd using peer's cached
1076 * MSS.
1077 */
1078 if (isipv6) {
1079 error = tcp6_connect(tp, nam, p);
1080 } else {
1081 error = tcp_connect(tp, nam, p);
1082 }
1083 if (error) {
1084 TCP_LOG_CONNECT(tp, true, error);
1085 goto out;
1086 }
1087 tp->snd_wnd = TTCP_CLIENT_SND_WND;
1088 tp->max_sndwnd = tp->snd_wnd;
1089 tcp_mss(tp, -1, IFSCOPE_NONE);
1090
1091 TCP_LOG_CONNECT(tp, true, error);
1092
1093 /* The sequence number of the data is past the SYN */
1094 mpkl_seq = tp->iss + 1;
1095 }
1096
1097 if (flags & PRUS_EOF) {
1098 /*
1099 * Close the send side of the connection after
1100 * the data is sent.
1101 */
1102 socantsendmore(so);
1103 tp = tcp_usrclosed(tp);
1104 }
1105 if (tp != NULL) {
1106 if (flags & PRUS_MORETOCOME) {
1107 tp->t_flags |= TF_MORETOCOME;
1108 }
1109 error = tcp_output(tp);
1110 if (flags & PRUS_MORETOCOME) {
1111 tp->t_flags &= ~TF_MORETOCOME;
1112 }
1113 }
1114 } else {
1115 if (sbspace(&so->so_snd) == 0) {
1116 /* if no space is left in sockbuf,
1117 * do not try to squeeze in OOB traffic */
1118 m_freem(m);
1119 error = ENOBUFS;
1120 goto out;
1121 }
1122 /*
1123 * According to RFC961 (Assigned Protocols),
1124 * the urgent pointer points to the last octet
1125 * of urgent data. We continue, however,
1126 * to consider it to indicate the first octet
1127 * of data past the urgent section.
1128 * Otherwise, snd_up should be one lower.
1129 */
1130 sbappendstream(&so->so_snd, m);
1131 if (nam && tp->t_state < TCPS_SYN_SENT) {
1132 /*
1133 * Do implied connect if not yet connected,
1134 * initialize window to default value, and
1135 * initialize maxseg/maxopd using peer's cached
1136 * MSS.
1137 */
1138 if (isipv6) {
1139 error = tcp6_connect(tp, nam, p);
1140 } else {
1141 error = tcp_connect(tp, nam, p);
1142 }
1143 if (error) {
1144 TCP_LOG_CONNECT(tp, true, error);
1145 goto out;
1146 }
1147 tp->snd_wnd = TTCP_CLIENT_SND_WND;
1148 tp->max_sndwnd = tp->snd_wnd;
1149 tcp_mss(tp, -1, IFSCOPE_NONE);
1150
1151 TCP_LOG_CONNECT(tp, true, error);
1152 }
1153 tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
1154 tp->t_flagsext |= TF_FORCE;
1155 error = tcp_output(tp);
1156 tp->t_flagsext &= ~TF_FORCE;
1157 }
1158
1159 if (net_mpklog_enabled && (inp = tp->t_inpcb) != NULL &&
1160 ((inp->inp_last_outifp != NULL &&
1161 (inp->inp_last_outifp->if_xflags & IFXF_MPK_LOG)) ||
1162 (inp->inp_boundifp != NULL &&
1163 (inp->inp_boundifp->if_xflags & IFXF_MPK_LOG)))) {
1164 MPKL_TCP_SEND(tcp_mpkl_log_object,
1165 mpkl_send_info.mpkl_proto, mpkl_send_info.mpkl_uuid,
1166 ntohs(inp->inp_lport), ntohs(inp->inp_fport),
1167 mpkl_seq, mpkl_len,
1168 so->last_pid, so->so_log_seqn++);
1169 }
1170
1171 /*
1172 * We wait for the socket to successfully connect before returning.
1173 * This allows us to signal a timeout to the application.
1174 */
1175 if (so->so_state & SS_ISCONNECTING) {
1176 if (so->so_state & SS_NBIO) {
1177 error = EWOULDBLOCK;
1178 } else {
1179 error = sbwait(&so->so_snd);
1180 }
1181 }
1182
1183 COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB :
1184 ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
1185 }
1186
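/*
 * Illustrative userspace sketch (not part of this file): send(2) with
 * MSG_OOB takes the PRUS_OOB branch of tcp_usr_send() above, which appends
 * the byte, points snd_up just past it and forces a transmission with
 * TF_FORCE.  The helper name is made up; `s` is assumed to be a connected
 * TCP socket.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <stdio.h>

static void
send_urgent(int s, char byte)
{
	/* one byte of "urgent" data; the segment goes out with URG set */
	if (send(s, &byte, 1, MSG_OOB) == -1) {
		perror("send(MSG_OOB)");
	}
}
#endif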
1187 /*
1188 * Abort the TCP.
1189 */
1190 static int
1191 tcp_usr_abort(struct socket *so)
1192 {
1193 int error = 0;
1194 struct inpcb *inp = sotoinpcb(so);
1195 struct tcpcb *tp;
1196
1197 COMMON_START();
1198 /* In case we got disconnected from the peer */
1199 if (tp == NULL) {
1200 goto out;
1201 }
1202 tp = tcp_drop(tp, ECONNABORTED);
1203 VERIFY(so->so_usecount > 0);
1204 so->so_usecount--;
1205 COMMON_END(PRU_ABORT);
1206 }
1207
1208 /*
1209 * Receive out-of-band data.
1210 *
1211 * Returns: 0 Success
1212 * EINVAL [COMMON_START]
1213 * EINVAL
1214 * EWOULDBLOCK
1215 */
1216 static int
1217 tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags)
1218 {
1219 int error = 0;
1220 struct inpcb *inp = sotoinpcb(so);
1221 struct tcpcb *tp;
1222
1223 COMMON_START();
1224 if ((so->so_oobmark == 0 &&
1225 (so->so_state & SS_RCVATMARK) == 0) ||
1226 so->so_options & SO_OOBINLINE ||
1227 tp->t_oobflags & TCPOOB_HADDATA) {
1228 error = EINVAL;
1229 goto out;
1230 }
1231 if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
1232 error = EWOULDBLOCK;
1233 goto out;
1234 }
1235 m->m_len = 1;
1236 *mtod(m, caddr_t) = tp->t_iobc;
1237 so->so_state &= ~SS_RCVATMARK;
1238 if ((flags & MSG_PEEK) == 0) {
1239 tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
1240 }
1241 COMMON_END(PRU_RCVOOB);
1242 }
1243
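/*
 * Illustrative userspace sketch (not part of this file): recv(2) with
 * MSG_OOB is serviced by tcp_usr_rcvoob() above.  It fails with EINVAL when
 * SO_OOBINLINE is set (the byte is then delivered in-band) and with
 * EWOULDBLOCK when no urgent byte has arrived yet.  The helper name is made
 * up; `s` is assumed to be a connected TCP socket.
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <errno.h>

/* Returns 1 if an urgent byte was read, 0 if none is pending yet,
 * -1 on other errors. */
static int
recv_urgent(int s, char *byte)
{
	ssize_t n = recv(s, byte, 1, MSG_OOB);

	if (n == 1) {
		return 1;
	}
	if (n == -1 && errno == EWOULDBLOCK) {
		return 0;
	}
	return -1;
}
#endif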
1244 static int
1245 tcp_usr_preconnect(struct socket *so)
1246 {
1247 struct inpcb *inp = sotoinpcb(so);
1248 int error = 0;
1249
1250 #if NECP
1251 if (necp_socket_should_use_flow_divert(inp)) {
1252 		/* May happen if in tcp_usr_connect we did not have a chance
1253 		 * to set the usrreqs (due to some error). So, let's get out
1254 * of here.
1255 */
1256 goto out;
1257 }
1258 #endif /* NECP */
1259
1260 error = tcp_output(sototcpcb(so));
1261
1262 soclearfastopen(so);
1263
1264 COMMON_END(PRU_PRECONNECT);
1265 }
1266
1267 /* xxx - should be const */
1268 struct pr_usrreqs tcp_usrreqs = {
1269 .pru_abort = tcp_usr_abort,
1270 .pru_accept = tcp_usr_accept,
1271 .pru_attach = tcp_usr_attach,
1272 .pru_bind = tcp_usr_bind,
1273 .pru_connect = tcp_usr_connect,
1274 .pru_connectx = tcp_usr_connectx,
1275 .pru_control = in_control,
1276 .pru_detach = tcp_usr_detach,
1277 .pru_disconnect = tcp_usr_disconnect,
1278 .pru_disconnectx = tcp_usr_disconnectx,
1279 .pru_listen = tcp_usr_listen,
1280 .pru_peeraddr = in_getpeeraddr,
1281 .pru_rcvd = tcp_usr_rcvd,
1282 .pru_rcvoob = tcp_usr_rcvoob,
1283 .pru_send = tcp_usr_send,
1284 .pru_shutdown = tcp_usr_shutdown,
1285 .pru_sockaddr = in_getsockaddr,
1286 .pru_sosend = sosend,
1287 .pru_soreceive = soreceive,
1288 .pru_preconnect = tcp_usr_preconnect,
1289 };
1290
1291 struct pr_usrreqs tcp6_usrreqs = {
1292 .pru_abort = tcp_usr_abort,
1293 .pru_accept = tcp6_usr_accept,
1294 .pru_attach = tcp_usr_attach,
1295 .pru_bind = tcp6_usr_bind,
1296 .pru_connect = tcp6_usr_connect,
1297 .pru_connectx = tcp6_usr_connectx,
1298 .pru_control = in6_control,
1299 .pru_detach = tcp_usr_detach,
1300 .pru_disconnect = tcp_usr_disconnect,
1301 .pru_disconnectx = tcp_usr_disconnectx,
1302 .pru_listen = tcp6_usr_listen,
1303 .pru_peeraddr = in6_mapped_peeraddr,
1304 .pru_rcvd = tcp_usr_rcvd,
1305 .pru_rcvoob = tcp_usr_rcvoob,
1306 .pru_send = tcp_usr_send,
1307 .pru_shutdown = tcp_usr_shutdown,
1308 .pru_sockaddr = in6_mapped_sockaddr,
1309 .pru_sosend = sosend,
1310 .pru_soreceive = soreceive,
1311 .pru_preconnect = tcp_usr_preconnect,
1312 };
1313
1314 /*
1315 * Common subroutine to open a TCP connection to remote host specified
1316 * by struct sockaddr_in in mbuf *nam. Call in_pcbbind to assign a local
1317 * port number if needed. Call in_pcbladdr to do the routing and to choose
1318 * a local host address (interface). If there is an existing incarnation
1319 * of the same connection in TIME-WAIT state and if the remote host was
1320 * sending CC options and if the connection duration was < MSL, then
1321 * truncate the previous TIME-WAIT state and proceed.
1322 * Initialize connection parameters and enter SYN-SENT state.
1323 *
1324 * Returns: 0 Success
1325 * EADDRINUSE
1326 * EINVAL
1327 * in_pcbbind:EADDRNOTAVAIL Address not available.
1328 * in_pcbbind:EINVAL Invalid argument
1329 * in_pcbbind:EAFNOSUPPORT Address family not supported [notdef]
1330 * in_pcbbind:EACCES Permission denied
1331 * in_pcbbind:EADDRINUSE Address in use
1332 * in_pcbbind:EAGAIN Resource unavailable, try again
1333 * in_pcbbind:EPERM Operation not permitted
1334 * in_pcbladdr:EINVAL Invalid argument
1335 * in_pcbladdr:EAFNOSUPPORT Address family not supported
1336 * in_pcbladdr:EADDRNOTAVAIL Address not available
1337 */
1338 static int
1339 tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p)
1340 {
1341 struct inpcb *inp = tp->t_inpcb, *oinp;
1342 struct socket *so = inp->inp_socket;
1343 struct tcpcb *otp;
1344 struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;
1345 struct in_addr laddr;
1346 int error = 0;
1347 struct ifnet *outif = NULL;
1348
1349 if (inp->inp_lport == 0) {
1350 error = in_pcbbind(inp, NULL, p);
1351 if (error) {
1352 goto done;
1353 }
1354 }
1355
1356 /*
1357 * Cannot simply call in_pcbconnect, because there might be an
1358 * earlier incarnation of this same connection still in
1359 * TIME_WAIT state, creating an ADDRINUSE error.
1360 */
1361 error = in_pcbladdr(inp, nam, &laddr, IFSCOPE_NONE, &outif, 0);
1362 if (error) {
1363 goto done;
1364 }
1365
1366 socket_unlock(inp->inp_socket, 0);
1367 oinp = in_pcblookup_hash(inp->inp_pcbinfo,
1368 sin->sin_addr, sin->sin_port,
1369 inp->inp_laddr.s_addr != INADDR_ANY ? inp->inp_laddr : laddr,
1370 inp->inp_lport, 0, NULL);
1371
1372 socket_lock(inp->inp_socket, 0);
1373 if (oinp) {
1374 if (oinp != inp) { /* 4143933: avoid deadlock if inp == oinp */
1375 socket_lock(oinp->inp_socket, 1);
1376 }
1377 if (in_pcb_checkstate(oinp, WNT_RELEASE, 1) == WNT_STOPUSING) {
1378 if (oinp != inp) {
1379 socket_unlock(oinp->inp_socket, 1);
1380 }
1381 goto skip_oinp;
1382 }
1383
1384 if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
1385 otp->t_state == TCPS_TIME_WAIT &&
1386 ((int)(tcp_now - otp->t_starttime)) < tcp_msl &&
1387 (otp->t_flags & TF_RCVD_CC)) {
1388 otp = tcp_close(otp);
1389 } else {
1390 printf("tcp_connect: inp=0x%llx err=EADDRINUSE\n",
1391 (uint64_t)VM_KERNEL_ADDRPERM(inp));
1392 if (oinp != inp) {
1393 socket_unlock(oinp->inp_socket, 1);
1394 }
1395 error = EADDRINUSE;
1396 goto done;
1397 }
1398 if (oinp != inp) {
1399 socket_unlock(oinp->inp_socket, 1);
1400 }
1401 }
1402 skip_oinp:
1403 if ((inp->inp_laddr.s_addr == INADDR_ANY ? laddr.s_addr :
1404 inp->inp_laddr.s_addr) == sin->sin_addr.s_addr &&
1405 inp->inp_lport == sin->sin_port) {
1406 error = EINVAL;
1407 goto done;
1408 }
1409 if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
1410 		/* lock inversion issue, mostly with UDP multicast packets */
1411 socket_unlock(inp->inp_socket, 0);
1412 lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
1413 socket_lock(inp->inp_socket, 0);
1414 }
1415 if (inp->inp_laddr.s_addr == INADDR_ANY) {
1416 inp->inp_laddr = laddr;
1417 /* no reference needed */
1418 inp->inp_last_outifp = outif;
1419
1420 inp->inp_flags |= INP_INADDR_ANY;
1421 }
1422 inp->inp_faddr = sin->sin_addr;
1423 inp->inp_fport = sin->sin_port;
1424 in_pcbrehash(inp);
1425 lck_rw_done(inp->inp_pcbinfo->ipi_lock);
1426
1427 if (inp->inp_flowhash == 0) {
1428 inp->inp_flowhash = inp_calc_flowhash(inp);
1429 }
1430
1431 tcp_set_max_rwinscale(tp, so);
1432
1433 soisconnecting(so);
1434 tcpstat.tcps_connattempt++;
1435 tp->t_state = TCPS_SYN_SENT;
1436 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_CONN_KEEPINIT(tp));
1437 tp->iss = tcp_new_isn(tp);
1438 tcp_sendseqinit(tp);
1439 tp->t_connect_time = tcp_now;
1440 if (nstat_collect) {
1441 nstat_route_connect_attempt(inp->inp_route.ro_rt);
1442 }
1443
1444 tcp_add_fsw_flow(tp, outif);
1445
1446 done:
1447 if (outif != NULL) {
1448 ifnet_release(outif);
1449 }
1450
1451 return error;
1452 }
1453
1454 static int
1455 tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p)
1456 {
1457 struct inpcb *inp = tp->t_inpcb, *oinp;
1458 struct socket *so = inp->inp_socket;
1459 struct tcpcb *otp;
1460 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)nam;
1461 struct in6_addr addr6;
1462 int error = 0;
1463 struct ifnet *outif = NULL;
1464
1465 if (inp->inp_lport == 0) {
1466 error = in6_pcbbind(inp, NULL, p);
1467 if (error) {
1468 goto done;
1469 }
1470 }
1471
1472 /*
1473 * Cannot simply call in_pcbconnect, because there might be an
1474 * earlier incarnation of this same connection still in
1475 * TIME_WAIT state, creating an ADDRINUSE error.
1476 *
1477 * in6_pcbladdr() might return an ifp with its reference held
1478 * even in the error case, so make sure that it's released
1479 * whenever it's non-NULL.
1480 */
1481 error = in6_pcbladdr(inp, nam, &addr6, &outif);
1482 if (error) {
1483 goto done;
1484 }
1485 socket_unlock(inp->inp_socket, 0);
1486 oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
1487 &sin6->sin6_addr, sin6->sin6_port,
1488 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
1489 ? &addr6
1490 : &inp->in6p_laddr,
1491 inp->inp_lport, 0, NULL);
1492 socket_lock(inp->inp_socket, 0);
1493 if (oinp) {
1494 if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
1495 otp->t_state == TCPS_TIME_WAIT &&
1496 ((int)(tcp_now - otp->t_starttime)) < tcp_msl &&
1497 (otp->t_flags & TF_RCVD_CC)) {
1498 otp = tcp_close(otp);
1499 } else {
1500 error = EADDRINUSE;
1501 goto done;
1502 }
1503 }
1504 if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
1505 		/* lock inversion issue, mostly with UDP multicast packets */
1506 socket_unlock(inp->inp_socket, 0);
1507 lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
1508 socket_lock(inp->inp_socket, 0);
1509 }
1510 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
1511 inp->in6p_laddr = addr6;
1512 inp->in6p_last_outifp = outif; /* no reference needed */
1513 inp->in6p_flags |= INP_IN6ADDR_ANY;
1514 }
1515 inp->in6p_faddr = sin6->sin6_addr;
1516 inp->inp_fport = sin6->sin6_port;
1517 if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0) {
1518 inp->inp_flow = sin6->sin6_flowinfo;
1519 }
1520 in_pcbrehash(inp);
1521 lck_rw_done(inp->inp_pcbinfo->ipi_lock);
1522
1523 if (inp->inp_flowhash == 0) {
1524 inp->inp_flowhash = inp_calc_flowhash(inp);
1525 }
1526 /* update flowinfo - RFC 6437 */
1527 if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
1528 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
1529 inp->inp_flow |=
1530 (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
1531 }
1532
1533 tcp_set_max_rwinscale(tp, so);
1534
1535 soisconnecting(so);
1536 tcpstat.tcps_connattempt++;
1537 tp->t_state = TCPS_SYN_SENT;
1538 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1539 TCP_CONN_KEEPINIT(tp));
1540 tp->iss = tcp_new_isn(tp);
1541 tcp_sendseqinit(tp);
1542 tp->t_connect_time = tcp_now;
1543 if (nstat_collect) {
1544 nstat_route_connect_attempt(inp->inp_route.ro_rt);
1545 }
1546
1547 tcp_add_fsw_flow(tp, outif);
1548
1549 done:
1550 if (outif != NULL) {
1551 ifnet_release(outif);
1552 }
1553
1554 return error;
1555 }
1556
1557 /*
1558 * Export TCP internal state information via a struct tcp_info
1559 */
1560 void
1561 tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
1562 {
1563 struct inpcb *inp = tp->t_inpcb;
1564
1565 bzero(ti, sizeof(*ti));
1566
1567 ti->tcpi_state = (uint8_t)tp->t_state;
1568 ti->tcpi_flowhash = inp->inp_flowhash;
1569
1570 if (tp->t_state > TCPS_LISTEN) {
1571 if (TSTMP_SUPPORTED(tp)) {
1572 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1573 }
1574 if (SACK_ENABLED(tp)) {
1575 ti->tcpi_options |= TCPI_OPT_SACK;
1576 }
1577 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
1578 ti->tcpi_options |= TCPI_OPT_WSCALE;
1579 ti->tcpi_snd_wscale = tp->snd_scale;
1580 ti->tcpi_rcv_wscale = tp->rcv_scale;
1581 }
1582 if (TCP_ECN_ENABLED(tp)) {
1583 ti->tcpi_options |= TCPI_OPT_ECN;
1584 }
1585
1586 		/* Are we in a retransmission episode? */
1587 if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) {
1588 ti->tcpi_flags |= TCPI_FLAG_LOSSRECOVERY;
1589 }
1590
1591 if (tp->t_flags & TF_STREAMING_ON) {
1592 ti->tcpi_flags |= TCPI_FLAG_STREAMING_ON;
1593 }
1594
1595 ti->tcpi_rto = tp->t_timer[TCPT_REXMT] ? tp->t_rxtcur : 0;
1596 ti->tcpi_snd_mss = tp->t_maxseg;
1597 ti->tcpi_rcv_mss = tp->t_maxseg;
1598
1599 ti->tcpi_rttcur = tp->t_rttcur;
1600 ti->tcpi_srtt = tp->t_srtt >> TCP_RTT_SHIFT;
1601 ti->tcpi_rttvar = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
1602 ti->tcpi_rttbest = tp->t_rttbest >> TCP_RTT_SHIFT;
1603
1604 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
1605 ti->tcpi_snd_cwnd = tp->snd_cwnd;
1606 ti->tcpi_snd_sbbytes = inp->inp_socket->so_snd.sb_cc;
1607
1608 ti->tcpi_rcv_space = tp->rcv_wnd;
1609
1610 ti->tcpi_snd_wnd = tp->snd_wnd;
1611 ti->tcpi_snd_nxt = tp->snd_nxt;
1612 ti->tcpi_rcv_nxt = tp->rcv_nxt;
1613
1614 /* convert bytes/msec to bits/sec */
1615 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
1616 tp->t_bwmeas != NULL) {
1617 ti->tcpi_snd_bw = (tp->t_bwmeas->bw_sndbw * 8000);
1618 }
1619
1620 ti->tcpi_last_outif = (tp->t_inpcb->inp_last_outifp == NULL) ? 0 :
1621 tp->t_inpcb->inp_last_outifp->if_index;
1622
1623 //atomic_get_64(ti->tcpi_txbytes, &inp->inp_stat->txbytes);
1624 ti->tcpi_txpackets = inp->inp_stat->txpackets;
1625 ti->tcpi_txbytes = inp->inp_stat->txbytes;
1626 ti->tcpi_txretransmitbytes = tp->t_stat.txretransmitbytes;
1627 ti->tcpi_txretransmitpackets = tp->t_stat.rxmitpkts;
1628 ti->tcpi_txunacked = tp->snd_max - tp->snd_una;
1629
1630 //atomic_get_64(ti->tcpi_rxbytes, &inp->inp_stat->rxbytes);
1631 ti->tcpi_rxpackets = inp->inp_stat->rxpackets;
1632 ti->tcpi_rxbytes = inp->inp_stat->rxbytes;
1633 ti->tcpi_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
1634 ti->tcpi_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1635
1636 if (tp->t_state > TCPS_LISTEN) {
1637 ti->tcpi_synrexmits = (uint8_t)tp->t_stat.rxmitsyns;
1638 }
1639 ti->tcpi_cell_rxpackets = inp->inp_cstat->rxpackets;
1640 ti->tcpi_cell_rxbytes = inp->inp_cstat->rxbytes;
1641 ti->tcpi_cell_txpackets = inp->inp_cstat->txpackets;
1642 ti->tcpi_cell_txbytes = inp->inp_cstat->txbytes;
1643
1644 ti->tcpi_wifi_rxpackets = inp->inp_wstat->rxpackets;
1645 ti->tcpi_wifi_rxbytes = inp->inp_wstat->rxbytes;
1646 ti->tcpi_wifi_txpackets = inp->inp_wstat->txpackets;
1647 ti->tcpi_wifi_txbytes = inp->inp_wstat->txbytes;
1648
1649 ti->tcpi_wired_rxpackets = inp->inp_Wstat->rxpackets;
1650 ti->tcpi_wired_rxbytes = inp->inp_Wstat->rxbytes;
1651 ti->tcpi_wired_txpackets = inp->inp_Wstat->txpackets;
1652 ti->tcpi_wired_txbytes = inp->inp_Wstat->txbytes;
1653 tcp_get_connectivity_status(tp, &ti->tcpi_connstatus);
1654
1655 ti->tcpi_tfo_syn_data_rcv = !!(tp->t_tfo_stats & TFO_S_SYNDATA_RCV);
1656 ti->tcpi_tfo_cookie_req_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIEREQ_RECV);
1657 ti->tcpi_tfo_cookie_sent = !!(tp->t_tfo_stats & TFO_S_COOKIE_SENT);
1658 ti->tcpi_tfo_cookie_invalid = !!(tp->t_tfo_stats & TFO_S_COOKIE_INVALID);
1659
1660 ti->tcpi_tfo_cookie_req = !!(tp->t_tfo_stats & TFO_S_COOKIE_REQ);
1661 ti->tcpi_tfo_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIE_RCV);
1662 ti->tcpi_tfo_syn_data_sent = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_SENT);
1663 ti->tcpi_tfo_syn_data_acked = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED);
1664 ti->tcpi_tfo_syn_loss = !!(tp->t_tfo_stats & TFO_S_SYN_LOSS);
1665 ti->tcpi_tfo_cookie_wrong = !!(tp->t_tfo_stats & TFO_S_COOKIE_WRONG);
1666 ti->tcpi_tfo_no_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_NO_COOKIE_RCV);
1667 ti->tcpi_tfo_heuristics_disable = !!(tp->t_tfo_stats & TFO_S_HEURISTICS_DISABLE);
1668 ti->tcpi_tfo_send_blackhole = !!(tp->t_tfo_stats & TFO_S_SEND_BLACKHOLE);
1669 ti->tcpi_tfo_recv_blackhole = !!(tp->t_tfo_stats & TFO_S_RECV_BLACKHOLE);
1670 ti->tcpi_tfo_onebyte_proxy = !!(tp->t_tfo_stats & TFO_S_ONE_BYTE_PROXY);
1671
1672 ti->tcpi_ecn_client_setup = !!(tp->ecn_flags & TE_SETUPSENT);
1673 ti->tcpi_ecn_server_setup = !!(tp->ecn_flags & TE_SETUPRECEIVED);
1674 ti->tcpi_ecn_success = (tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON ? 1 : 0;
1675 ti->tcpi_ecn_lost_syn = !!(tp->ecn_flags & TE_LOST_SYN);
1676 ti->tcpi_ecn_lost_synack = !!(tp->ecn_flags & TE_LOST_SYNACK);
1677
1678 ti->tcpi_local_peer = !!(tp->t_flags & TF_LOCAL);
1679
1680 if (tp->t_inpcb->inp_last_outifp != NULL) {
1681 if (IFNET_IS_CELLULAR(tp->t_inpcb->inp_last_outifp)) {
1682 ti->tcpi_if_cell = 1;
1683 }
1684 if (IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
1685 ti->tcpi_if_wifi = 1;
1686 }
1687 if (IFNET_IS_WIRED(tp->t_inpcb->inp_last_outifp)) {
1688 ti->tcpi_if_wired = 1;
1689 }
1690 if (IFNET_IS_WIFI_INFRA(tp->t_inpcb->inp_last_outifp)) {
1691 ti->tcpi_if_wifi_infra = 1;
1692 }
1693 if (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) {
1694 ti->tcpi_if_wifi_awdl = 1;
1695 }
1696 }
1697 if (tp->tcp_cc_index == TCP_CC_ALGO_BACKGROUND_INDEX) {
1698 ti->tcpi_snd_background = 1;
1699 }
1700 if (tcp_recv_bg == 1 ||
1701 IS_TCP_RECV_BG(tp->t_inpcb->inp_socket)) {
1702 ti->tcpi_rcv_background = 1;
1703 }
1704
1705 ti->tcpi_ecn_recv_ce = tp->t_ecn_recv_ce;
1706 ti->tcpi_ecn_recv_cwr = tp->t_ecn_recv_cwr;
1707
1708 ti->tcpi_rcvoopack = tp->t_rcvoopack;
1709 ti->tcpi_pawsdrop = tp->t_pawsdrop;
1710 ti->tcpi_sack_recovery_episode = tp->t_sack_recovery_episode;
1711 ti->tcpi_reordered_pkts = tp->t_reordered_pkts;
1712 ti->tcpi_dsack_sent = tp->t_dsack_sent;
1713 ti->tcpi_dsack_recvd = tp->t_dsack_recvd;
1714 }
1715 }
1716
1717 __private_extern__ errno_t
1718 tcp_fill_info_for_info_tuple(struct info_tuple *itpl, struct tcp_info *ti)
1719 {
1720 struct inpcbinfo *pcbinfo = NULL;
1721 struct inpcb *inp = NULL;
1722 struct socket *so;
1723 struct tcpcb *tp;
1724
1725 if (itpl->itpl_proto == IPPROTO_TCP) {
1726 pcbinfo = &tcbinfo;
1727 } else {
1728 return EINVAL;
1729 }
1730
1731 if (itpl->itpl_local_sa.sa_family == AF_INET &&
1732 itpl->itpl_remote_sa.sa_family == AF_INET) {
1733 inp = in_pcblookup_hash(pcbinfo,
1734 itpl->itpl_remote_sin.sin_addr,
1735 itpl->itpl_remote_sin.sin_port,
1736 itpl->itpl_local_sin.sin_addr,
1737 itpl->itpl_local_sin.sin_port,
1738 0, NULL);
1739 } else if (itpl->itpl_local_sa.sa_family == AF_INET6 &&
1740 itpl->itpl_remote_sa.sa_family == AF_INET6) {
1741 struct in6_addr ina6_local;
1742 struct in6_addr ina6_remote;
1743
1744 ina6_local = itpl->itpl_local_sin6.sin6_addr;
1745 if (IN6_IS_SCOPE_LINKLOCAL(&ina6_local) &&
1746 itpl->itpl_local_sin6.sin6_scope_id) {
1747 ina6_local.s6_addr16[1] = htons((uint16_t)itpl->itpl_local_sin6.sin6_scope_id);
1748 }
1749
1750 ina6_remote = itpl->itpl_remote_sin6.sin6_addr;
1751 if (IN6_IS_SCOPE_LINKLOCAL(&ina6_remote) &&
1752 itpl->itpl_remote_sin6.sin6_scope_id) {
1753 ina6_remote.s6_addr16[1] = htons((uint16_t)itpl->itpl_remote_sin6.sin6_scope_id);
1754 }
1755
1756 inp = in6_pcblookup_hash(pcbinfo,
1757 &ina6_remote,
1758 itpl->itpl_remote_sin6.sin6_port,
1759 &ina6_local,
1760 itpl->itpl_local_sin6.sin6_port,
1761 0, NULL);
1762 } else {
1763 return EINVAL;
1764 }
1765 if (inp == NULL || (so = inp->inp_socket) == NULL) {
1766 return ENOENT;
1767 }
1768
1769 socket_lock(so, 0);
1770 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
1771 socket_unlock(so, 0);
1772 return ENOENT;
1773 }
1774 tp = intotcpcb(inp);
1775
1776 tcp_fill_info(tp, ti);
1777 socket_unlock(so, 0);
1778
1779 return 0;
1780 }
1781
1782 static void
1783 tcp_connection_fill_info(struct tcpcb *tp, struct tcp_connection_info *tci)
1784 {
1785 struct inpcb *inp = tp->t_inpcb;
1786
1787 bzero(tci, sizeof(*tci));
1788 tci->tcpi_state = (uint8_t)tp->t_state;
1789 if (tp->t_state > TCPS_LISTEN) {
1790 if (TSTMP_SUPPORTED(tp)) {
1791 tci->tcpi_options |= TCPCI_OPT_TIMESTAMPS;
1792 }
1793 if (SACK_ENABLED(tp)) {
1794 tci->tcpi_options |= TCPCI_OPT_SACK;
1795 }
1796 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
1797 tci->tcpi_options |= TCPCI_OPT_WSCALE;
1798 tci->tcpi_snd_wscale = tp->snd_scale;
1799 tci->tcpi_rcv_wscale = tp->rcv_scale;
1800 }
1801 if (TCP_ECN_ENABLED(tp)) {
1802 tci->tcpi_options |= TCPCI_OPT_ECN;
1803 }
1804 if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) {
1805 tci->tcpi_flags |= TCPCI_FLAG_LOSSRECOVERY;
1806 }
1807 if (tp->t_flagsext & TF_PKTS_REORDERED) {
1808 tci->tcpi_flags |= TCPCI_FLAG_REORDERING_DETECTED;
1809 }
1810 tci->tcpi_rto = (tp->t_timer[TCPT_REXMT] > 0) ?
1811 tp->t_rxtcur : 0;
1812 tci->tcpi_maxseg = tp->t_maxseg;
1813 tci->tcpi_snd_ssthresh = tp->snd_ssthresh;
1814 tci->tcpi_snd_cwnd = tp->snd_cwnd;
1815 tci->tcpi_snd_wnd = tp->snd_wnd;
1816 tci->tcpi_snd_sbbytes = inp->inp_socket->so_snd.sb_cc;
1817 tci->tcpi_rcv_wnd = tp->rcv_wnd;
1818 tci->tcpi_rttcur = tp->t_rttcur;
1819 tci->tcpi_srtt = (tp->t_srtt >> TCP_RTT_SHIFT);
1820 tci->tcpi_rttvar = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1821 tci->tcpi_txpackets = inp->inp_stat->txpackets;
1822 tci->tcpi_txbytes = inp->inp_stat->txbytes;
1823 tci->tcpi_txretransmitbytes = tp->t_stat.txretransmitbytes;
1824 tci->tcpi_txretransmitpackets = tp->t_stat.rxmitpkts;
1825 tci->tcpi_rxpackets = inp->inp_stat->rxpackets;
1826 tci->tcpi_rxbytes = inp->inp_stat->rxbytes;
1827 tci->tcpi_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1828
1829 tci->tcpi_tfo_syn_data_rcv = !!(tp->t_tfo_stats & TFO_S_SYNDATA_RCV);
1830 tci->tcpi_tfo_cookie_req_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIEREQ_RECV);
1831 tci->tcpi_tfo_cookie_sent = !!(tp->t_tfo_stats & TFO_S_COOKIE_SENT);
1832 tci->tcpi_tfo_cookie_invalid = !!(tp->t_tfo_stats & TFO_S_COOKIE_INVALID);
1833 tci->tcpi_tfo_cookie_req = !!(tp->t_tfo_stats & TFO_S_COOKIE_REQ);
1834 tci->tcpi_tfo_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIE_RCV);
1835 tci->tcpi_tfo_syn_data_sent = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_SENT);
1836 tci->tcpi_tfo_syn_data_acked = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED);
1837 tci->tcpi_tfo_syn_loss = !!(tp->t_tfo_stats & TFO_S_SYN_LOSS);
1838 tci->tcpi_tfo_cookie_wrong = !!(tp->t_tfo_stats & TFO_S_COOKIE_WRONG);
1839 tci->tcpi_tfo_no_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_NO_COOKIE_RCV);
1840 tci->tcpi_tfo_heuristics_disable = !!(tp->t_tfo_stats & TFO_S_HEURISTICS_DISABLE);
1841 tci->tcpi_tfo_send_blackhole = !!(tp->t_tfo_stats & TFO_S_SEND_BLACKHOLE);
1842 tci->tcpi_tfo_recv_blackhole = !!(tp->t_tfo_stats & TFO_S_RECV_BLACKHOLE);
1843 tci->tcpi_tfo_onebyte_proxy = !!(tp->t_tfo_stats & TFO_S_ONE_BYTE_PROXY);
1844 }
1845 }
1846
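/*
 * Illustrative userspace sketch (not part of this file):
 * tcp_connection_fill_info() backs the TCP_CONNECTION_INFO socket option
 * (struct tcp_connection_info in <netinet/tcp.h> on Darwin).  A minimal
 * reader with a made-up helper name, assuming `s` is a connected TCP
 * socket:
 */
#if 0   /* example only, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>

static void
print_tcp_connection_info(int s)
{
	struct tcp_connection_info tci;
	socklen_t len = sizeof(tci);

	if (getsockopt(s, IPPROTO_TCP, TCP_CONNECTION_INFO, &tci, &len) == -1) {
		perror("getsockopt(TCP_CONNECTION_INFO)");
		return;
	}
	printf("state %u srtt %u ms cwnd %u bytes retransmitted %llu bytes\n",
	    tci.tcpi_state, tci.tcpi_srtt, tci.tcpi_snd_cwnd,
	    (unsigned long long)tci.tcpi_txretransmitbytes);
}
#endif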
1847
1848 __private_extern__ int
1849 tcp_sysctl_info(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1850 {
1851 int error;
1852 struct tcp_info ti = {};
1853 struct info_tuple itpl;
1854
1855 if (req->newptr == USER_ADDR_NULL) {
1856 return EINVAL;
1857 }
1858 if (req->newlen < sizeof(struct info_tuple)) {
1859 return EINVAL;
1860 }
1861 error = SYSCTL_IN(req, &itpl, sizeof(struct info_tuple));
1862 if (error != 0) {
1863 return error;
1864 }
1865 error = tcp_fill_info_for_info_tuple(&itpl, &ti);
1866 if (error != 0) {
1867 return error;
1868 }
1869 error = SYSCTL_OUT(req, &ti, sizeof(struct tcp_info));
1870 if (error != 0) {
1871 return error;
1872 }
1873
1874 return 0;
1875 }
1876
1877 static int
1878 tcp_lookup_peer_pid_locked(struct socket *so, pid_t *out_pid)
1879 {
1880 int error = EHOSTUNREACH;
1881 *out_pid = -1;
1882 if ((so->so_state & SS_ISCONNECTED) == 0) {
1883 return ENOTCONN;
1884 }
1885
1886 struct inpcb *inp = (struct inpcb*)so->so_pcb;
1887 uint16_t lport = inp->inp_lport;
1888 uint16_t fport = inp->inp_fport;
1889 struct inpcb *finp = NULL;
1890 struct in6_addr laddr6, faddr6;
1891 struct in_addr laddr4, faddr4;
1892
1893 if (inp->inp_vflag & INP_IPV6) {
1894 laddr6 = inp->in6p_laddr;
1895 faddr6 = inp->in6p_faddr;
1896 } else if (inp->inp_vflag & INP_IPV4) {
1897 laddr4 = inp->inp_laddr;
1898 faddr4 = inp->inp_faddr;
1899 }
1900
1901 socket_unlock(so, 0);
1902 if (inp->inp_vflag & INP_IPV6) {
1903 finp = in6_pcblookup_hash(&tcbinfo, &laddr6, lport, &faddr6, fport, 0, NULL);
1904 } else if (inp->inp_vflag & INP_IPV4) {
1905 finp = in_pcblookup_hash(&tcbinfo, laddr4, lport, faddr4, fport, 0, NULL);
1906 }
1907
1908 if (finp) {
1909 *out_pid = finp->inp_socket->last_pid;
1910 error = 0;
1911 in_pcb_checkstate(finp, WNT_RELEASE, 0);
1912 }
1913 socket_lock(so, 0);
1914
1915 return error;
1916 }
1917
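/*
 * Editor's note: illustrative userspace sketch (not built).  The lookup
 * above backs the TCP_PEER_PID getsockopt handled in tcp_ctloutput()
 * below; it only succeeds when the peer endpoint also terminates on this
 * host.  Availability of the TCP_PEER_PID constant in userspace headers
 * is assumed here.
 */
#if 0 /* illustrative only */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Return the pid that owns the remote end of a local TCP connection,
 * or -1 on failure (e.g. ENOTCONN, EHOSTUNREACH). */
static pid_t
tcp_peer_pid(int fd)
{
    pid_t pid = -1;
    socklen_t len = sizeof(pid);

    if (getsockopt(fd, IPPROTO_TCP, TCP_PEER_PID, &pid, &len) != 0) {
        return -1;
    }
    return pid;
}
#endif
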
1918 void
1919 tcp_getconninfo(struct socket *so, struct conninfo_tcp *tcp_ci)
1920 {
1921 (void) tcp_lookup_peer_pid_locked(so, &tcp_ci->tcpci_peer_pid);
1922 tcp_fill_info(sototcpcb(so), &tcp_ci->tcpci_tcp_info);
1923 }
1924
1925 void
1926 tcp_clear_keep_alive_offload(struct socket *so)
1927 {
1928 struct inpcb *inp;
1929 struct ifnet *ifp;
1930
1931 inp = sotoinpcb(so);
1932 if (inp == NULL) {
1933 return;
1934 }
1935
1936 if ((inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD) == 0) {
1937 return;
1938 }
1939
1940 ifp = inp->inp_boundifp != NULL ? inp->inp_boundifp :
1941 inp->inp_last_outifp;
1942 if (ifp == NULL) {
1943 panic("%s: so %p inp %p ifp NULL",
1944 __func__, so, inp);
1945 }
1946
1947 ifnet_lock_exclusive(ifp);
1948
1949 if (ifp->if_tcp_kao_cnt == 0) {
1950 panic("%s: so %p inp %p ifp %p if_tcp_kao_cnt == 0",
1951 __func__, so, inp, ifp);
1952 }
1953 ifp->if_tcp_kao_cnt--;
1954 inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD;
1955
1956 ifnet_lock_done(ifp);
1957 }
1958
1959 static int
1960 tcp_set_keep_alive_offload(struct socket *so, struct proc *proc)
1961 {
1962 int error = 0;
1963 struct inpcb *inp;
1964 struct ifnet *ifp;
1965
1966 inp = sotoinpcb(so);
1967 if (inp == NULL) {
1968 return ECONNRESET;
1969 }
1970 if ((inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD) != 0) {
1971 return 0;
1972 }
1973
1974 ifp = inp->inp_boundifp != NULL ? inp->inp_boundifp :
1975 inp->inp_last_outifp;
1976 if (ifp == NULL) {
1977 error = ENXIO;
1978 os_log_info(OS_LOG_DEFAULT,
1979 "%s: error %d for proc %s[%u] out ifp is not set\n",
1980 __func__, error,
1981 proc != NULL ? proc->p_comm : "kernel",
1982 proc != NULL ? proc->p_pid : 0);
1983 return ENXIO;
1984 }
1985
1986 error = if_get_tcp_kao_max(ifp);
1987 if (error != 0) {
1988 return error;
1989 }
1990
1991 ifnet_lock_exclusive(ifp);
1992 if (ifp->if_tcp_kao_cnt < ifp->if_tcp_kao_max) {
1993 ifp->if_tcp_kao_cnt++;
1994 inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD;
1995 } else {
1996 error = ETOOMANYREFS;
1997 os_log_info(OS_LOG_DEFAULT,
1998 "%s: error %d for proc %s[%u] if_tcp_kao_max %u\n",
1999 __func__, error,
2000 proc != NULL ? proc->p_comm : "kernel",
2001 proc != NULL ? proc->p_pid : 0,
2002 ifp->if_tcp_kao_max);
2003 }
2004 ifnet_lock_done(ifp);
2005
2006 return error;
2007 }
2008
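/*
 * Editor's note: illustrative userspace sketch (not built).  Keep-alive
 * offload is requested through the TCP_KEEPALIVE_OFFLOAD socket option
 * handled in tcp_ctloutput() below; the kernel additionally requires the
 * PRIV_NETINET_TCP_KA_OFFLOAD privilege and enforces the per-interface
 * if_tcp_kao_max limit (ETOOMANYREFS).  Visibility of the option name in
 * userspace headers is assumed.
 */
#if 0 /* illustrative only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Ask the kernel to offload keep-alive probes for this connection. */
static int
enable_keepalive_offload(int fd)
{
    int on = 1;

    return setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_OFFLOAD,
        &on, sizeof(on));
}
#endif
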
2009 /*
2010 * The new sockopt interface makes it possible for us to block in the
2011 * copyin/out step (if we take a page fault). Taking a page fault at
2012 * splnet() is probably a Bad Thing. (Since sockets and pcbs both now
2013 * use TSM, there probably isn't any need for this function to run at
2014 * splnet() any more. This needs more examination.)
2015 */
2016 int
2017 tcp_ctloutput(struct socket *so, struct sockopt *sopt)
2018 {
2019 int error = 0, opt = 0, optval = 0;
2020 struct inpcb *inp;
2021 struct tcpcb *tp;
2022
2023 inp = sotoinpcb(so);
2024 if (inp == NULL) {
2025 return ECONNRESET;
2026 }
2027 /* Allow <SOL_SOCKET,SO_FLUSH/SO_TRAFFIC_MGT_BACKGROUND> at this level */
2028 if (sopt->sopt_level != IPPROTO_TCP &&
2029 !(sopt->sopt_level == SOL_SOCKET && (sopt->sopt_name == SO_FLUSH ||
2030 sopt->sopt_name == SO_TRAFFIC_MGT_BACKGROUND))) {
2031 if (SOCK_CHECK_DOM(so, PF_INET6)) {
2032 error = ip6_ctloutput(so, sopt);
2033 } else {
2034 error = ip_ctloutput(so, sopt);
2035 }
2036 return error;
2037 }
2038 tp = intotcpcb(inp);
2039 if (tp == NULL) {
2040 return ECONNRESET;
2041 }
2042
2043 calculate_tcp_clock();
2044
2045 switch (sopt->sopt_dir) {
2046 case SOPT_SET:
2047 switch (sopt->sopt_name) {
2048 case TCP_NODELAY:
2049 case TCP_NOOPT:
2050 case TCP_NOPUSH:
2051 error = sooptcopyin(sopt, &optval, sizeof optval,
2052 sizeof optval);
2053 if (error) {
2054 break;
2055 }
2056
2057 switch (sopt->sopt_name) {
2058 case TCP_NODELAY:
2059 opt = TF_NODELAY;
2060 break;
2061 case TCP_NOOPT:
2062 opt = TF_NOOPT;
2063 break;
2064 case TCP_NOPUSH:
2065 opt = TF_NOPUSH;
2066 break;
2067 default:
2068 opt = 0; /* dead code to fool gcc */
2069 break;
2070 }
2071
2072 if (optval) {
2073 tp->t_flags |= opt;
2074 } else {
2075 tp->t_flags &= ~opt;
2076 }
2077 break;
2078 case TCP_RXT_FINDROP:
2079 case TCP_NOTIMEWAIT:
2080 error = sooptcopyin(sopt, &optval, sizeof optval,
2081 sizeof optval);
2082 if (error) {
2083 break;
2084 }
2085 switch (sopt->sopt_name) {
2086 case TCP_RXT_FINDROP:
2087 opt = TF_RXTFINDROP;
2088 break;
2089 case TCP_NOTIMEWAIT:
2090 opt = TF_NOTIMEWAIT;
2091 break;
2092 default:
2093 opt = 0;
2094 break;
2095 }
2096 if (optval) {
2097 tp->t_flagsext |= opt;
2098 } else {
2099 tp->t_flagsext &= ~opt;
2100 }
2101 break;
2102 case TCP_MEASURE_SND_BW:
2103 error = sooptcopyin(sopt, &optval, sizeof optval,
2104 sizeof optval);
2105 if (error) {
2106 break;
2107 }
2108 opt = TF_MEASURESNDBW;
2109 if (optval) {
2110 if (tp->t_bwmeas == NULL) {
2111 tp->t_bwmeas = tcp_bwmeas_alloc(tp);
2112 if (tp->t_bwmeas == NULL) {
2113 error = ENOMEM;
2114 break;
2115 }
2116 }
2117 tp->t_flagsext |= opt;
2118 } else {
2119 tp->t_flagsext &= ~opt;
2120 /* Reset snd bw measurement state */
2121 tp->t_flagsext &= ~(TF_BWMEAS_INPROGRESS);
2122 if (tp->t_bwmeas != NULL) {
2123 tcp_bwmeas_free(tp);
2124 }
2125 }
2126 break;
2127 case TCP_MEASURE_BW_BURST: {
2128 struct tcp_measure_bw_burst in;
2129 uint32_t minpkts, maxpkts;
2130 bzero(&in, sizeof(in));
2131
2132 error = sooptcopyin(sopt, &in, sizeof(in),
2133 sizeof(in));
2134 if (error) {
2135 break;
2136 }
2137 if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 ||
2138 tp->t_bwmeas == NULL) {
2139 error = EINVAL;
2140 break;
2141 }
2142 minpkts = (in.min_burst_size != 0) ? in.min_burst_size :
2143 tp->t_bwmeas->bw_minsizepkts;
2144 maxpkts = (in.max_burst_size != 0) ? in.max_burst_size :
2145 tp->t_bwmeas->bw_maxsizepkts;
2146 if (minpkts > maxpkts) {
2147 error = EINVAL;
2148 break;
2149 }
2150 tp->t_bwmeas->bw_minsizepkts = minpkts;
2151 tp->t_bwmeas->bw_maxsizepkts = maxpkts;
2152 tp->t_bwmeas->bw_minsize = (minpkts * tp->t_maxseg);
2153 tp->t_bwmeas->bw_maxsize = (maxpkts * tp->t_maxseg);
2154 break;
2155 }
2156 case TCP_MAXSEG:
2157 error = sooptcopyin(sopt, &optval, sizeof optval,
2158 sizeof optval);
2159 if (error) {
2160 break;
2161 }
2162
2163 if (optval > 0 && optval <= tp->t_maxseg &&
2164 optval + 40 >= tcp_minmss) {
2165 tp->t_maxseg = optval;
2166 } else {
2167 error = EINVAL;
2168 }
2169 break;
2170
2171 case TCP_KEEPALIVE:
2172 error = sooptcopyin(sopt, &optval, sizeof optval,
2173 sizeof optval);
2174 if (error) {
2175 break;
2176 }
2177 if (optval < 0 || optval > UINT32_MAX / TCP_RETRANSHZ) {
2178 error = EINVAL;
2179 } else {
2180 tp->t_keepidle = optval * TCP_RETRANSHZ;
2181 /* reset the timer to new value */
2182 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2183 TCP_CONN_KEEPIDLE(tp));
2184 tcp_check_timer_state(tp);
2185 }
2186 break;
2187
2188 case TCP_CONNECTIONTIMEOUT:
2189 error = sooptcopyin(sopt, &optval, sizeof optval,
2190 sizeof optval);
2191 if (error) {
2192 break;
2193 }
2194 if (optval < 0 || optval > UINT32_MAX / TCP_RETRANSHZ) {
2195 error = EINVAL;
2196 } else {
2197 tp->t_keepinit = optval * TCP_RETRANSHZ;
2198 if (tp->t_state == TCPS_SYN_RECEIVED ||
2199 tp->t_state == TCPS_SYN_SENT) {
2200 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2201 TCP_CONN_KEEPINIT(tp));
2202 tcp_check_timer_state(tp);
2203 }
2204 }
2205 break;
2206
2207 case TCP_KEEPINTVL:
2208 error = sooptcopyin(sopt, &optval, sizeof(optval),
2209 sizeof(optval));
2210 if (error) {
2211 break;
2212 }
2213 if (optval < 0 || optval > UINT32_MAX / TCP_RETRANSHZ) {
2214 error = EINVAL;
2215 } else {
2216 tp->t_keepintvl = optval * TCP_RETRANSHZ;
2217 if (tp->t_state == TCPS_FIN_WAIT_2 &&
2218 TCP_CONN_MAXIDLE(tp) > 0) {
2219 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
2220 TCP_CONN_MAXIDLE(tp));
2221 tcp_check_timer_state(tp);
2222 }
2223 }
2224 break;
2225
2226 case TCP_KEEPCNT:
2227 error = sooptcopyin(sopt, &optval, sizeof(optval),
2228 sizeof(optval));
2229 if (error) {
2230 break;
2231 }
2232 if (optval < 0 || optval > INT32_MAX) {
2233 error = EINVAL;
2234 } else {
2235 tp->t_keepcnt = optval;
2236 if (tp->t_state == TCPS_FIN_WAIT_2 &&
2237 TCP_CONN_MAXIDLE(tp) > 0) {
2238 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
2239 TCP_CONN_MAXIDLE(tp));
2240 tcp_check_timer_state(tp);
2241 }
2242 }
2243 break;
2244
2245 case TCP_KEEPALIVE_OFFLOAD:
2246 if ((error = priv_check_cred(kauth_cred_get(),
2247 PRIV_NETINET_TCP_KA_OFFLOAD, 0)) != 0) {
2248 break;
2249 }
2250 error = sooptcopyin(sopt, &optval, sizeof(optval),
2251 sizeof(optval));
2252 if (error) {
2253 break;
2254 }
2255 if (optval < 0 || optval > INT32_MAX) {
2256 error = EINVAL;
2257 break;
2258 }
2259 if (optval != 0) {
2260 error = tcp_set_keep_alive_offload(so,
2261 sopt->sopt_p);
2262 } else {
2263 tcp_clear_keep_alive_offload(so);
2264 }
2265 break;
2266
2267 case PERSIST_TIMEOUT:
2268 error = sooptcopyin(sopt, &optval, sizeof optval,
2269 sizeof optval);
2270 if (error) {
2271 break;
2272 }
2273 if (optval < 0) {
2274 error = EINVAL;
2275 } else {
2276 tp->t_persist_timeout = optval * TCP_RETRANSHZ;
2277 }
2278 break;
2279 case TCP_RXT_CONNDROPTIME:
2280 error = sooptcopyin(sopt, &optval, sizeof(optval),
2281 sizeof(optval));
2282 if (error) {
2283 break;
2284 }
2285 if (optval < 0) {
2286 error = EINVAL;
2287 } else {
2288 tp->t_rxt_conndroptime = optval * TCP_RETRANSHZ;
2289 }
2290 break;
2291 case TCP_NOTSENT_LOWAT:
2292 error = sooptcopyin(sopt, &optval, sizeof(optval),
2293 sizeof(optval));
2294 if (error) {
2295 break;
2296 }
2297 if (optval < 0) {
2298 error = EINVAL;
2299 break;
2300 } else {
2301 if (optval == 0) {
2302 so->so_flags &= ~(SOF_NOTSENT_LOWAT);
2303 tp->t_notsent_lowat = 0;
2304 } else {
2305 so->so_flags |= SOF_NOTSENT_LOWAT;
2306 tp->t_notsent_lowat = optval;
2307 }
2308 }
2309 break;
2310 case TCP_ADAPTIVE_READ_TIMEOUT:
2311 error = sooptcopyin(sopt, &optval, sizeof(optval),
2312 sizeof(optval));
2313 if (error) {
2314 break;
2315 }
2316 if (optval < 0 ||
2317 optval > TCP_ADAPTIVE_TIMEOUT_MAX) {
2318 error = EINVAL;
2319 break;
2320 } else if (optval == 0) {
2321 tp->t_adaptive_rtimo = 0;
2322 tcp_keepalive_reset(tp);
2323
2324 if (tp->t_mpsub) {
2325 mptcp_reset_keepalive(tp);
2326 }
2327 } else {
2328 tp->t_adaptive_rtimo = (uint8_t)optval;
2329 }
2330 break;
2331 case TCP_ADAPTIVE_WRITE_TIMEOUT:
2332 error = sooptcopyin(sopt, &optval, sizeof(optval),
2333 sizeof(optval));
2334 if (error) {
2335 break;
2336 }
2337 if (optval < 0 ||
2338 optval > TCP_ADAPTIVE_TIMEOUT_MAX) {
2339 error = EINVAL;
2340 break;
2341 } else {
2342 tp->t_adaptive_wtimo = (uint8_t)optval;
2343 }
2344 break;
2345 case TCP_SENDMOREACKS:
2346 error = sooptcopyin(sopt, &optval, sizeof(optval),
2347 sizeof(optval));
2348 if (error) {
2349 break;
2350 }
2351 if (optval < 0 || optval > 1) {
2352 error = EINVAL;
2353 } else if (optval == 0) {
2354 tp->t_flagsext &= ~(TF_NOSTRETCHACK);
2355 } else {
2356 tp->t_flagsext |= TF_NOSTRETCHACK;
2357 }
2358 break;
2359 case TCP_DISABLE_BLACKHOLE_DETECTION:
2360 error = sooptcopyin(sopt, &optval, sizeof(optval),
2361 sizeof(optval));
2362 if (error) {
2363 break;
2364 }
2365 if (optval < 0 || optval > 1) {
2366 error = EINVAL;
2367 } else if (optval == 0) {
2368 tp->t_flagsext &= ~TF_NOBLACKHOLE_DETECTION;
2369 } else {
2370 tp->t_flagsext |= TF_NOBLACKHOLE_DETECTION;
2371 if ((tp->t_flags & TF_BLACKHOLE) &&
2372 tp->t_pmtud_saved_maxopd > 0) {
2373 tcp_pmtud_revert_segment_size(tp);
2374 }
2375 }
2376 break;
2377 case TCP_FASTOPEN:
2378 if (!(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
2379 error = ENOTSUP;
2380 break;
2381 }
2382
2383 error = sooptcopyin(sopt, &optval, sizeof(optval),
2384 sizeof(optval));
2385 if (error) {
2386 break;
2387 }
2388 if (optval < 0 || optval > 1) {
2389 error = EINVAL;
2390 break;
2391 }
2392 if (tp->t_state != TCPS_LISTEN) {
2393 error = EINVAL;
2394 break;
2395 }
2396 if (optval) {
2397 tp->t_flagsext |= TF_FASTOPEN;
2398 } else {
2399 tcp_disable_tfo(tp);
2400 }
2401 break;
2402 case TCP_FASTOPEN_FORCE_HEURISTICS:
2403
2404 break;
2405 case TCP_FASTOPEN_FORCE_ENABLE:
2406 error = sooptcopyin(sopt, &optval, sizeof(optval),
2407 sizeof(optval));
2408
2409 if (error) {
2410 break;
2411 }
2412 if (optval < 0 || optval > 1) {
2413 error = EINVAL;
2414 break;
2415 }
2416
2417 if (tp->t_state != TCPS_CLOSED) {
2418 error = EINVAL;
2419 break;
2420 }
2421 if (optval) {
2422 tp->t_flagsext |= TF_FASTOPEN_FORCE_ENABLE;
2423 } else {
2424 tp->t_flagsext &= ~TF_FASTOPEN_FORCE_ENABLE;
2425 }
2426
2427 break;
2428 case TCP_ENABLE_ECN:
2429 error = sooptcopyin(sopt, &optval, sizeof optval,
2430 sizeof optval);
2431 if (error) {
2432 break;
2433 }
2434 if (optval) {
2435 tp->ecn_flags |= TE_ECN_MODE_ENABLE;
2436 tp->ecn_flags &= ~TE_ECN_MODE_DISABLE;
2437 } else {
2438 tp->ecn_flags &= ~TE_ECN_MODE_ENABLE;
2439 tp->ecn_flags |= TE_ECN_MODE_DISABLE;
2440 }
2441 break;
2442 case TCP_ECN_MODE:
2443 error = sooptcopyin(sopt, &optval, sizeof optval,
2444 sizeof optval);
2445 if (error) {
2446 break;
2447 }
2448 if (optval == ECN_MODE_DEFAULT) {
2449 tp->ecn_flags &= ~TE_ECN_MODE_ENABLE;
2450 tp->ecn_flags &= ~TE_ECN_MODE_DISABLE;
2451 } else if (optval == ECN_MODE_ENABLE) {
2452 tp->ecn_flags |= TE_ECN_MODE_ENABLE;
2453 tp->ecn_flags &= ~TE_ECN_MODE_DISABLE;
2454 } else if (optval == ECN_MODE_DISABLE) {
2455 tp->ecn_flags &= ~TE_ECN_MODE_ENABLE;
2456 tp->ecn_flags |= TE_ECN_MODE_DISABLE;
2457 } else {
2458 error = EINVAL;
2459 }
2460 break;
2461 case TCP_NOTIFY_ACKNOWLEDGEMENT:
2462 error = sooptcopyin(sopt, &optval,
2463 sizeof(optval), sizeof(optval));
2464 if (error) {
2465 break;
2466 }
2467 if (optval <= 0) {
2468 error = EINVAL;
2469 break;
2470 }
2471 if (tp->t_notify_ack_count >= TCP_MAX_NOTIFY_ACK) {
2472 error = ETOOMANYREFS;
2473 break;
2474 }
2475
2476 /*
2477 * validate that the given marker id is not
2478 * a duplicate to avoid ambiguity
2479 */
2480 if ((error = tcp_notify_ack_id_valid(tp, so,
2481 optval)) != 0) {
2482 break;
2483 }
2484 error = tcp_add_notify_ack_marker(tp, optval);
2485 break;
2486 case SO_FLUSH:
2487 if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
2488 sizeof(optval))) != 0) {
2489 break;
2490 }
2491
2492 error = inp_flush(inp, optval);
2493 break;
2494
2495 case SO_TRAFFIC_MGT_BACKGROUND:
2496 if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
2497 sizeof(optval))) != 0) {
2498 break;
2499 }
2500
2501 if (optval) {
2502 socket_set_traffic_mgt_flags_locked(so,
2503 TRAFFIC_MGT_SO_BACKGROUND);
2504 } else {
2505 socket_clear_traffic_mgt_flags_locked(so,
2506 TRAFFIC_MGT_SO_BACKGROUND);
2507 }
2508 break;
2509 case TCP_RXT_MINIMUM_TIMEOUT:
2510 error = sooptcopyin(sopt, &optval, sizeof(optval),
2511 sizeof(optval));
2512 if (error) {
2513 break;
2514 }
2515 if (optval < 0) {
2516 error = EINVAL;
2517 break;
2518 }
2519 if (optval == 0) {
2520 tp->t_rxt_minimum_timeout = 0;
2521 } else {
2522 tp->t_rxt_minimum_timeout = min(optval,
2523 TCP_RXT_MINIMUM_TIMEOUT_LIMIT);
2524 /* convert to milliseconds */
2525 tp->t_rxt_minimum_timeout *= TCP_RETRANSHZ;
2526 }
2527 break;
2528 default:
2529 error = ENOPROTOOPT;
2530 break;
2531 }
2532 break;
2533
2534 case SOPT_GET:
2535 switch (sopt->sopt_name) {
2536 case TCP_NODELAY:
2537 optval = tp->t_flags & TF_NODELAY;
2538 break;
2539 case TCP_MAXSEG:
2540 optval = tp->t_maxseg;
2541 break;
2542 case TCP_KEEPALIVE:
2543 if (tp->t_keepidle > 0) {
2544 optval = tp->t_keepidle / TCP_RETRANSHZ;
2545 } else {
2546 optval = tcp_keepidle / TCP_RETRANSHZ;
2547 }
2548 break;
2549 case TCP_KEEPINTVL:
2550 if (tp->t_keepintvl > 0) {
2551 optval = tp->t_keepintvl / TCP_RETRANSHZ;
2552 } else {
2553 optval = tcp_keepintvl / TCP_RETRANSHZ;
2554 }
2555 break;
2556 case TCP_KEEPCNT:
2557 if (tp->t_keepcnt > 0) {
2558 optval = tp->t_keepcnt;
2559 } else {
2560 optval = tcp_keepcnt;
2561 }
2562 break;
2563 case TCP_KEEPALIVE_OFFLOAD:
2564 optval = !!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD);
2565 break;
2566 case TCP_NOOPT:
2567 optval = tp->t_flags & TF_NOOPT;
2568 break;
2569 case TCP_NOPUSH:
2570 optval = tp->t_flags & TF_NOPUSH;
2571 break;
2572 case TCP_ENABLE_ECN:
2573 optval = (tp->ecn_flags & TE_ECN_MODE_ENABLE) ? 1 : 0;
2574 break;
2575 case TCP_ECN_MODE:
2576 if (tp->ecn_flags & TE_ECN_MODE_ENABLE) {
2577 optval = ECN_MODE_ENABLE;
2578 } else if (tp->ecn_flags & TE_ECN_MODE_DISABLE) {
2579 optval = ECN_MODE_DISABLE;
2580 } else {
2581 optval = ECN_MODE_DEFAULT;
2582 }
2583 break;
2584 case TCP_CONNECTIONTIMEOUT:
2585 optval = tp->t_keepinit / TCP_RETRANSHZ;
2586 break;
2587 case PERSIST_TIMEOUT:
2588 optval = tp->t_persist_timeout / TCP_RETRANSHZ;
2589 break;
2590 case TCP_RXT_CONNDROPTIME:
2591 optval = tp->t_rxt_conndroptime / TCP_RETRANSHZ;
2592 break;
2593 case TCP_RXT_FINDROP:
2594 optval = tp->t_flagsext & TF_RXTFINDROP;
2595 break;
2596 case TCP_NOTIMEWAIT:
2597 optval = (tp->t_flagsext & TF_NOTIMEWAIT) ? 1 : 0;
2598 break;
2599 case TCP_FASTOPEN:
2600 if (tp->t_state != TCPS_LISTEN ||
2601 !(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
2602 error = ENOTSUP;
2603 break;
2604 }
2605 optval = tfo_enabled(tp);
2606 break;
2607 case TCP_FASTOPEN_FORCE_HEURISTICS:
2608 optval = 0;
2609 break;
2610 case TCP_FASTOPEN_FORCE_ENABLE:
2611 optval = (tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) ? 1 : 0;
2612 break;
2613 case TCP_MEASURE_SND_BW:
2614 optval = tp->t_flagsext & TF_MEASURESNDBW;
2615 break;
2616 case TCP_INFO: {
2617 struct tcp_info ti;
2618
2619 tcp_fill_info(tp, &ti);
2620 error = sooptcopyout(sopt, &ti, sizeof(struct tcp_info));
2621 goto done;
2622 /* NOT REACHED */
2623 }
2624 case TCP_CONNECTION_INFO: {
2625 struct tcp_connection_info tci;
2626 tcp_connection_fill_info(tp, &tci);
2627 error = sooptcopyout(sopt, &tci,
2628 sizeof(struct tcp_connection_info));
2629 goto done;
2630 }
2631 case TCP_MEASURE_BW_BURST: {
2632 struct tcp_measure_bw_burst out = {};
2633 if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 ||
2634 tp->t_bwmeas == NULL) {
2635 error = EINVAL;
2636 break;
2637 }
2638 out.min_burst_size = tp->t_bwmeas->bw_minsizepkts;
2639 out.max_burst_size = tp->t_bwmeas->bw_maxsizepkts;
2640 error = sooptcopyout(sopt, &out, sizeof(out));
2641 goto done;
2642 }
2643 case TCP_NOTSENT_LOWAT:
2644 if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0) {
2645 optval = tp->t_notsent_lowat;
2646 } else {
2647 optval = 0;
2648 }
2649 break;
2650 case TCP_SENDMOREACKS:
2651 if (tp->t_flagsext & TF_NOSTRETCHACK) {
2652 optval = 1;
2653 } else {
2654 optval = 0;
2655 }
2656 break;
2657 case TCP_DISABLE_BLACKHOLE_DETECTION:
2658 if (tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) {
2659 optval = 1;
2660 } else {
2661 optval = 0;
2662 }
2663 break;
2664 case TCP_PEER_PID: {
2665 pid_t pid;
2666 error = tcp_lookup_peer_pid_locked(so, &pid);
2667 if (error == 0) {
2668 error = sooptcopyout(sopt, &pid, sizeof(pid));
2669 }
2670 goto done;
2671 }
2672 case TCP_ADAPTIVE_READ_TIMEOUT:
2673 optval = tp->t_adaptive_rtimo;
2674 break;
2675 case TCP_ADAPTIVE_WRITE_TIMEOUT:
2676 optval = tp->t_adaptive_wtimo;
2677 break;
2678 case SO_TRAFFIC_MGT_BACKGROUND:
2679 optval = (so->so_flags1 &
2680 SOF1_TRAFFIC_MGT_SO_BACKGROUND) ? 1 : 0;
2681 break;
2682 case TCP_NOTIFY_ACKNOWLEDGEMENT: {
2683 struct tcp_notify_ack_complete retid;
2684
2685 if (sopt->sopt_valsize != sizeof(retid)) {
2686 error = EINVAL;
2687 break;
2688 }
2689 bzero(&retid, sizeof(retid));
2690 tcp_get_notify_ack_count(tp, &retid);
2691 if (retid.notify_complete_count > 0) {
2692 tcp_get_notify_ack_ids(tp, &retid);
2693 }
2694
2695 error = sooptcopyout(sopt, &retid, sizeof(retid));
2696 goto done;
2697 }
2698 case TCP_RXT_MINIMUM_TIMEOUT:
2699 optval = tp->t_rxt_minimum_timeout / TCP_RETRANSHZ;
2700 break;
2701 default:
2702 error = ENOPROTOOPT;
2703 break;
2704 }
2705 if (error == 0) {
2706 error = sooptcopyout(sopt, &optval, sizeof optval);
2707 }
2708 break;
2709 }
2710 done:
2711 return error;
2712 }
2713
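/*
 * Editor's note: illustrative userspace sketch (not built) of the
 * keep-alive options handled under SOPT_SET above.  TCP_KEEPALIVE is the
 * idle time in seconds before the first probe, TCP_KEEPINTVL the interval
 * between probes and TCP_KEEPCNT the probe count; tcp_ctloutput() scales
 * the time values by TCP_RETRANSHZ.
 */
#if 0 /* illustrative only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Enable keep-alive and tune its idle time, probe interval and count. */
static int
tune_tcp_keepalive(int fd, int idle_secs, int intvl_secs, int cnt)
{
    int on = 1;

    if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) != 0 ||
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &idle_secs,
        sizeof(idle_secs)) != 0 ||
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl_secs,
        sizeof(intvl_secs)) != 0 ||
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) != 0) {
        return -1;
    }
    return 0;
}
#endif
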
2714 /*
2715 * tcp_sendspace and tcp_recvspace are the default send and receive window
2716 * sizes, respectively. These are obsolescent (this information should
2717 * be set by the route).
2718 */
2719 u_int32_t tcp_sendspace = 1448 * 256;
2720 u_int32_t tcp_recvspace = 1448 * 384;
2721
2722 /* During attach, the size of the socket buffer allocated by sbreserve
2723 * is limited to sb_max. Disallow setting the TCP send and receive space
2724 * to more than sb_max because that would cause tcp_attach to fail
2725 * (see radar 5713060).
2726 */
2727 static int
2728 sysctl_tcp_sospace(struct sysctl_oid *oidp, __unused void *arg1,
2729 int arg2, struct sysctl_req *req)
2730 {
2731 #pragma unused(arg2)
2732 u_int32_t new_value = 0, *space_p = NULL;
2733 int changed = 0, error = 0;
2734 u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES;
2735
2736 switch (oidp->oid_number) {
2737 case TCPCTL_SENDSPACE:
2738 space_p = &tcp_sendspace;
2739 break;
2740 case TCPCTL_RECVSPACE:
2741 space_p = &tcp_recvspace;
2742 break;
2743 default:
2744 return EINVAL;
2745 }
2746 error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
2747 &new_value, &changed);
2748 if (changed) {
2749 if (new_value > 0 && new_value <= sb_effective_max) {
2750 *space_p = new_value;
2751 SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, new_value);
2752 } else {
2753 error = ERANGE;
2754 }
2755 }
2756 return error;
2757 }
2758
2759 #if SYSCTL_SKMEM
2760 SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace,
2761 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_sendspace,
2762 offsetof(skmem_sysctl, tcp.sendspace), sysctl_tcp_sospace,
2763 "IU", "Maximum outgoing TCP datagram size");
2764 SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace,
2765 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_recvspace,
2766 offsetof(skmem_sysctl, tcp.recvspace), sysctl_tcp_sospace,
2767 "IU", "Maximum incoming TCP datagram size");
2768 #else /* SYSCTL_SKMEM */
2769 SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2770 &tcp_sendspace, 0, &sysctl_tcp_sospace, "IU", "Maximum outgoing TCP datagram size");
2771 SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2772 &tcp_recvspace, 0, &sysctl_tcp_sospace, "IU", "Maximum incoming TCP datagram size");
2773 #endif /* SYSCTL_SKMEM */
2774
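/*
 * Editor's note: illustrative userspace sketch (not built).  The
 * net.inet.tcp.sendspace and net.inet.tcp.recvspace nodes registered
 * above are plain integer sysctls; sysctl_tcp_sospace() rejects zero or
 * values larger than the effective sb_max with ERANGE.
 */
#if 0 /* illustrative only */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

/* Read the current default TCP send space and, when new_value is nonzero,
 * raise it (the set requires appropriate privilege). */
static int
show_and_set_tcp_sendspace(unsigned int new_value)
{
    unsigned int cur = 0;
    size_t len = sizeof(cur);

    if (sysctlbyname("net.inet.tcp.sendspace", &cur, &len,
        new_value != 0 ? &new_value : NULL,
        new_value != 0 ? sizeof(new_value) : 0) != 0) {
        return -1;
    }
    printf("previous net.inet.tcp.sendspace: %u\n", cur);
    return 0;
}
#endif
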
2775 /*
2776 * Attach TCP protocol to socket, allocating
2777 * internet protocol control block, tcp control block,
2778 * buffer space, and entering LISTEN state if it is to accept connections.
2779 *
2780 * Returns: 0 Success
2781 * in_pcballoc:ENOBUFS
2782 * in_pcballoc:ENOMEM
2783 * in_pcballoc:??? [IPSEC specific]
2784 * soreserve:ENOBUFS
2785 */
2786 static int
2787 tcp_attach(struct socket *so, struct proc *p)
2788 {
2789 struct tcpcb *tp;
2790 struct inpcb *inp;
2791 int error;
2792 int isipv6 = SOCK_CHECK_DOM(so, PF_INET6) != 0;
2793
2794 error = in_pcballoc(so, &tcbinfo, p);
2795 if (error) {
2796 return error;
2797 }
2798
2799 inp = sotoinpcb(so);
2800
2801 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
2802 error = soreserve(so, tcp_sendspace, tcp_recvspace);
2803 if (error) {
2804 return error;
2805 }
2806 }
2807
2808 if (so->so_snd.sb_preconn_hiwat == 0) {
2809 soreserve_preconnect(so, 2048);
2810 }
2811
2812 if ((so->so_rcv.sb_flags & SB_USRSIZE) == 0) {
2813 so->so_rcv.sb_flags |= SB_AUTOSIZE;
2814 }
2815 if ((so->so_snd.sb_flags & SB_USRSIZE) == 0) {
2816 so->so_snd.sb_flags |= SB_AUTOSIZE;
2817 }
2818
2819 if (isipv6) {
2820 inp->inp_vflag |= INP_IPV6;
2821 inp->in6p_hops = -1; /* use kernel default */
2822 } else {
2823 inp->inp_vflag |= INP_IPV4;
2824 }
2825 tp = tcp_newtcpcb(inp);
2826 if (tp == NULL) {
2827 int nofd = so->so_state & SS_NOFDREF; /* XXX */
2828
2829 so->so_state &= ~SS_NOFDREF; /* don't free the socket yet */
2830 if (isipv6) {
2831 in6_pcbdetach(inp);
2832 } else {
2833 in_pcbdetach(inp);
2834 }
2835 so->so_state |= nofd;
2836 return ENOBUFS;
2837 }
2838 if (nstat_collect) {
2839 nstat_tcp_new_pcb(inp);
2840 }
2841 tp->t_state = TCPS_CLOSED;
2842 return 0;
2843 }
2844
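/*
 * Editor's note: illustrative userspace sketch (not built).  socket(2) on
 * a SOCK_STREAM/IPPROTO_TCP socket reaches tcp_attach() above, which
 * reserves tcp_sendspace/tcp_recvspace and enables send/receive buffer
 * autosizing unless the buffers are already user-sized.  The assumption
 * here is that an explicit SO_RCVBUF marks the buffer user-sized
 * (SB_USRSIZE) in the generic socket layer, as the check above implies.
 */
#if 0 /* illustrative only */
#include <sys/socket.h>
#include <netinet/in.h>

/* Create a TCP socket and pin its receive buffer to a fixed size,
 * opting out of receive-buffer autosizing. */
static int
tcp_socket_with_fixed_rcvbuf(int rcvbuf_bytes)
{
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

    if (fd >= 0) {
        (void) setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
            &rcvbuf_bytes, sizeof(rcvbuf_bytes));
    }
    return fd;
}
#endif
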
2845 /*
2846 * Initiate (or continue) disconnect.
2847 * If embryonic state, just send reset (once).
2848 * If in ``let data drain'' option and linger null, just drop.
2849 * Otherwise (hard), mark socket disconnecting and drop
2850 * current input data; switch states based on user close, and
2851 * send segment to peer (with FIN).
2852 */
2853 static struct tcpcb *
2854 tcp_disconnect(struct tcpcb *tp)
2855 {
2856 struct socket *so = tp->t_inpcb->inp_socket;
2857
2858 if (so->so_rcv.sb_cc != 0 || tp->t_reassqlen != 0) {
2859 return tcp_drop(tp, 0);
2860 }
2861
2862 if (tp->t_state < TCPS_ESTABLISHED) {
2863 tp = tcp_close(tp);
2864 } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
2865 tp = tcp_drop(tp, 0);
2866 } else {
2867 soisdisconnecting(so);
2868 sbflush(&so->so_rcv);
2869 tp = tcp_usrclosed(tp);
2870 #if MPTCP
2871 /* A reset has been sent but socket exists, do not send FIN */
2872 if ((so->so_flags & SOF_MP_SUBFLOW) &&
2873 (tp) && (tp->t_mpflags & TMPF_RESET)) {
2874 return tp;
2875 }
2876 #endif
2877 if (tp) {
2878 (void) tcp_output(tp);
2879 }
2880 }
2881 return tp;
2882 }
2883
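/*
 * Editor's note: illustrative userspace sketch (not built).  With
 * SO_LINGER enabled and a zero linger time, closing the socket reaches
 * the tcp_drop() branch of tcp_disconnect() above (abortive close with
 * RST) instead of the orderly FIN sequence.
 */
#if 0 /* illustrative only */
#include <sys/socket.h>
#include <unistd.h>

/* Close a TCP socket abortively: discard pending data and send RST. */
static int
tcp_abortive_close(int fd)
{
    struct linger lg = { .l_onoff = 1, .l_linger = 0 };

    (void) setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
    return close(fd);
}
#endif
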
2884 /*
2885 * User issued close, and wish to trail through shutdown states:
2886 * if never received SYN, just forget it. If got a SYN from peer,
2887 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
2888 * If already got a FIN from peer, then almost done; go to LAST_ACK
2889 * state. In all other cases, have already sent FIN to peer (e.g.
2890 * after PRU_SHUTDOWN), and just have to play the tedious game of waiting
2891 * for the peer to send a FIN or stop responding to keep-alives, etc.
2892 * We can let the user exit from the close as soon as the FIN is acked.
2893 */
2894 static struct tcpcb *
2895 tcp_usrclosed(struct tcpcb *tp)
2896 {
2897 switch (tp->t_state) {
2898 case TCPS_CLOSED:
2899 case TCPS_LISTEN:
2900 case TCPS_SYN_SENT:
2901 tp = tcp_close(tp);
2902 break;
2903
2904 case TCPS_SYN_RECEIVED:
2905 tp->t_flags |= TF_NEEDFIN;
2906 break;
2907
2908 case TCPS_ESTABLISHED:
2909 DTRACE_TCP4(state__change, void, NULL,
2910 struct inpcb *, tp->t_inpcb,
2911 struct tcpcb *, tp,
2912 int32_t, TCPS_FIN_WAIT_1);
2913 tp->t_state = TCPS_FIN_WAIT_1;
2914 TCP_LOG_CONNECTION_SUMMARY(tp);
2915 break;
2916
2917 case TCPS_CLOSE_WAIT:
2918 DTRACE_TCP4(state__change, void, NULL,
2919 struct inpcb *, tp->t_inpcb,
2920 struct tcpcb *, tp,
2921 int32_t, TCPS_LAST_ACK);
2922 tp->t_state = TCPS_LAST_ACK;
2923 TCP_LOG_CONNECTION_SUMMARY(tp);
2924 break;
2925 }
2926 if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
2927 soisdisconnected(tp->t_inpcb->inp_socket);
2928 /* To prevent the connection hanging in FIN_WAIT_2 forever. */
2929 if (tp->t_state == TCPS_FIN_WAIT_2) {
2930 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
2931 TCP_CONN_MAXIDLE(tp));
2932 }
2933 }
2934 return tp;
2935 }
2936
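/*
 * Editor's note: illustrative userspace sketch (not built).  A
 * shutdown(SHUT_WR) on an established connection reaches tcp_usrclosed()
 * above via the shutdown handler and moves the connection to FIN_WAIT_1;
 * the caller can keep reading until the peer's FIN arrives (read()
 * returning 0) and then close the descriptor.
 */
#if 0 /* illustrative only */
#include <sys/socket.h>
#include <unistd.h>

/* Half-close the write side, drain the peer's remaining data, then close. */
static int
tcp_graceful_close(int fd)
{
    char buf[4096];

    if (shutdown(fd, SHUT_WR) != 0) {
        return close(fd);
    }
    while (read(fd, buf, sizeof(buf)) > 0) {
        ;
    }
    return close(fd);
}
#endif
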
2937 void
2938 tcp_in_cksum_stats(u_int32_t len)
2939 {
2940 tcpstat.tcps_rcv_swcsum++;
2941 tcpstat.tcps_rcv_swcsum_bytes += len;
2942 }
2943
2944 void
2945 tcp_out_cksum_stats(u_int32_t len)
2946 {
2947 tcpstat.tcps_snd_swcsum++;
2948 tcpstat.tcps_snd_swcsum_bytes += len;
2949 }
2950
2951 void
2952 tcp_in6_cksum_stats(u_int32_t len)
2953 {
2954 tcpstat.tcps_rcv6_swcsum++;
2955 tcpstat.tcps_rcv6_swcsum_bytes += len;
2956 }
2957
2958 void
2959 tcp_out6_cksum_stats(u_int32_t len)
2960 {
2961 tcpstat.tcps_snd6_swcsum++;
2962 tcpstat.tcps_snd6_swcsum_bytes += len;
2963 }
2964
2965 int
2966 tcp_get_mpkl_send_info(struct mbuf *control,
2967 struct so_mpkl_send_info *mpkl_send_info)
2968 {
2969 struct cmsghdr *cm;
2970
2971 if (control == NULL || mpkl_send_info == NULL) {
2972 return EINVAL;
2973 }
2974
2975 for (cm = M_FIRST_CMSGHDR(control); cm;
2976 cm = M_NXT_CMSGHDR(control, cm)) {
2977 if (cm->cmsg_len < sizeof(struct cmsghdr) ||
2978 cm->cmsg_len > control->m_len) {
2979 return EINVAL;
2980 }
2981 if (cm->cmsg_level != SOL_SOCKET ||
2982 cm->cmsg_type != SCM_MPKL_SEND_INFO) {
2983 continue;
2984 }
2985 if (cm->cmsg_len != CMSG_LEN(sizeof(struct so_mpkl_send_info))) {
2986 return EINVAL;
2987 }
2988 memcpy(mpkl_send_info, CMSG_DATA(cm),
2989 sizeof(struct so_mpkl_send_info));
2990 return 0;
2991 }
2992 return ENOMSG;
2993 }
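
/*
 * Editor's note: illustrative userspace sketch (not built) of the
 * ancillary-data layout tcp_get_mpkl_send_info() expects: a single
 * SOL_SOCKET/SCM_MPKL_SEND_INFO control message whose length is exactly
 * CMSG_LEN(sizeof(struct so_mpkl_send_info)).  Availability of the
 * SCM_MPKL_SEND_INFO constant and of struct so_mpkl_send_info in
 * userspace headers is assumed; the structure contents are passed
 * through opaquely here.
 */
#if 0 /* illustrative only */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <stdint.h>
#include <string.h>

/* Send a buffer with an attached so_mpkl_send_info control message. */
static ssize_t
tcp_send_with_mpkl_info(int fd, const void *buf, size_t len,
    const struct so_mpkl_send_info *info)
{
    char cbuf[CMSG_SPACE(sizeof(struct so_mpkl_send_info))];
    struct iovec iov;
    struct msghdr msg;
    struct cmsghdr *cm;

    memset(cbuf, 0, sizeof(cbuf));
    memset(&msg, 0, sizeof(msg));
    iov.iov_base = (void *)(uintptr_t)buf;
    iov.iov_len = len;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);

    cm = CMSG_FIRSTHDR(&msg);
    cm->cmsg_level = SOL_SOCKET;
    cm->cmsg_type = SCM_MPKL_SEND_INFO;
    cm->cmsg_len = CMSG_LEN(sizeof(struct so_mpkl_send_info));
    memcpy(CMSG_DATA(cm), info, sizeof(*info));

    return sendmsg(fd, &msg, 0);
}
#endif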