]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet/tcp_subr.c
cc58735fab7a7bb480b93825a4dfe49e7f1ecfa0
[apple/xnu.git] / bsd / netinet / tcp_subr.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
61 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.22 2001/08/22 00:59:12 silby Exp $
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/callout.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/domain.h>
78 #include <sys/proc.h>
79 #include <sys/kauth.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/protosw.h>
83 #include <sys/random.h>
84 #include <sys/syslog.h>
85 #include <kern/locks.h>
86 #include <kern/zalloc.h>
87
88 #include <net/route.h>
89 #include <net/if.h>
90
91 #define tcp_minmssoverload fring
92 #define _IP_VHL
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/ip_icmp.h>
97 #if INET6
98 #include <netinet/ip6.h>
99 #endif
100 #include <netinet/in_pcb.h>
101 #if INET6
102 #include <netinet6/in6_pcb.h>
103 #endif
104 #include <netinet/in_var.h>
105 #include <netinet/ip_var.h>
106 #include <netinet/icmp_var.h>
107 #if INET6
108 #include <netinet6/ip6_var.h>
109 #endif
110 #include <netinet/tcp.h>
111 #include <netinet/tcp_fsm.h>
112 #include <netinet/tcp_seq.h>
113 #include <netinet/tcp_timer.h>
114 #include <netinet/tcp_var.h>
115 #if INET6
116 #include <netinet6/tcp6_var.h>
117 #endif
118 #include <netinet/tcpip.h>
119 #if TCPDEBUG
120 #include <netinet/tcp_debug.h>
121 #endif
122 #include <netinet6/ip6protosw.h>
123
124 #if IPSEC
125 #include <netinet6/ipsec.h>
126 #if INET6
127 #include <netinet6/ipsec6.h>
128 #endif
129 #endif /*IPSEC*/
130
131 #undef tcp_minmssoverload
132
133 #if CONFIG_MACF_NET
134 #include <security/mac_framework.h>
135 #endif /* MAC_NET */
136
137 #include <libkern/crypto/md5.h>
138 #include <sys/kdebug.h>
139
140 #define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
141
142 extern int tcp_lq_overflow;
143
144 /* temporary: for testing */
145 #if IPSEC
146 extern int ipsec_bypass;
147 #endif
148
149 int tcp_mssdflt = TCP_MSS;
150 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
151 &tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
152
153 #if INET6
154 int tcp_v6mssdflt = TCP6_MSS;
155 SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
156 CTLFLAG_RW, &tcp_v6mssdflt , 0,
157 "Default TCP Maximum Segment Size for IPv6");
158 #endif
159
160 /*
161 * Minimum MSS we accept and use. This prevents DoS attacks where
162 * we are forced to a ridiculous low MSS like 20 and send hundreds
163 * of packets instead of one. The effect scales with the available
164 * bandwidth and quickly saturates the CPU and network interface
165 * with packet generation and sending. Set to zero to disable MINMSS
166 * checking. This setting prevents us from sending too small packets.
167 */
168 int tcp_minmss = TCP_MINMSS;
169 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
170 &tcp_minmss , 0, "Minmum TCP Maximum Segment Size");
171
172 /*
173 * Number of TCP segments per second we accept from remote host
174 * before we start to calculate average segment size. If average
175 * segment size drops below the minimum TCP MSS we assume a DoS
176 * attack and reset+drop the connection. Care has to be taken not to
177 * set this value too small to not kill interactive type connections
178 * (telnet, SSH) which send many small packets.
179 */
180 #ifdef FIX_WORKAROUND_FOR_3894301
181 __private_extern__ int tcp_minmssoverload = TCP_MINMSSOVERLOAD;
182 #else
183 __private_extern__ int tcp_minmssoverload = 0;
184 #endif
185 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
186 &tcp_minmssoverload , 0, "Number of TCP Segments per Second allowed to"
187 "be under the MINMSS Size");
188
189 static int tcp_do_rfc1323 = 1;
190 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
191 &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
192
193 static int tcp_do_rfc1644 = 0;
194 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
195 &tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");
196
197 static int do_tcpdrain = 0;
198 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
199 "Enable tcp_drain routine for extra help when low on mbufs");
200
201 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
202 &tcbinfo.ipi_count, 0, "Number of active PCBs");
203
204 static int icmp_may_rst = 1;
205 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
206 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
207
208 static int tcp_strict_rfc1948 = 0;
209 SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW,
210 &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");
211
212 static int tcp_isn_reseed_interval = 0;
213 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
214 &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
215 static int tcp_background_io_enabled = 1;
216 SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_enabled, CTLFLAG_RW,
217 &tcp_background_io_enabled, 0, "Background IO Enabled");
218
219 int tcp_TCPTV_MIN = 1;
220 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_min, CTLFLAG_RW,
221 &tcp_TCPTV_MIN, 0, "min rtt value allowed");
222
223 __private_extern__ int tcp_use_randomport = 0;
224 SYSCTL_INT(_net_inet_tcp, OID_AUTO, randomize_ports, CTLFLAG_RW,
225 &tcp_use_randomport, 0, "Randomize TCP port numbers");
226
227 static void tcp_cleartaocache(void);
228 static void tcp_notify(struct inpcb *, int);
229 struct zone *sack_hole_zone;
230
231 extern unsigned int total_mb_cnt;
232 extern unsigned int total_cl_cnt;
233 extern int sbspace_factor;
234 extern int tcp_sockthreshold;
235 extern int slowlink_wsize; /* window correction for slow links */
236 extern int path_mtu_discovery;
237
238
239 /*
240 * Target size of TCP PCB hash tables. Must be a power of two.
241 *
242 * Note that this can be overridden by the kernel environment
243 * variable net.inet.tcp.tcbhashsize
244 */
245 #ifndef TCBHASHSIZE
246 #define TCBHASHSIZE CONFIG_TCBHASHSIZE
247 #endif
248
249 __private_extern__ int tcp_tcbhashsize = TCBHASHSIZE;
250 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
251 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
252
253 /*
254 * This is the actual shape of what we allocate using the zone
255 * allocator. Doing it this way allows us to protect both structures
256 * using the same generation count, and also eliminates the overhead
257 * of allocating tcpcbs separately. By hiding the structure here,
258 * we avoid changing most of the rest of the code (although it needs
259 * to be changed, eventually, for greater efficiency).
260 */
#define ALIGNMENT	32		/* round the tcpcb up to a 32-byte boundary */
#define ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		/*
		 * Pad the union so that 'tcb' below begins at the next
		 * ALIGNMENT-byte boundary past the inpcb.
		 */
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;		/* TCP state, co-allocated with its inpcb */
};
#undef ALIGNMENT
#undef ALIGNM1
272
273 extern struct inpcbhead time_wait_slots[];
274 extern u_int32_t *delack_bitmask;
275
276 int get_inpcb_str_size(void);
277 int get_tcp_str_size(void);
278
279 static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
280
281 int get_inpcb_str_size(void)
282 {
283 return sizeof(struct inpcb);
284 }
285
286
287 int get_tcp_str_size(void)
288 {
289 return sizeof(struct tcpcb);
290 }
291
292 int tcp_freeq(struct tcpcb *tp);
293
294
295 /*
296 * Tcp initialization
297 */
298 void
299 tcp_init()
300 {
301 vm_size_t str_size;
302 int i;
303 struct inpcbinfo *pcbinfo;
304
305 tcp_ccgen = 1;
306 tcp_cleartaocache();
307
308 tcp_keepinit = TCPTV_KEEP_INIT;
309 tcp_keepidle = TCPTV_KEEP_IDLE;
310 tcp_keepintvl = TCPTV_KEEPINTVL;
311 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
312 tcp_msl = TCPTV_MSL;
313 read_random(&tcp_now, sizeof(tcp_now));
314 tcp_now = tcp_now & 0x3fffffff; /* Starts tcp internal 100ms clock at a random value */
315
316
317 LIST_INIT(&tcb);
318 tcbinfo.listhead = &tcb;
319 pcbinfo = &tcbinfo;
320 if (!powerof2(tcp_tcbhashsize)) {
321 printf("WARNING: TCB hash size not a power of 2\n");
322 tcp_tcbhashsize = 512; /* safe default */
323 }
324 tcbinfo.hashsize = tcp_tcbhashsize;
325 tcbinfo.hashbase = hashinit(tcp_tcbhashsize, M_PCB, &tcbinfo.hashmask);
326 tcbinfo.porthashbase = hashinit(tcp_tcbhashsize, M_PCB,
327 &tcbinfo.porthashmask);
328 str_size = (vm_size_t) sizeof(struct inp_tp);
329 tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
330 sack_hole_zone = zinit(str_size, 120000*str_size, 8192, "sack_hole zone");
331 tcp_reass_maxseg = nmbclusters / 16;
332
333 #if INET6
334 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
335 #else /* INET6 */
336 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
337 #endif /* INET6 */
338 if (max_protohdr < TCP_MINPROTOHDR)
339 max_protohdr = TCP_MINPROTOHDR;
340 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
341 panic("tcp_init");
342 #undef TCP_MINPROTOHDR
343
344 /*
345 * allocate lock group attribute and group for tcp pcb mutexes
346 */
347 pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
348 pcbinfo->mtx_grp = lck_grp_alloc_init("tcppcb", pcbinfo->mtx_grp_attr);
349
350 /*
351 * allocate the lock attribute for tcp pcb mutexes
352 */
353 pcbinfo->mtx_attr = lck_attr_alloc_init();
354
355 if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL) {
356 printf("tcp_init: mutex not alloced!\n");
357 return; /* pretty much dead if this fails... */
358 }
359
360 delack_bitmask = _MALLOC((4 * tcp_tcbhashsize)/32, M_PCB, M_WAITOK);
361 if (delack_bitmask == 0)
362 panic("Delack Memory");
363
364 for (i=0; i < (tcbinfo.hashsize / 32); i++)
365 delack_bitmask[i] = 0;
366
367 for (i=0; i < N_TIME_WAIT_SLOTS; i++) {
368 LIST_INIT(&time_wait_slots[i]);
369 }
370
371 timeout(tcp_fasttimo, NULL, hz/TCP_RETRANSHZ);
372 }
373
374 /*
375 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
376 * tcp_template used to store this data in mbufs, but we now recopy it out
377 * of the tcpcb each time to conserve mbufs.
378 */
379 void
380 tcp_fillheaders(tp, ip_ptr, tcp_ptr)
381 struct tcpcb *tp;
382 void *ip_ptr;
383 void *tcp_ptr;
384 {
385 struct inpcb *inp = tp->t_inpcb;
386 struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
387
388 #if INET6
389 if ((inp->inp_vflag & INP_IPV6) != 0) {
390 struct ip6_hdr *ip6;
391
392 ip6 = (struct ip6_hdr *)ip_ptr;
393 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
394 (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
395 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
396 (IPV6_VERSION & IPV6_VERSION_MASK);
397 ip6->ip6_nxt = IPPROTO_TCP;
398 ip6->ip6_plen = sizeof(struct tcphdr);
399 ip6->ip6_src = inp->in6p_laddr;
400 ip6->ip6_dst = inp->in6p_faddr;
401 tcp_hdr->th_sum = 0;
402 } else
403 #endif
404 {
405 struct ip *ip = (struct ip *) ip_ptr;
406
407 ip->ip_vhl = IP_VHL_BORING;
408 ip->ip_tos = 0;
409 ip->ip_len = 0;
410 ip->ip_id = 0;
411 ip->ip_off = 0;
412 ip->ip_ttl = 0;
413 ip->ip_sum = 0;
414 ip->ip_p = IPPROTO_TCP;
415 ip->ip_src = inp->inp_laddr;
416 ip->ip_dst = inp->inp_faddr;
417 tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
418 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
419 }
420
421 tcp_hdr->th_sport = inp->inp_lport;
422 tcp_hdr->th_dport = inp->inp_fport;
423 tcp_hdr->th_seq = 0;
424 tcp_hdr->th_ack = 0;
425 tcp_hdr->th_x2 = 0;
426 tcp_hdr->th_off = 5;
427 tcp_hdr->th_flags = 0;
428 tcp_hdr->th_win = 0;
429 tcp_hdr->th_urp = 0;
430 }
431
432 /*
433 * Create template to be used to send tcp packets on a connection.
434 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
435 * use for this function is in keepalives, which use tcp_respond.
436 */
437 struct tcptemp *
438 tcp_maketemplate(tp)
439 struct tcpcb *tp;
440 {
441 struct mbuf *m;
442 struct tcptemp *n;
443
444 m = m_get(M_DONTWAIT, MT_HEADER);
445 if (m == NULL)
446 return (0);
447 m->m_len = sizeof(struct tcptemp);
448 n = mtod(m, struct tcptemp *);
449
450 tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
451 return (n);
452 }
453
454 /*
455 * Send a single message to the TCP at address specified by
456 * the given TCP/IP header. If m == 0, then we make a copy
457 * of the tcpiphdr at ti and send directly to the addressed host.
458 * This is used to force keep alive messages out using the TCP
459 * template for a connection. If flags are given then we send
460 * a message back to the TCP which originated the * segment ti,
461 * and discard the mbuf containing it and any other attached mbufs.
462 *
463 * In any case the ack and sequence number of the transmitted
464 * segment are as specified by the parameters.
465 *
466 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
467 */
void
tcp_respond(
	struct tcpcb *tp,
	void *ipgen,
	register struct tcphdr *th,
	register struct mbuf *m,
	tcp_seq ack,
	tcp_seq seq,
	int flags,
	unsigned int ifscope
	)
{
	register int tlen;
	int win = 0;
	/* sro/sro6 are on-stack routes used when there is no PCB (tp == NULL) */
	struct route *ro = 0;
	struct route sro;
	struct ip *ip;
	struct tcphdr *nth;
#if INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */

#if INET6
	/* Address family is determined from the header itself, not from tp. */
	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp) {
		/* Advertise a real window unless we are sending a RST. */
		if (!(flags & TH_RST)) {
			win = tcp_sbspace(tp);
			if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale)
				win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
		}
#if INET6
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
#endif /* INET6 */
		ro = &tp->t_inpcb->inp_route;
	} else {
#if INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else
#endif /* INET6 */
		{
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == 0) {
		/*
		 * No mbuf given (keepalive path): allocate one and copy the
		 * template headers in, then ACK the given seq/ack numbers.
		 */
		m = m_gethdr(M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#if INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			      sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		/*
		 * Reply in place: reuse the received mbuf, swapping source
		 * and destination addresses/ports so it goes back the way
		 * it came.  Any chained mbufs are discarded.
		 */
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#if INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#if INET6
	if (isipv6) {
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
						tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = 0;
#if CONFIG_MACF_NET
	if (tp != NULL && tp->t_inpcb != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		mac_mbuf_label_associate_inpcb(tp->t_inpcb, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_netinet_tcp_reply(m);
	}
#endif

	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#if INET6
	if (isipv6) {
		/* Checksum computed in software for v6. */
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       ro6 && ro6->ro_rt ?
					       ro6->ro_rt->rt_ifp :
					       NULL);
	} else
#endif /* INET6 */
	{
		/* v4 checksum is offloaded: seed pseudo-header sum only. */
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#if TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#if IPSEC
	if (ipsec_bypass == 0 && ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
		m_freem(m);
		return;
	}
#endif
#if INET6
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, 0, NULL, NULL, 0);
		/* Release any route the on-stack sro6 may have cached. */
		if (ro6 == &sro6 && ro6->ro_rt) {
			rtfree(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else
#endif /* INET6 */
	{
		struct ip_out_args ipoa = { ifscope };

		/* ro != &sro implies tp != NULL, so the deref below is safe. */
		if (ro != &sro) {
			/* Copy the cached route and take an extra reference */
			inp_route_copyout(tp->t_inpcb, &sro);
		}
		/*
		 * For consistency, pass a local route copy.
		 */
		(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);

		if (ro != &sro) {
			/* Synchronize cached PCB route */
			inp_route_copyin(tp->t_inpcb, &sro);
		} else if (sro.ro_rt != NULL) {
			rtfree(sro.ro_rt);
		}
	}
}
671
672 /*
673 * Create a new TCP control block, making an
674 * empty reassembly queue and hooking it to the argument
675 * protocol control block. The `inp' parameter must have
676 * come from the zone allocator set up in tcp_init().
677 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct inp_tp *it;
	register struct tcpcb *tp;
	register struct socket *so = inp->inp_socket;
#if INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (so->cached_in_sock_layer == 0) {
	     /* tcpcb is co-allocated right after the inpcb (struct inp_tp) */
	     it = (struct inp_tp *)inp;
	     tp = &it->tcb;
	}
	else
	     /* reuse the tcpcb saved aside by tcp_close() for cached sockets */
	     tp = (struct tcpcb *) inp->inp_saved_ppcb;

	bzero((char *) tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	/* Default MSS by address family; refined later from route/peer. */
	tp->t_maxseg = tp->t_maxopd =
#if INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	tp->sack_enable = tcp_do_sack;
	TAILQ_INIT(&tp->snd_holes);
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_TCPTV_MIN;
	tp->t_rxtcur = TCPTV_RTOBASE;
	/* Open cwnd/ssthresh to the maximum until measurements say otherwise. */
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = 0;
	tp->t_bw_rtttime = 0;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}
733
734 /*
735 * Drop a TCP connection, reporting
736 * the specified error. If connection is synchronized,
737 * then send a RST to peer.
738 */
739 struct tcpcb *
740 tcp_drop(tp, errno)
741 register struct tcpcb *tp;
742 int errno;
743 {
744 struct socket *so = tp->t_inpcb->inp_socket;
745
746 if (TCPS_HAVERCVDSYN(tp->t_state)) {
747 tp->t_state = TCPS_CLOSED;
748 (void) tcp_output(tp);
749 tcpstat.tcps_drops++;
750 } else
751 tcpstat.tcps_conndrops++;
752 if (errno == ETIMEDOUT && tp->t_softerror)
753 errno = tp->t_softerror;
754 so->so_error = errno;
755 return (tcp_close(tp));
756 }
757
758 /*
759 * Close a TCP control block:
760 * discard all space held by the tcp
761 * discard internet protocol block
762 * wake up any sleepers
763 */
struct tcpcb *
tcp_close(tp)
	register struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#if INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	struct rtentry *rt;
	int dosavessthresh;

	if ( inp->inp_ppcb == NULL) /* tcp_close was called previously, bail */
		return NULL;

	/* Clear the timers before we delete the PCB. */
	{
		int i;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			tp->t_timer[i] = 0;
		}
	}

	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp,0,0,0,0);
	/*
	 * NOTE(review): this switch has no effect in any state — every case
	 * just breaks.  It appears to be vestigial; confirm before removing.
	 */
	switch (tp->t_state)
	{
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_CLOSING:
	case TCPS_CLOSE_WAIT:
	case TCPS_LAST_ACK:
	     break;
	}

	/*
	 * If another thread for this tcp is currently in ip (indicated by
	 * the TF_SENDINPROG flag), defer the cleanup until after it returns
	 * back to tcp.  This is done to serialize the close until after all
	 * pending output is finished, in order to avoid having the PCB be
	 * detached and the cached route cleaned, only for ip to cache the
	 * route back into the PCB again.  Note that we've cleared all the
	 * timers at this point.  Set TF_CLOSING to indicate to tcp_output()
	 * that is should call us again once it returns from ip; at that
	 * point both flags should be cleared and we can proceed further
	 * with the cleanup.
	 */
	if (tp->t_flags & (TF_CLOSING|TF_SENDINPROG)) {
		tp->t_flags |= TF_CLOSING;
		return (NULL);
	}

#if INET6
	rt = isipv6 ? inp->in6p_route.ro_rt : inp->inp_route.ro_rt;
#else
	rt = inp->inp_route.ro_rt;
#endif
	/* Hold the route lock across the rmx metric updates below. */
	if (rt != NULL)
		RT_LOCK_SPIN(rt);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as the 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		register u_int32_t i = 0;

#if INET6
		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if (rt == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		}
		else
#endif /* INET6 */
		if (rt == NULL || !(rt->rt_flags & RTF_UP) ||
		    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr ==
		    INADDR_ANY || rt->generation_id != route_generation) {
			if (tp->t_state >= TCPS_CLOSE_WAIT)
				tp->t_state = TCPS_CLOSING;
			goto no_valid_rt;
		}

		RT_LOCK_ASSERT_HELD(rt);
		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshhold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_int32_t)(tp->t_maxseg +
#if INET6
				      (isipv6 ? sizeof (struct ip6_hdr) +
					       sizeof (struct tcphdr) :
#endif
				       sizeof (struct tcpiphdr)
#if INET6
				       )
#endif
				      );
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

	/*
	 * Mark route for deletion if no information is cached.
	 */
	if (rt != NULL && (so->so_flags & SOF_OVERFLOW) && tcp_lq_overflow) {
		if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
		    rt->rt_rmx.rmx_rtt == 0) {
			rt->rt_flags |= RTF_DELCLONE;
		}
	}

no_valid_rt:
	if (rt != NULL)
		RT_UNLOCK(rt);

	/* free the reassembly queue, if any */
	(void) tcp_freeq(tp);

	tcp_free_sackholes(tp);

	/* Free the packet list */
	if (tp->t_pktlist_head != NULL)
		m_freem_list(tp->t_pktlist_head);
	TCP_PKTLIST_CLEAR(tp);

#ifdef __APPLE__
	/* Save the tcpcb for reuse by tcp_newtcpcb() on cached sockets. */
	if (so->cached_in_sock_layer)
	    inp->inp_saved_ppcb = (caddr_t) tp;
#endif

	soisdisconnected(so);
#if INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif /* INET6 */
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed,0,0,0,0);
	return ((struct tcpcb *)0);
}
972
973 int
974 tcp_freeq(tp)
975 struct tcpcb *tp;
976 {
977
978 register struct tseg_qent *q;
979 int rv = 0;
980
981 while((q = LIST_FIRST(&tp->t_segq)) != NULL) {
982 LIST_REMOVE(q, tqe_q);
983 m_freem(q->tqe_m);
984 FREE(q, M_TSEGQ);
985 tcp_reass_qsize--;
986 rv = 1;
987 }
988 return (rv);
989 }
990
991 void
992 tcp_drain()
993 {
994 if (do_tcpdrain)
995 {
996 struct inpcb *inpb;
997 struct tcpcb *tcpb;
998 struct tseg_qent *te;
999
1000 /*
1001 * Walk the tcpbs, if existing, and flush the reassembly queue,
1002 * if there is one...
1003 * XXX: The "Net/3" implementation doesn't imply that the TCP
1004 * reassembly queue should be flushed, but in a situation
1005 * where we're really low on mbufs, this is potentially
1006 * usefull.
1007 */
1008 if (!lck_rw_try_lock_exclusive(tcbinfo.mtx)) /* do it next time if the lock is in use */
1009 return;
1010
1011 for (inpb = LIST_FIRST(tcbinfo.listhead); inpb;
1012 inpb = LIST_NEXT(inpb, inp_list)) {
1013 if ((tcpb = intotcpcb(inpb))) {
1014 while ((te = LIST_FIRST(&tcpb->t_segq))
1015 != NULL) {
1016 LIST_REMOVE(te, tqe_q);
1017 m_freem(te->tqe_m);
1018 FREE(te, M_TSEGQ);
1019 tcp_reass_qsize--;
1020 }
1021 }
1022 }
1023 lck_rw_done(tcbinfo.mtx);
1024
1025 }
1026 }
1027
1028 /*
1029 * Notify a tcp user of an asynchronous error;
1030 * store error as soft error, but wake up user
1031 * (for now, won't do anything until can select for soft error).
1032 *
1033 * Do not wake up user since there currently is no mechanism for
1034 * reporting soft errors (yet - a kqueue filter may be added).
1035 */
1036 static void
1037 tcp_notify(inp, error)
1038 struct inpcb *inp;
1039 int error;
1040 {
1041 struct tcpcb *tp;
1042
1043 if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD))
1044 return; /* pcb is gone already */
1045
1046 tp = (struct tcpcb *)inp->inp_ppcb;
1047
1048 /*
1049 * Ignore some errors if we are hooked up.
1050 * If connection hasn't completed, has retransmitted several times,
1051 * and receives a second error, give up now. This is better
1052 * than waiting a long time to establish a connection that
1053 * can never complete.
1054 */
1055 if (tp->t_state == TCPS_ESTABLISHED &&
1056 (error == EHOSTUNREACH || error == ENETUNREACH ||
1057 error == EHOSTDOWN)) {
1058 return;
1059 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1060 tp->t_softerror)
1061 tcp_drop(tp, error);
1062 else
1063 tp->t_softerror = error;
1064 #if 0
1065 wakeup((caddr_t) &so->so_timeo);
1066 sorwakeup(so);
1067 sowwakeup(so);
1068 #endif
1069 }
1070
1071 /*
1072 * tcpcb_to_otcpcb copies specific bits of a tcpcb to a otcpcb format.
1073 * The otcpcb data structure is passed to user space and must not change.
1074 */
static void
tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
{
	int i;

	/* Reassembly queue head exported as an opaque 32-bit handle. */
	otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
	otp->t_dupacks = tp->t_dupacks;
	for (i = 0; i < TCPT_NTIMERS; i++)
		otp->t_timer[i] = tp->t_timer[i];
	/* Kernel pointer cast to the fixed-size user-visible pointer type. */
	otp->t_inpcb = (_TCPCB_PTR(struct inpcb *))(uintptr_t)tp->t_inpcb;
	otp->t_state = tp->t_state;
	otp->t_flags = tp->t_flags;
	otp->t_force = tp->t_force;
	/* Send sequence space */
	otp->snd_una = tp->snd_una;
	otp->snd_max = tp->snd_max;
	otp->snd_nxt = tp->snd_nxt;
	otp->snd_up = tp->snd_up;
	otp->snd_wl1 = tp->snd_wl1;
	otp->snd_wl2 = tp->snd_wl2;
	otp->iss = tp->iss;
	otp->irs = tp->irs;
	/* Receive sequence space */
	otp->rcv_nxt = tp->rcv_nxt;
	otp->rcv_adv = tp->rcv_adv;
	otp->rcv_wnd = tp->rcv_wnd;
	otp->rcv_up = tp->rcv_up;
	/* Windows and congestion control */
	otp->snd_wnd = tp->snd_wnd;
	otp->snd_cwnd = tp->snd_cwnd;
	otp->snd_ssthresh = tp->snd_ssthresh;
	otp->t_maxopd = tp->t_maxopd;
	/* Timing and RTT estimation */
	otp->t_rcvtime = tp->t_rcvtime;
	otp->t_starttime = tp->t_starttime;
	otp->t_rtttime = tp->t_rtttime;
	otp->t_rtseq = tp->t_rtseq;
	otp->t_rxtcur = tp->t_rxtcur;
	otp->t_maxseg = tp->t_maxseg;
	otp->t_srtt = tp->t_srtt;
	otp->t_rttvar = tp->t_rttvar;
	otp->t_rxtshift = tp->t_rxtshift;
	otp->t_rttmin = tp->t_rttmin;
	otp->t_rttupdated = tp->t_rttupdated;
	otp->max_sndwnd = tp->max_sndwnd;
	otp->t_softerror = tp->t_softerror;
	/* Out-of-band data state */
	otp->t_oobflags = tp->t_oobflags;
	otp->t_iobc = tp->t_iobc;
	/* Window scaling / timestamp (RFC 1323) option state */
	otp->snd_scale = tp->snd_scale;
	otp->rcv_scale = tp->rcv_scale;
	otp->request_r_scale = tp->request_r_scale;
	otp->requested_s_scale = tp->requested_s_scale;
	otp->ts_recent = tp->ts_recent;
	otp->ts_recent_age = tp->ts_recent_age;
	otp->last_ack_sent = tp->last_ack_sent;
	/* T/TCP connection counts */
	otp->cc_send = tp->cc_send;
	otp->cc_recv = tp->cc_recv;
	/* Fast-recovery / bad-retransmit bookkeeping */
	otp->snd_recover = tp->snd_recover;
	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
	otp->t_badrxtwin = tp->t_badrxtwin;
}
1133
/*
 * Sysctl handler that exports a snapshot of all TCP pcbs (including
 * those parked in the TIME_WAIT slots) to user space as a stream of
 * xinpgen/xtcpcb records.  Read-only; runs under the shared pcb-list
 * lock for the whole snapshot.
 */
static int
tcp_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	int slot;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_shared(tcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe: report an estimate with n/8 slack for growth. */
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		/* This sysctl is read-only. */
		lck_rw_done(tcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;

	/* Leading generation record lets userland detect concurrent change. */
	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(tcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(tcbinfo.mtx);
		return ENOMEM;
	}

	/* Pass 1: collect live pcbs from the main list (capped at n). */
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
#ifdef __APPLE__
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
#else
		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
#endif
			inp_list[i++] = inp;
	}

	/* Pass 2: pcbs in TIME_WAIT live on separate per-slot lists. */
	for (slot = 0; slot < N_TIME_WAIT_SLOTS; slot++) {
		struct inpcb *inpnxt;

		for (inp = time_wait_slots[slot].lh_first; inp && i < n; inp = inpnxt) {
			inpnxt = inp->inp_list.le_next;
			if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
				inp_list[i++] = inp;
		}
	}

	n = i;

	/* Pass 3: convert each collected pcb and copy it out. */
	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xt.xt_inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL) {
				tcpcb_to_otcpcb((struct tcpcb *)inp_ppcb,
				    &xt.xt_tp);
			} else {
				/* pcb without a tcpcb: export zeroed tcp state */
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			}
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(tcbinfo.mtx);
	return error;
}
1256
/* Read-only sysctl exporting the active TCP pcb list as xtcpcb records. */
SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
1259
1260 #if !CONFIG_EMBEDDED
1261
/*
 * Copy the exported subset of a tcpcb into the 64-bit-clean xtcpcb64
 * user-space layout.  Same field set as tcpcb_to_otcpcb, except the
 * inpcb back-pointer is filled in by the caller (tcp_pcblist64).
 */
static void
tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
{
	int i;

	/* Reassembly queue head exported as an opaque 32-bit handle. */
	otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
	otp->t_dupacks = tp->t_dupacks;
	for (i = 0; i < TCPT_NTIMERS; i++)
		otp->t_timer[i] = tp->t_timer[i];
	otp->t_state = tp->t_state;
	otp->t_flags = tp->t_flags;
	otp->t_force = tp->t_force;
	/* Send sequence space */
	otp->snd_una = tp->snd_una;
	otp->snd_max = tp->snd_max;
	otp->snd_nxt = tp->snd_nxt;
	otp->snd_up = tp->snd_up;
	otp->snd_wl1 = tp->snd_wl1;
	otp->snd_wl2 = tp->snd_wl2;
	otp->iss = tp->iss;
	otp->irs = tp->irs;
	/* Receive sequence space */
	otp->rcv_nxt = tp->rcv_nxt;
	otp->rcv_adv = tp->rcv_adv;
	otp->rcv_wnd = tp->rcv_wnd;
	otp->rcv_up = tp->rcv_up;
	/* Windows and congestion control */
	otp->snd_wnd = tp->snd_wnd;
	otp->snd_cwnd = tp->snd_cwnd;
	otp->snd_ssthresh = tp->snd_ssthresh;
	otp->t_maxopd = tp->t_maxopd;
	/* Timing and RTT estimation */
	otp->t_rcvtime = tp->t_rcvtime;
	otp->t_starttime = tp->t_starttime;
	otp->t_rtttime = tp->t_rtttime;
	otp->t_rtseq = tp->t_rtseq;
	otp->t_rxtcur = tp->t_rxtcur;
	otp->t_maxseg = tp->t_maxseg;
	otp->t_srtt = tp->t_srtt;
	otp->t_rttvar = tp->t_rttvar;
	otp->t_rxtshift = tp->t_rxtshift;
	otp->t_rttmin = tp->t_rttmin;
	otp->t_rttupdated = tp->t_rttupdated;
	otp->max_sndwnd = tp->max_sndwnd;
	otp->t_softerror = tp->t_softerror;
	/* Out-of-band data state */
	otp->t_oobflags = tp->t_oobflags;
	otp->t_iobc = tp->t_iobc;
	/* Window scaling / timestamp (RFC 1323) option state */
	otp->snd_scale = tp->snd_scale;
	otp->rcv_scale = tp->rcv_scale;
	otp->request_r_scale = tp->request_r_scale;
	otp->requested_s_scale = tp->requested_s_scale;
	otp->ts_recent = tp->ts_recent;
	otp->ts_recent_age = tp->ts_recent_age;
	otp->last_ack_sent = tp->last_ack_sent;
	/* T/TCP connection counts */
	otp->cc_send = tp->cc_send;
	otp->cc_recv = tp->cc_recv;
	/* Fast-recovery / bad-retransmit bookkeeping */
	otp->snd_recover = tp->snd_recover;
	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
	otp->t_badrxtwin = tp->t_badrxtwin;
}
1319
1320
/*
 * 64-bit-clean variant of tcp_pcblist: exports the same pcb snapshot
 * (main list plus TIME_WAIT slots) using the xtcpcb64 layout, so a
 * 64-bit userland gets stable record sizes.  Read-only; runs under
 * the shared pcb-list lock for the whole snapshot.
 */
static int
tcp_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	int slot;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_shared(tcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe: report an estimate with n/8 slack for growth. */
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb64);
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		/* This sysctl is read-only. */
		lck_rw_done(tcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;

	/* Leading generation record lets userland detect concurrent change. */
	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(tcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(tcbinfo.mtx);
		return ENOMEM;
	}

	/* Pass 1: collect live pcbs from the main list (capped at n). */
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
#ifdef __APPLE__
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
#else
		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
#endif
			inp_list[i++] = inp;
	}

	/* Pass 2: pcbs in TIME_WAIT live on separate per-slot lists. */
	for (slot = 0; slot < N_TIME_WAIT_SLOTS; slot++) {
		struct inpcb *inpnxt;

		for (inp = time_wait_slots[slot].lh_first; inp && i < n; inp = inpnxt) {
			inpnxt = inp->inp_list.le_next;
			if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
				inp_list[i++] = inp;
		}
	}

	n = i;

	/* Pass 3: convert each collected pcb and copy it out. */
	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xtcpcb64 xt;

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
			/* Kernel pointer widened to a fixed 64-bit handle. */
			xt.xt_inpcb.inp_ppcb = (u_int64_t)(uintptr_t)inp->inp_ppcb;
			if (inp->inp_ppcb != NULL)
				tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb, &xt);
			if (inp->inp_socket)
				sotoxsocket64(inp->inp_socket, &xt.xt_inpcb.xi_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(tcbinfo.mtx);
	return error;
}
1437
/* Read-only sysctl exporting the TCP pcb list in the 64-bit xtcpcb64 layout. */
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64, CTLFLAG_RD, 0, 0,
	    tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
1440
1441 #endif /* !CONFIG_EMBEDDED */
1442
/*
 * Control-input handler for IPv4 TCP: dispatches ICMP-derived events
 * (unreachable, needfrag/PMTU, redirect, host-dead) to the affected
 * pcb(s).  'vip', when non-NULL, points at the offending IP header
 * quoted inside the ICMP message, which lets us find the exact
 * connection; otherwise every pcb talking to 'faddr' is notified.
 */
void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	tcp_seq icmp_tcp_seq;
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;

	void (*notify)(struct inpcb *, int) = tcp_notify;

	struct icmp *icp;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	/* Select the per-pcb notification routine for this command. */
	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		cmd == PRC_UNREACH_PORT) && ip)
		notify = tcp_drop_syn_sent;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;		/* redirects apply to all pcbs for faddr */
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;		/* likewise: notify every matching pcb */
	/* Source quench is deprecated */
	else if (cmd == PRC_QUENCH)
		return;
	else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip) {
		/* Recover the enclosing ICMP header from the quoted IP header. */
		icp = (struct icmp *)((caddr_t)ip
				      - offsetof(struct icmp, icmp_ip));
		th = (struct tcphdr *)((caddr_t)ip
				       + (IP_VHL_HL(ip->ip_vhl) << 2));
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			tcp_lock(inp->inp_socket, 1, 0);
			if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
				tcp_unlock(inp->inp_socket, 1, 0);
				return;
			}
			/* Byte-order flip of the quoted sequence number
			 * (htonl and ntohl perform the same swap). */
			icmp_tcp_seq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			/* Only act if the quoted segment is still in flight;
			 * guards against spoofed/stale ICMP. */
			if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
			    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
				if (cmd == PRC_MSGSIZE) {

					/*
					 * MTU discovery:
					 * If we got a needfrag and there is a host route to the
					 * original destination, and the MTU is not locked, then
					 * set the MTU in the route to the suggested new value
					 * (if given) and then notify as usual.  The ULPs will
					 * notice that the MTU has changed and adapt accordingly.
					 * If no new MTU was suggested, then we guess a new one
					 * less than the current value.  If the new MTU is
					 * unreasonably small (defined by sysctl tcp_minmss), then
					 * we reset the MTU to the interface value and enable the
					 * lock bit, indicating that we are no longer doing MTU
					 * discovery.
					 */
					struct rtentry *rt;
					int mtu;
					struct sockaddr_in icmpsrc = { sizeof (struct sockaddr_in), AF_INET,
										0 , { 0 }, { 0,0,0,0,0,0,0,0 } };
					icmpsrc.sin_addr = icp->icmp_ip.ip_dst;

					rt = rtalloc1((struct sockaddr *)&icmpsrc, 0,
					    RTF_CLONING | RTF_PRCLONING);
					if (rt != NULL) {
						RT_LOCK(rt);
						if ((rt->rt_flags & RTF_HOST) &&
						    !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
							mtu = ntohs(icp->icmp_nextmtu);
							if (!mtu)
								mtu = ip_next_mtu(rt->rt_rmx.
								    rmx_mtu, 1);
#if DEBUG_MTUDISC
							printf("MTU for %s reduced to %d\n",
							    inet_ntop(AF_INET,
							    &icmpsrc.sin_addr, ipv4str,
							    sizeof (ipv4str)), mtu);
#endif
							if (mtu < max(296, (tcp_minmss +
							    sizeof (struct tcpiphdr)))) {
								/* rt->rt_rmx.rmx_mtu =
								    rt->rt_ifp->if_mtu; */
								rt->rt_rmx.rmx_locks |= RTV_MTU;
							} else if (rt->rt_rmx.rmx_mtu > mtu) {
								rt->rt_rmx.rmx_mtu = mtu;
							}
						}
						RT_UNLOCK(rt);
						rtfree(rt);
					}
				}

				(*notify)(inp, inetctlerrmap[cmd]);
			}
			tcp_unlock(inp->inp_socket, 1, 0);
		}
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}
1555
1556 #if INET6
/*
 * Control-input handler for IPv6 TCP: dispatches ICMPv6-derived
 * events to the affected pcb(s).  'd', when non-NULL, carries the
 * ip6ctlparam describing the offending packet; otherwise every pcb
 * matching the destination address is notified.
 */
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	/* Only the port fields of the quoted TCP header are guaranteed
	 * to be present in the ICMPv6 payload. */
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;
	/* Source quench is deprecated */
	/* NOTE(review): this branch appears unreachable when
	 * inet6ctlerrmap[PRC_QUENCH] == 0, since the check above then
	 * returns first; the IPv4 handler tests PRC_QUENCH before the
	 * errmap — confirm ordering is intentional. */
	else if (cmd == PRC_QUENCH)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6) {
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		/* Notify only the pcb matching the quoted 4-tuple's ports. */
		in6_pcbnotify(&tcbinfo, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);
	} else {
		/* No quoted packet: notify all pcbs for this destination. */
		in6_pcbnotify(&tcbinfo, sa, 0,
		    (struct sockaddr *)(size_t)sa6_src, 0, cmd, notify);
	}
}
1623 #endif /* INET6 */
1624
1625
1626 /*
1627 * Following is where TCP initial sequence number generation occurs.
1628 *
1629 * There are two places where we must use initial sequence numbers:
1630 * 1. In SYN-ACK packets.
1631 * 2. In SYN packets.
1632 *
1633 * The ISNs in SYN-ACK packets have no monotonicity requirement,
1634 * and should be as unpredictable as possible to avoid the possibility
1635 * of spoofing and/or connection hijacking. To satisfy this
1636 * requirement, SYN-ACK ISNs are generated via the arc4random()
1637 * function. If exact RFC 1948 compliance is requested via sysctl,
1638 * these ISNs will be generated just like those in SYN packets.
1639 *
1640 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
1641 * depends on this property. In addition, these ISNs should be
1642 * unguessable so as to prevent connection hijacking. To satisfy
1643 * the requirements of this situation, the algorithm outlined in
1644 * RFC 1948 is used to generate sequence numbers.
1645 *
1646 * For more information on the theory of operation, please see
1647 * RFC 1948.
1648 *
1649 * Implementation details:
1650 *
1651 * Time is based off the system timer, and is corrected so that it
1652 * increases by one megabyte per second. This allows for proper
1653 * recycling on high speed LANs while still leaving over an hour
1654 * before rollover.
1655 *
1656 * Two sysctls control the generation of ISNs:
1657 *
1658 * net.inet.tcp.isn_reseed_interval controls the number of seconds
1659 * between seeding of isn_secret. This is normally set to zero,
1660 * as reseeding should not be necessary.
1661 *
1662 * net.inet.tcp.strict_rfc1948 controls whether RFC 1948 is followed
1663 * strictly. When strict compliance is requested, reseeding is
1664 * disabled and SYN-ACKs will be generated in the same manner as
1665 * SYNs. Strict mode is disabled by default.
1666 *
1667 */
1668
1669 #define ISN_BYTES_PER_SECOND 1048576
1670
1671 tcp_seq
1672 tcp_new_isn(tp)
1673 struct tcpcb *tp;
1674 {
1675 u_int32_t md5_buffer[4];
1676 tcp_seq new_isn;
1677 struct timeval timenow;
1678 u_char isn_secret[32];
1679 int isn_last_reseed = 0;
1680 MD5_CTX isn_ctx;
1681
1682 /* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. */
1683 if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT))
1684 && tcp_strict_rfc1948 == 0)
1685 #ifdef __APPLE__
1686 return random();
1687 #else
1688 return arc4random();
1689 #endif
1690 getmicrotime(&timenow);
1691
1692 /* Seed if this is the first use, reseed if requested. */
1693 if ((isn_last_reseed == 0) ||
1694 ((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) &&
1695 (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
1696 < (u_int)timenow.tv_sec))) {
1697 #ifdef __APPLE__
1698 read_random(&isn_secret, sizeof(isn_secret));
1699 #else
1700 read_random_unlimited(&isn_secret, sizeof(isn_secret));
1701 #endif
1702 isn_last_reseed = timenow.tv_sec;
1703 }
1704
1705 /* Compute the md5 hash and return the ISN. */
1706 MD5Init(&isn_ctx);
1707 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
1708 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
1709 #if INET6
1710 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
1711 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
1712 sizeof(struct in6_addr));
1713 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
1714 sizeof(struct in6_addr));
1715 } else
1716 #endif
1717 {
1718 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
1719 sizeof(struct in_addr));
1720 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
1721 sizeof(struct in_addr));
1722 }
1723 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
1724 MD5Final((u_char *) &md5_buffer, &isn_ctx);
1725 new_isn = (tcp_seq) md5_buffer[0];
1726 new_isn += timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz);
1727 return new_isn;
1728 }
1729
1730
1731 /*
1732 * When a specific ICMP unreachable message is received and the
1733 * connection state is SYN-SENT, drop the connection. This behavior
1734 * is controlled by the icmp_may_rst sysctl.
1735 */
1736 void
1737 tcp_drop_syn_sent(inp, errno)
1738 struct inpcb *inp;
1739 int errno;
1740 {
1741 struct tcpcb *tp = intotcpcb(inp);
1742
1743 if (tp && tp->t_state == TCPS_SYN_SENT)
1744 tcp_drop(tp, errno);
1745 }
1746
1747 /*
1748 * When `need fragmentation' ICMP is received, update our idea of the MSS
1749 * based on the new value in the route. Also nudge TCP to send something,
1750 * since we know the packet we just sent was dropped.
1751 * This duplicates some code in the tcp_mss() function in tcp_input.c.
1752 */
void
tcp_mtudisc(
	struct inpcb *inp,
	__unused int errno
)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#if INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (tp) {
#if INET6
		if (isipv6)
			rt = tcp_rtlookup6(inp);
		else
#endif /* INET6 */
		rt = tcp_rtlookup(inp, IFSCOPE_NONE);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			/* No route or no MTU info: fall back to the default MSS. */
			tp->t_maxopd = tp->t_maxseg =
#if INET6
				isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
				tcp_mssdflt;

			/* Route locked during lookup above */
			if (rt != NULL)
				RT_UNLOCK(rt);
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		/* MSS = path MTU minus the fixed IP+TCP header overhead. */
		mss = rt->rt_rmx.rmx_mtu -
#if INET6
			(isipv6 ?
			 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
			 sizeof(struct tcpiphdr)
#if INET6
			 )
#endif /* INET6 */
			;

		/* Route locked during lookup above */
		RT_UNLOCK(rt);

		/* Never exceed the MSS the peer offered, if we have one. */
		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;		/* current MSS already fits the new MTU */
		tp->t_maxopd = mss;

		/* Reserve room for timestamp options when both sides use them. */
		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;

		/* Never send more per segment than the send buffer can hold. */
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		/* Restart RTT measurement and retransmit from snd_una,
		 * since we know the last packet was dropped. */
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}
1842
1843 /*
1844 * Look-up the routing entry to the peer of this inpcb. If no route
1845 * is found and it cannot be allocated the return NULL. This routine
1846 * is called by TCP routines that access the rmx structure and by tcp_mss
1847 * to get the interface MTU. If a route is found, this routine will
1848 * hold the rtentry lock; the caller is responsible for unlocking.
1849 */
struct rtentry *
tcp_rtlookup(inp, input_ifscope)
	struct inpcb *inp;
	unsigned int input_ifscope;
{
	struct route *ro;
	struct rtentry *rt;
	struct tcpcb *tp;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);

	ro = &inp->inp_route;
	if ((rt = ro->ro_rt) != NULL)
		RT_LOCK(rt);

	/* Cached route is missing, down, or stale: look up a fresh one. */
	if (rt == NULL || !(rt->rt_flags & RTF_UP) ||
	    rt->generation_id != route_generation) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			unsigned int ifscope;

			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inp->inp_faddr;

			/*
			 * If the socket was bound to an interface, then
			 * the bound-to-interface takes precedence over
			 * the inbound interface passed in by the caller
			 * (if we get here as part of the output path then
			 * input_ifscope is IFSCOPE_NONE).
			 */
			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
			    inp->inp_boundif : input_ifscope;

			if (rt != NULL)
				RT_UNLOCK(rt);
			rtalloc_scoped_ign(ro, 0, ifscope);
			if ((rt = ro->ro_rt) != NULL)
				RT_LOCK(rt);
		}
	}

	/*
	 * Update MTU discovery determination. Don't do it if:
	 *	1) it is disabled via the sysctl
	 *	2) the route isn't up
	 *	3) the MTU is locked (if it is, then discovery has been
	 *	   disabled)
	 */

	tp = intotcpcb(inp);

	if (!path_mtu_discovery || ((rt != NULL) &&
	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
		tp->t_flags &= ~TF_PMTUD;
	else
		tp->t_flags |= TF_PMTUD;

#if CONFIG_IFEF_NOWINDOWSCALE
	if (tcp_obey_ifef_nowindowscale &&
	    tp->t_state == TCPS_SYN_SENT && rt != NULL && rt->rt_ifp != NULL &&
	    (rt->rt_ifp->if_eflags & IFEF_NOWINDOWSCALE)) {
		/* Window scaling is not enabled on this interface */
		tp->t_flags &= ~TF_REQ_SCALE;
	}
#endif

	if (rt != NULL && rt->rt_ifp != NULL) {
		/* Propagate interface capabilities to the socket/tcpcb. */
		somultipages(inp->inp_socket,
		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
		tcp_set_tso(tp, rt->rt_ifp);
	}

	/*
	 * Caller needs to call RT_UNLOCK(rt).
	 */
	return rt;
}
1930
1931 #if INET6
/*
 * IPv6 counterpart of tcp_rtlookup: look up (or refresh) the cached
 * route to the peer, update PMTU-discovery and TSO state as a side
 * effect, and return the route locked; the caller must RT_UNLOCK it.
 */
struct rtentry *
tcp_rtlookup6(inp)
	struct inpcb *inp;
{
	struct route_in6 *ro6;
	struct rtentry *rt;
	struct tcpcb *tp;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);

	ro6 = &inp->in6p_route;
	if ((rt = ro6->ro_rt) != NULL)
		RT_LOCK(rt);

	/* Cached route is missing, down, or stale: look up a fresh one. */
	if (rt == NULL || !(rt->rt_flags & RTF_UP) ||
	    rt->generation_id != route_generation) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
			struct sockaddr_in6 *dst6;

			dst6 = (struct sockaddr_in6 *)&ro6->ro_dst;
			dst6->sin6_family = AF_INET6;
			dst6->sin6_len = sizeof(*dst6);
			dst6->sin6_addr = inp->in6p_faddr;
			if (rt != NULL)
				RT_UNLOCK(rt);
			rtalloc_ign((struct route *)ro6, 0);
			if ((rt = ro6->ro_rt) != NULL)
				RT_LOCK(rt);
		}
	}
	/*
	 * Update path MTU Discovery determination
	 * while looking up the route:
	 *  1) we have a valid route to the destination
	 *  2) the MTU is not locked (if it is, then discovery has been
	 *     disabled)
	 */


	tp = intotcpcb(inp);

	/*
	 * Update MTU discovery determination. Don't do it if:
	 *	1) it is disabled via the sysctl
	 *	2) the route isn't up
	 *	3) the MTU is locked (if it is, then discovery has been
	 *	   disabled)
	 */

	if (!path_mtu_discovery || ((rt != NULL) &&
	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
		tp->t_flags &= ~TF_PMTUD;
	else
		tp->t_flags |= TF_PMTUD;

#if CONFIG_IFEF_NOWINDOWSCALE
	if (tcp_obey_ifef_nowindowscale &&
	    tp->t_state == TCPS_SYN_SENT && rt != NULL && rt->rt_ifp != NULL &&
	    (rt->rt_ifp->if_eflags & IFEF_NOWINDOWSCALE)) {
		/* Window scaling is not enabled on this interface */
		tp->t_flags &= ~TF_REQ_SCALE;
	}
#endif

	if (rt != NULL && rt->rt_ifp != NULL) {
		/* Propagate interface capabilities to the socket/tcpcb. */
		somultipages(inp->inp_socket,
		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
		tcp_set_tso(tp, rt->rt_ifp);
	}

	/*
	 * Caller needs to call RT_UNLOCK(rt).
	 */
	return rt;
}
2008 #endif /* INET6 */
2009
2010 #if IPSEC
2011 /* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#if INET6
	struct ip6_hdr *ip6 = NULL;
#endif /* INET6 */
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	/* Build a throwaway template packet so the IPsec code can size
	 * the ESP/AH overhead for this connection's security policy. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);	/* MAC-OK */
	if (!m)
		return 0;	/* no mbuf: report zero overhead */

#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
			sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}
	m_free(m);	/* template no longer needed */
	return hdrsiz;
}
2051 #endif /*IPSEC*/
2052
2053 /*
2054 * Return a pointer to the cached information about the remote host.
2055 * The cached information is stored in the protocol specific part of
2056 * the route metrics.
2057 */
2058 struct rmxp_tao *
2059 tcp_gettaocache(inp)
2060 struct inpcb *inp;
2061 {
2062 struct rtentry *rt;
2063 struct rmxp_tao *taop;
2064
2065 #if INET6
2066 if ((inp->inp_vflag & INP_IPV6) != 0)
2067 rt = tcp_rtlookup6(inp);
2068 else
2069 #endif /* INET6 */
2070 rt = tcp_rtlookup(inp, IFSCOPE_NONE);
2071
2072 /* Make sure this is a host route and is up. */
2073 if (rt == NULL ||
2074 (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST)) {
2075 /* Route locked during lookup above */
2076 if (rt != NULL)
2077 RT_UNLOCK(rt);
2078 return NULL;
2079 }
2080
2081 taop = rmx_taop(rt->rt_rmx);
2082 /* Route locked during lookup above */
2083 RT_UNLOCK(rt);
2084 return (taop);
2085 }
2086
2087 /*
2088 * Clear all the TAO cache entries, called from tcp_init.
2089 *
2090 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
2093 * nothing in the cache left over.
2094 */
static void
tcp_cleartaocache()
{
	/* Intentionally empty; see the comment above. */
}
2099
/*
 * Socket lock routine for TCP: takes the per-inpcb mutex, optionally
 * bumps the socket use count, and records the caller's address in the
 * socket's lock-history ring for debugging.  Panics if the socket has
 * no pcb or a negative use count.  Always returns 0.
 */
int
tcp_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	/* Record the caller's return address for the lock-history ring. */
	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
	} else {
		panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (refcount)
		so->so_usecount++;
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}
2129
2130 int
2131 tcp_unlock(struct socket *so, int refcount, void *lr)
2132 {
2133 void *lr_saved;
2134
2135 if (lr == NULL)
2136 lr_saved = __builtin_return_address(0);
2137 else
2138 lr_saved = lr;
2139
2140 #ifdef MORE_TCPLOCK_DEBUG
2141 printf("tcp_unlock: so=%p sopcb=%p lock=%p ref=%x lr=%p\n",
2142 so, so->so_pcb, ((struct inpcb *)so->so_pcb)->inpcb_mtx,
2143 so->so_usecount, lr_saved);
2144 #endif
2145 if (refcount)
2146 so->so_usecount--;
2147
2148 if (so->so_usecount < 0) {
2149 panic("tcp_unlock: so=%p usecount=%x lrh= %s\n",
2150 so, so->so_usecount, solockhistory_nr(so));
2151 /* NOTREACHED */
2152 }
2153 if (so->so_pcb == NULL) {
2154 panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
2155 so, so->so_usecount, lr_saved, solockhistory_nr(so));
2156 /* NOTREACHED */
2157 } else {
2158 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx,
2159 LCK_MTX_ASSERT_OWNED);
2160 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2161 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
2162 lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
2163 }
2164 return (0);
2165 }
2166
2167 lck_mtx_t *
2168 tcp_getlock(
2169 struct socket *so,
2170 __unused int locktype)
2171 {
2172 struct inpcb *inp = sotoinpcb(so);
2173
2174 if (so->so_pcb) {
2175 if (so->so_usecount < 0)
2176 panic("tcp_getlock: so=%p usecount=%x lrh= %s\n",
2177 so, so->so_usecount, solockhistory_nr(so));
2178 return(inp->inpcb_mtx);
2179 }
2180 else {
2181 panic("tcp_getlock: so=%p NULL so_pcb %s\n",
2182 so, solockhistory_nr(so));
2183 return (so->so_proto->pr_domain->dom_mtx);
2184 }
2185 }
2186
/*
 * Compute the receive-window space that may be advertised for this
 * connection, based on the free byte and mbuf space in the receive
 * socket buffer, with adjustments for background traffic, slow links,
 * and overall mbuf resource availability.
 */
int32_t
tcp_sbspace(struct tcpcb *tp)
{
	struct sockbuf *sb = &tp->t_inpcb->inp_socket->so_rcv;
	int32_t space, newspace;

	/* Free space is the smaller of byte headroom and mbuf headroom. */
	space = ((int32_t) imin((sb->sb_hiwat - sb->sb_cc),
		(sb->sb_mbmax - sb->sb_mbcnt)));
	if (space < 0)
		space = 0;

#if TRAFFIC_MGT
	if (tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) {
		if (tcp_background_io_enabled &&
			tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_SUPPRESSED) {
			tp->t_flags |= TF_RXWIN0SENT;
			return 0; /* Triggers TCP window closing by responding there is no space */
		}
	}
#endif /* TRAFFIC_MGT */

	/* Avoid increasing window size if the current window
	 * is already very low, we could be in "persist" mode and
	 * we could break some apps (see rdar://5409343)
	 */

	if (space < tp->t_maxseg)
		return space;

	/* Clip window size for slower link */

	if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0 )
		return imin(space, slowlink_wsize);

	/*
	 * Check for resource constraints before over-adjusting the amount of space we can
	 * advertise in the TCP window size updates.
	 */

	if (sbspace_factor && (tp->t_inpcb->inp_pcbinfo->ipi_count < tcp_sockthreshold) &&
		(total_mb_cnt / 8) < (mbstat.m_clusters / sbspace_factor)) {
		if (space < (int32_t)(sb->sb_maxused - sb->sb_cc)) {/* make sure we don't constrain the window if we have enough resources */
			space = (int32_t) imax((sb->sb_maxused - sb->sb_cc), tp->rcv_maxbyps);
		}
		/* NOTE(review): newspace appears to recompute the same bound
		 * just taken above; kept as-is to preserve behavior. */
		newspace = (int32_t) imax(((int32_t)sb->sb_maxused - sb->sb_cc), (int32_t)tp->rcv_maxbyps);

		if (newspace > space)
			space = newspace;
	}
	return space;
}
2238 /*
2239 * Checks TCP Segment Offloading capability for a given connection and interface pair.
2240 */
2241 void
2242 tcp_set_tso(tp, ifp)
2243 struct tcpcb *tp;
2244 struct ifnet *ifp;
2245 {
2246 #if INET6
2247 struct inpcb *inp = tp->t_inpcb;
2248 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
2249
2250 if (isipv6) {
2251 /*
2252 * Radar 6921834: Disable TSO IPv6 because there is no support
2253 * for TSO & HW checksum in ip6_output yet
2254 */
2255 #if 0
2256 if (ifp && ifp->if_hwassist & IFNET_TSO_IPV6) {
2257 tp->t_flags |= TF_TSO;
2258 if (ifp->if_tso_v6_mtu != 0)
2259 tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
2260 else
2261 tp->tso_max_segment_size = TCP_MAXWIN;
2262 } else
2263 tp->t_flags &= ~TF_TSO;
2264
2265 #endif
2266 } else
2267 #endif /* INET6 */
2268
2269 {
2270 if (ifp && ifp->if_hwassist & IFNET_TSO_IPV4) {
2271 tp->t_flags |= TF_TSO;
2272 if (ifp->if_tso_v4_mtu != 0)
2273 tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
2274 else
2275 tp->tso_max_segment_size = TCP_MAXWIN;
2276 } else
2277 tp->t_flags &= ~TF_TSO;
2278 }
2279 }
2280 /* DSEP Review Done pl-20051213-v02 @3253,@3391,@3400 */