/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.22 2001/08/22 00:59:12 silby Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <kern/locks.h>
#include <kern/zalloc.h>

#include <net/route.h>
#include <net/if.h>

#define tcp_minmssoverload fring
#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#if INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/icmp_var.h>
#if INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#if IPSEC
#include <netinet6/ipsec.h>
#if INET6
#include <netinet6/ipsec6.h>
#endif
#endif /* IPSEC */

#undef tcp_minmssoverload

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

#include <libkern/crypto/md5.h>
#include <sys/kdebug.h>

#define DBG_FNC_TCP_CLOSE	NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))

extern int tcp_lq_overflow;

/* temporary: for testing */
#if IPSEC
extern int ipsec_bypass;
#endif

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#if INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one. The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending. Set to zero to disable MINMSS
 * checking. This setting prevents us from sending packets that are
 * too small.
 */
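/*
 * For example, a 1 MB transfer takes roughly 720 segments at an MSS of
 * 1460, but more than 52,000 segments at an MSS of 20 -- about 70 times
 * the per-packet processing cost for the same payload.
 */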
int	tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

/*
 * Number of TCP segments per second we accept from remote host
 * before we start to calculate average segment size. If average
 * segment size drops below the minimum TCP MSS we assume a DoS
 * attack and reset+drop the connection. Care has to be taken not to
 * set this value too small, so as not to kill interactive-type
 * connections (telnet, SSH) which send many small packets.
 */
#ifdef FIX_WORKAROUND_FOR_3894301
__private_extern__ int	tcp_minmssoverload = TCP_MINMSSOVERLOAD;
#else
__private_extern__ int	tcp_minmssoverload = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
    &tcp_minmssoverload, 0, "Number of TCP Segments per Second allowed to "
    "be under the MINMSS Size");

static int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	do_tcpdrain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_strict_rfc1948 = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW,
    &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

static int	tcp_background_io_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_enabled, CTLFLAG_RW,
    &tcp_background_io_enabled, 0, "Background IO Enabled");

int	tcp_TCPTV_MIN = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_min, CTLFLAG_RW,
    &tcp_TCPTV_MIN, 0, "min rtt value allowed");

__private_extern__ int	tcp_use_randomport = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, randomize_ports, CTLFLAG_RW,
    &tcp_use_randomport, 0, "Randomize TCP port numbers");

static void	tcp_cleartaocache(void);
static void	tcp_notify(struct inpcb *, int);
struct zone	*sack_hole_zone;

extern unsigned int total_mb_cnt;
extern unsigned int total_cl_cnt;
extern int sbspace_factor;
extern int tcp_sockthreshold;
extern int slowlink_wsize;	/* window correction for slow links */
extern int path_mtu_discovery;


/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	CONFIG_TCBHASHSIZE
#endif

__private_extern__ int	tcp_tcbhashsize = TCBHASHSIZE;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

/*
 * This is the actual shape of what we allocate using the zone
 * allocator. Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately. By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNMENT	32
#define ALIGNM1		(ALIGNMENT - 1)
struct inp_tp {
	union {
		struct inpcb inp;
		char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct tcpcb tcb;
};
#undef ALIGNMENT
#undef ALIGNM1
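/*
 * The union rounds the inpcb up to the next multiple of ALIGNMENT, so the
 * tcpcb that follows always starts on a 32-byte boundary. For instance,
 * if sizeof(struct inpcb) were 500, align[] would occupy
 * (500 + 31) & ~31 = 512 bytes, and tcb would begin at offset 512.
 */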

extern struct inpcbhead time_wait_slots[];
extern u_int32_t *delack_bitmask;

int get_inpcb_str_size(void);
int get_tcp_str_size(void);

static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);

int
get_inpcb_str_size(void)
{
	return sizeof(struct inpcb);
}


int
get_tcp_str_size(void)
{
	return sizeof(struct tcpcb);
}

int tcp_freeq(struct tcpcb *tp);


/*
 * TCP initialization.
 */
void
tcp_init(void)
{
	vm_size_t str_size;
	int i;
	struct inpcbinfo *pcbinfo;

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	read_random(&tcp_now, sizeof(tcp_now));
	tcp_now = tcp_now & 0x3fffffff; /* Start TCP's internal 100ms clock at a random value */


	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	pcbinfo = &tcbinfo;
	if (!powerof2(tcp_tcbhashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		tcp_tcbhashsize = 512; /* safe default */
	}
	tcbinfo.hashsize = tcp_tcbhashsize;
	tcbinfo.hashbase = hashinit(tcp_tcbhashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(tcp_tcbhashsize, M_PCB,
	    &tcbinfo.porthashmask);
	str_size = (vm_size_t) sizeof(struct inp_tp);
	tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
	sack_hole_zone = zinit(str_size, 120000*str_size, 8192, "sack_hole zone");
	tcp_reass_maxseg = nmbclusters / 16;

#if INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * allocate lock group attribute and group for tcp pcb mutexes
	 */
	pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->mtx_grp = lck_grp_alloc_init("tcppcb", pcbinfo->mtx_grp_attr);

	/*
	 * allocate the lock attribute for tcp pcb mutexes
	 */
	pcbinfo->mtx_attr = lck_attr_alloc_init();

	if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL) {
		printf("tcp_init: mutex not alloced!\n");
		return;	/* pretty much dead if this fails... */
	}

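	/*
	 * One delayed-ACK bit per hash bucket: (4 * tcp_tcbhashsize) / 32
	 * bytes is tcp_tcbhashsize / 8 bytes, i.e. tcp_tcbhashsize bits of
	 * u_int32_t bitmask.
	 */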
	delack_bitmask = _MALLOC((4 * tcp_tcbhashsize)/32, M_PCB, M_WAITOK);
	if (delack_bitmask == 0)
		panic("Delack Memory");

	for (i = 0; i < (tcbinfo.hashsize / 32); i++)
		delack_bitmask[i] = 0;

	for (i = 0; i < N_TIME_WAIT_SLOTS; i++) {
		LIST_INIT(&time_wait_slots[i]);
	}

	timeout(tcp_fasttimo, NULL, hz/TCP_RETRANSHZ);
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(
	struct tcpcb *tp,
	void *ipgen,
	register struct tcphdr *th,
	register struct mbuf *m,
	tcp_seq ack,
	tcp_seq seq,
	int flags,
	unsigned int ifscope
	)
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;
	struct ip *ip;
	struct tcphdr *nth;
#if INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */

#if INET6
	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp) {
		if (!(flags & TH_RST)) {
			win = tcp_sbspace(tp);
			if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale)
				win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
		}
#if INET6
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
#endif /* INET6 */
		ro = &tp->t_inpcb->inp_route;
	} else {
#if INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else
#endif /* INET6 */
		{
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#if INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#if INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#if INET6
	if (isipv6) {
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = 0;
#if CONFIG_MACF_NET
	if (tp != NULL && tp->t_inpcb != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		mac_mbuf_label_associate_inpcb(tp->t_inpcb, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_netinet_tcp_reply(m);
	}
#endif

	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#if INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
		    ro6 && ro6->ro_rt ?
		    ro6->ro_rt->rt_ifp :
		    NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#if TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#if IPSEC
	if (ipsec_bypass == 0 && ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
		m_freem(m);
		return;
	}
#endif
#if PKT_PRIORITY
	if (tp != NULL)
		set_traffic_class(m, tp->t_inpcb->inp_socket, MBUF_TC_NONE);
#endif /* PKT_PRIORITY */
#if INET6
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, 0, NULL, NULL, 0);
		if (ro6 == &sro6 && ro6->ro_rt) {
			rtfree(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else
#endif /* INET6 */
	{
		struct ip_out_args ipoa = { ifscope };

		if (ro != &sro) {
			/* Copy the cached route and take an extra reference */
			inp_route_copyout(tp->t_inpcb, &sro);
		}
		/*
		 * For consistency, pass a local route copy.
		 */
		(void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);

		if (ro != &sro) {
			/* Synchronize cached PCB route */
			inp_route_copyin(tp->t_inpcb, &sro);
		} else if (sro.ro_rt != NULL) {
			rtfree(sro.ro_rt);
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	register struct tcpcb *tp;
	register struct socket *so = inp->inp_socket;
#if INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (so->cached_in_sock_layer == 0) {
		it = (struct inp_tp *)inp;
		tp = &it->tcb;
	} else
		tp = (struct tcpcb *) inp->inp_saved_ppcb;

	bzero((char *) tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd =
#if INET6
	    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
	    tcp_mssdflt;

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	tp->sack_enable = tcp_do_sack;
	TAILQ_INIT(&tp->snd_holes);
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_TCPTV_MIN;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = 0;
	tp->t_bw_rtttime = 0;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(register struct tcpcb *tp, int errno)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(register struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#if INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	struct rtentry *rt;
	int dosavessthresh;

	if (inp->inp_ppcb == NULL) /* tcp_close was called previously; bail */
		return NULL;

	/* Clear the timers before we delete the PCB. */
	{
		int i;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			tp->t_timer[i] = 0;
		}
	}

	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
	switch (tp->t_state) {
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_CLOSING:
	case TCPS_CLOSE_WAIT:
	case TCPS_LAST_ACK:
		break;
	}

	/*
	 * If another thread for this tcp is currently in ip (indicated by
	 * the TF_SENDINPROG flag), defer the cleanup until after it returns
	 * back to tcp.  This is done to serialize the close until after all
	 * pending output is finished, in order to avoid having the PCB be
	 * detached and the cached route cleaned, only for ip to cache the
	 * route back into the PCB again.  Note that we've cleared all the
	 * timers at this point.  Set TF_CLOSING to indicate to tcp_output()
	 * that it should call us again once it returns from ip; at that
	 * point both flags should be cleared and we can proceed further
	 * with the cleanup.
	 */
	if (tp->t_flags & (TF_CLOSING|TF_SENDINPROG)) {
		tp->t_flags |= TF_CLOSING;
		return (NULL);
	}

#if INET6
	rt = isipv6 ? inp->in6p_route.ro_rt : inp->inp_route.ro_rt;
#else
	rt = inp->inp_route.ro_rt;
#endif
	if (rt != NULL)
		RT_LOCK_SPIN(rt);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
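	/*
	 * Scale conversion for the block below: t_srtt is measured in ticks
	 * of the TCP_RETRANSHZ clock and is scaled up by TCP_RTT_SCALE for
	 * the smoothing filter, while rmx_rtt is kept in RTM_RTTUNIT units
	 * per second; multiplying by RTM_RTTUNIT / (TCP_RETRANSHZ *
	 * TCP_RTT_SCALE) converts between the two.
	 */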
	if (tp->t_rttupdated >= 16) {
		register u_int32_t i = 0;

#if INET6
		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if (rt == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		}
		else
#endif /* INET6 */
		if (rt == NULL || !(rt->rt_flags & RTF_UP) ||
		    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr ==
		    INADDR_ANY || rt->generation_id != route_generation) {
			if (tp->t_state >= TCPS_CLOSE_WAIT)
				tp->t_state = TCPS_CLOSING;
			goto no_valid_rt;
		}

		RT_LOCK_ASSERT_HELD(rt);
		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
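			/*
			 * For example, an ssthresh of 32768 bytes with a
			 * t_maxseg of 1448 rounds to (32768 + 724) / 1448 =
			 * 23 segments, which is then scaled back up to
			 * 23 * (1448 + 40) = 34224 bytes of on-the-wire
			 * data in the IPv4 case.
			 */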
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_int32_t)(tp->t_maxseg +
#if INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
			    sizeof (struct tcphdr) :
#endif
			    sizeof (struct tcpiphdr)
#if INET6
			    )
#endif
			    );
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

	/*
	 * Mark route for deletion if no information is cached.
	 */
	if (rt != NULL && (so->so_flags & SOF_OVERFLOW) && tcp_lq_overflow) {
		if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
		    rt->rt_rmx.rmx_rtt == 0) {
			rt->rt_flags |= RTF_DELCLONE;
		}
	}

no_valid_rt:
	if (rt != NULL)
		RT_UNLOCK(rt);

	/* free the reassembly queue, if any */
	(void) tcp_freeq(tp);

	tcp_free_sackholes(tp);

	/* Free the packet list */
	if (tp->t_pktlist_head != NULL)
		m_freem_list(tp->t_pktlist_head);
	TCP_PKTLIST_CLEAR(tp);

#ifdef __APPLE__
	if (so->cached_in_sock_layer)
		inp->inp_saved_ppcb = (caddr_t) tp;
#endif

	soisdisconnected(so);
#if INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif /* INET6 */
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed, 0, 0, 0, 0);
	return ((struct tcpcb *)0);
}

int
tcp_freeq(struct tcpcb *tp)
{
	register struct tseg_qent *q;
	int rv = 0;

	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
		tcp_reass_qsize--;
		rv = 1;
	}
	return (rv);
}

void
tcp_drain(void)
{
	if (do_tcpdrain) {
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 *      reassembly queue should be flushed, but in a situation
		 *      where we're really low on mbufs, this is potentially
		 *      useful.
		 */
		if (!lck_rw_try_lock_exclusive(tcbinfo.mtx)) /* do it next time if the lock is in use */
			return;

		for (inpb = LIST_FIRST(tcbinfo.listhead); inpb;
		    inpb = LIST_NEXT(inpb, inp_list)) {
			if ((tcpb = intotcpcb(inpb))) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					FREE(te, M_TSEGQ);
					tcp_reass_qsize--;
				}
			}
		}
		lck_rw_done(tcbinfo.mtx);
	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp;

	if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD))
		return; /* pcb is gone already */

	tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

/*
 * tcpcb_to_otcpcb copies specific bits of a tcpcb to an otcpcb format.
 * The otcpcb data structure is passed to user space and must not change.
 */
static void
tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
{
	int i;

	otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
	otp->t_dupacks = tp->t_dupacks;
	for (i = 0; i < TCPT_NTIMERS; i++)
		otp->t_timer[i] = tp->t_timer[i];
	otp->t_inpcb = (_TCPCB_PTR(struct inpcb *))(uintptr_t)tp->t_inpcb;
	otp->t_state = tp->t_state;
	otp->t_flags = tp->t_flags;
	otp->t_force = tp->t_force;
	otp->snd_una = tp->snd_una;
	otp->snd_max = tp->snd_max;
	otp->snd_nxt = tp->snd_nxt;
	otp->snd_up = tp->snd_up;
	otp->snd_wl1 = tp->snd_wl1;
	otp->snd_wl2 = tp->snd_wl2;
	otp->iss = tp->iss;
	otp->irs = tp->irs;
	otp->rcv_nxt = tp->rcv_nxt;
	otp->rcv_adv = tp->rcv_adv;
	otp->rcv_wnd = tp->rcv_wnd;
	otp->rcv_up = tp->rcv_up;
	otp->snd_wnd = tp->snd_wnd;
	otp->snd_cwnd = tp->snd_cwnd;
	otp->snd_ssthresh = tp->snd_ssthresh;
	otp->t_maxopd = tp->t_maxopd;
	otp->t_rcvtime = tp->t_rcvtime;
	otp->t_starttime = tp->t_starttime;
	otp->t_rtttime = tp->t_rtttime;
	otp->t_rtseq = tp->t_rtseq;
	otp->t_rxtcur = tp->t_rxtcur;
	otp->t_maxseg = tp->t_maxseg;
	otp->t_srtt = tp->t_srtt;
	otp->t_rttvar = tp->t_rttvar;
	otp->t_rxtshift = tp->t_rxtshift;
	otp->t_rttmin = tp->t_rttmin;
	otp->t_rttupdated = tp->t_rttupdated;
	otp->max_sndwnd = tp->max_sndwnd;
	otp->t_softerror = tp->t_softerror;
	otp->t_oobflags = tp->t_oobflags;
	otp->t_iobc = tp->t_iobc;
	otp->snd_scale = tp->snd_scale;
	otp->rcv_scale = tp->rcv_scale;
	otp->request_r_scale = tp->request_r_scale;
	otp->requested_s_scale = tp->requested_s_scale;
	otp->ts_recent = tp->ts_recent;
	otp->ts_recent_age = tp->ts_recent_age;
	otp->last_ack_sent = tp->last_ack_sent;
	otp->cc_send = tp->cc_send;
	otp->cc_recv = tp->cc_recv;
	otp->snd_recover = tp->snd_recover;
	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
	otp->t_badrxtwin = tp->t_badrxtwin;
}

static int
tcp_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	int slot;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
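	/*
	 * A NULL oldptr is the sysctl size probe: report an estimate of the
	 * space needed, padded by n/8 extra slots so that connections created
	 * while the caller allocates its buffer still fit on the second call.
	 */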
	lck_rw_lock_shared(tcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xtcpcb);
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(tcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(tcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(tcbinfo.mtx);
		return ENOMEM;
	}

	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	    inp = LIST_NEXT(inp, inp_list)) {
#ifdef __APPLE__
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
#else
		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
#endif
			inp_list[i++] = inp;
	}

	for (slot = 0; slot < N_TIME_WAIT_SLOTS; slot++) {
		struct inpcb *inpnxt;

		for (inp = time_wait_slots[slot].lh_first; inp && i < n; inp = inpnxt) {
			inpnxt = inp->inp_list.le_next;
			if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
				inp_list[i++] = inp;
		}
	}

	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xt.xt_inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL) {
				tcpcb_to_otcpcb((struct tcpcb *)inp_ppcb,
				    &xt.xt_tp);
			} else {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			}
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(tcbinfo.mtx);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

#if !CONFIG_EMBEDDED

static void
tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
{
	int i;

	otp->t_segq = (u_int32_t)(uintptr_t)tp->t_segq.lh_first;
	otp->t_dupacks = tp->t_dupacks;
	for (i = 0; i < TCPT_NTIMERS; i++)
		otp->t_timer[i] = tp->t_timer[i];
	otp->t_state = tp->t_state;
	otp->t_flags = tp->t_flags;
	otp->t_force = tp->t_force;
	otp->snd_una = tp->snd_una;
	otp->snd_max = tp->snd_max;
	otp->snd_nxt = tp->snd_nxt;
	otp->snd_up = tp->snd_up;
	otp->snd_wl1 = tp->snd_wl1;
	otp->snd_wl2 = tp->snd_wl2;
	otp->iss = tp->iss;
	otp->irs = tp->irs;
	otp->rcv_nxt = tp->rcv_nxt;
	otp->rcv_adv = tp->rcv_adv;
	otp->rcv_wnd = tp->rcv_wnd;
	otp->rcv_up = tp->rcv_up;
	otp->snd_wnd = tp->snd_wnd;
	otp->snd_cwnd = tp->snd_cwnd;
	otp->snd_ssthresh = tp->snd_ssthresh;
	otp->t_maxopd = tp->t_maxopd;
	otp->t_rcvtime = tp->t_rcvtime;
	otp->t_starttime = tp->t_starttime;
	otp->t_rtttime = tp->t_rtttime;
	otp->t_rtseq = tp->t_rtseq;
	otp->t_rxtcur = tp->t_rxtcur;
	otp->t_maxseg = tp->t_maxseg;
	otp->t_srtt = tp->t_srtt;
	otp->t_rttvar = tp->t_rttvar;
	otp->t_rxtshift = tp->t_rxtshift;
	otp->t_rttmin = tp->t_rttmin;
	otp->t_rttupdated = tp->t_rttupdated;
	otp->max_sndwnd = tp->max_sndwnd;
	otp->t_softerror = tp->t_softerror;
	otp->t_oobflags = tp->t_oobflags;
	otp->t_iobc = tp->t_iobc;
	otp->snd_scale = tp->snd_scale;
	otp->rcv_scale = tp->rcv_scale;
	otp->request_r_scale = tp->request_r_scale;
	otp->requested_s_scale = tp->requested_s_scale;
	otp->ts_recent = tp->ts_recent;
	otp->ts_recent_age = tp->ts_recent_age;
	otp->last_ack_sent = tp->last_ack_sent;
	otp->cc_send = tp->cc_send;
	otp->cc_recv = tp->cc_recv;
	otp->snd_recover = tp->snd_recover;
	otp->snd_cwnd_prev = tp->snd_cwnd_prev;
	otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
	otp->t_badrxtwin = tp->t_badrxtwin;
}


static int
tcp_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	int slot;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_shared(tcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xtcpcb64);
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(tcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(tcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(tcbinfo.mtx);
		return ENOMEM;
	}

	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	    inp = LIST_NEXT(inp, inp_list)) {
#ifdef __APPLE__
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
#else
		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
#endif
			inp_list[i++] = inp;
	}

	for (slot = 0; slot < N_TIME_WAIT_SLOTS; slot++) {
		struct inpcb *inpnxt;

		for (inp = time_wait_slots[slot].lh_first; inp && i < n; inp = inpnxt) {
			inpnxt = inp->inp_list.le_next;
			if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
				inp_list[i++] = inp;
		}
	}

	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xtcpcb64 xt;

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
			xt.xt_inpcb.inp_ppcb = (u_int64_t)(uintptr_t)inp->inp_ppcb;
			if (inp->inp_ppcb != NULL)
				tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb, &xt);
			if (inp->inp_socket)
				sotoxsocket64(inp->inp_socket, &xt.xt_inpcb.xi_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(tcbinfo.mtx);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64, CTLFLAG_RD, 0, 0,
    tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");

#endif /* !CONFIG_EMBEDDED */

void
tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	tcp_seq icmp_tcp_seq;
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;

	void (*notify)(struct inpcb *, int) = tcp_notify;

	struct icmp *icp;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT) && ip)
		notify = tcp_drop_syn_sent;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	/* Source quench is deprecated */
	else if (cmd == PRC_QUENCH)
		return;
	else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip) {
		icp = (struct icmp *)((caddr_t)ip
		    - offsetof(struct icmp, icmp_ip));
		th = (struct tcphdr *)((caddr_t)ip
		    + (IP_VHL_HL(ip->ip_vhl) << 2));
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			tcp_lock(inp->inp_socket, 1, 0);
			if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
				tcp_unlock(inp->inp_socket, 1, 0);
				return;
			}
			icmp_tcp_seq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
			    SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
				if (cmd == PRC_MSGSIZE) {

					/*
					 * MTU discovery:
					 * If we got a needfrag and there is a host route to the
					 * original destination, and the MTU is not locked, then
					 * set the MTU in the route to the suggested new value
					 * (if given) and then notify as usual.  The ULPs will
					 * notice that the MTU has changed and adapt accordingly.
					 * If no new MTU was suggested, then we guess a new one
					 * less than the current value.  If the new MTU is
					 * unreasonably small (defined by sysctl tcp_minmss), then
					 * we reset the MTU to the interface value and enable the
					 * lock bit, indicating that we are no longer doing MTU
					 * discovery.
					 */
					struct rtentry *rt;
					int mtu;
					struct sockaddr_in icmpsrc = { sizeof (struct sockaddr_in), AF_INET,
					    0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 } };
					icmpsrc.sin_addr = icp->icmp_ip.ip_dst;

					rt = rtalloc1((struct sockaddr *)&icmpsrc, 0,
					    RTF_CLONING | RTF_PRCLONING);
					if (rt != NULL) {
						RT_LOCK(rt);
						if ((rt->rt_flags & RTF_HOST) &&
						    !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
							mtu = ntohs(icp->icmp_nextmtu);
							if (!mtu)
								mtu = ip_next_mtu(rt->rt_rmx.
								    rmx_mtu, 1);
#if DEBUG_MTUDISC
							printf("MTU for %s reduced to %d\n",
							    inet_ntop(AF_INET,
							    &icmpsrc.sin_addr, ipv4str,
							    sizeof (ipv4str)), mtu);
#endif
							if (mtu < max(296, (tcp_minmss +
							    sizeof (struct tcpiphdr)))) {
								/* rt->rt_rmx.rmx_mtu =
								    rt->rt_ifp->if_mtu; */
								rt->rt_rmx.rmx_locks |= RTV_MTU;
							} else if (rt->rt_rmx.rmx_mtu > mtu) {
								rt->rt_rmx.rmx_mtu = mtu;
							}
						}
						RT_UNLOCK(rt);
						rtfree(rt);
					}
				}

				(*notify)(inp, inetctlerrmap[cmd]);
			}
			tcp_unlock(inp->inp_socket, 1, 0);
		}
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#if INET6
void
tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;
	/* Source quench is deprecated */
	else if (cmd == PRC_QUENCH)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6) {
		/*
		 * XXX: We assume that when ip6 is non-NULL,
		 * m and off are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcbinfo, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);
	} else {
		in6_pcbnotify(&tcbinfo, sa, 0,
		    (struct sockaddr *)(size_t)sa6_src, 0, cmd, notify);
	}
}
#endif /* INET6 */


/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * The ISNs in SYN-ACK packets have no monotonicity requirement,
 * and should be as unpredictable as possible to avoid the possibility
 * of spoofing and/or connection hijacking.  To satisfy this
 * requirement, SYN-ACK ISNs are generated via the arc4random()
 * function.  If exact RFC 1948 compliance is requested via sysctl,
 * these ISNs will be generated just like those in SYN packets.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * For more information on the theory of operation, please see
 * RFC 1948.
 *
 * Implementation details:
 *
 * Time is based on the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * Two sysctls control the generation of ISNs:
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * net.inet.tcp.strict_rfc1948 controls whether RFC 1948 is followed
 * strictly.  When strict compliance is requested, reseeding is
 * disabled and SYN-ACKs will be generated in the same manner as
 * SYNs.  Strict mode is disabled by default.
 *
 */
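
/*
 * In RFC 1948 terms, the code below computes
 *
 *	ISN = M + F(localhost, localport, remotehost, remoteport, secret)
 *
 * where F is an MD5 hash over the connection 4-tuple plus isn_secret,
 * and M is the time-derived component added in at the end.
 */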

#define ISN_BYTES_PER_SECOND 1048576

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;
	struct timeval timenow;
	u_char isn_secret[32];
	int isn_last_reseed = 0;
	MD5_CTX isn_ctx;
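	/*
	 * Note that isn_secret and isn_last_reseed are function locals here
	 * (the FreeBSD original keeps them as static globals), so
	 * isn_last_reseed is 0 on every call and the secret is re-read
	 * from the random pool each time through.
	 */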

	/* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. */
	if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT))
	    && tcp_strict_rfc1948 == 0)
#ifdef __APPLE__
		return random();
#else
		return arc4random();
#endif
	getmicrotime(&timenow);

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) ||
	    ((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	    < (u_int)timenow.tv_sec))) {
#ifdef __APPLE__
		read_random(&isn_secret, sizeof(isn_secret));
#else
		read_random_unlimited(&isn_secret, sizeof(isn_secret));
#endif
		isn_last_reseed = timenow.tv_sec;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#if INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz);
	return new_isn;
}


/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp && tp->t_state == TCPS_SYN_SENT)
		tcp_drop(tp, errno);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(
	struct inpcb *inp,
	__unused int errno
)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#if INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (tp) {
#if INET6
		if (isipv6)
			rt = tcp_rtlookup6(inp);
		else
#endif /* INET6 */
		rt = tcp_rtlookup(inp, IFSCOPE_NONE);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg =
#if INET6
			    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
			    tcp_mssdflt;

			/* Route locked during lookup above */
			if (rt != NULL)
				RT_UNLOCK(rt);
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu -
#if INET6
		    (isipv6 ?
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
		    sizeof(struct tcpiphdr)
#if INET6
		    )
#endif /* INET6 */
		    ;

		/* Route locked during lookup above */
		RT_UNLOCK(rt);

		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;

		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.  If a route is found, this routine will
 * hold the rtentry lock; the caller is responsible for unlocking.
 */
struct rtentry *
tcp_rtlookup(inp, input_ifscope)
	struct inpcb *inp;
	unsigned int input_ifscope;
{
	struct route *ro;
	struct rtentry *rt;
	struct tcpcb *tp;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);

	ro = &inp->inp_route;
	if ((rt = ro->ro_rt) != NULL)
		RT_LOCK(rt);

	if (rt == NULL || !(rt->rt_flags & RTF_UP) ||
	    rt->generation_id != route_generation) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			unsigned int ifscope;

			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inp->inp_faddr;

			/*
			 * If the socket was bound to an interface, then
			 * the bound-to interface takes precedence over
			 * the inbound interface passed in by the caller
			 * (if we get here as part of the output path then
			 * input_ifscope is IFSCOPE_NONE).
			 */
			ifscope = (inp->inp_flags & INP_BOUND_IF) ?
				inp->inp_boundif : input_ifscope;

			if (rt != NULL)
				RT_UNLOCK(rt);
			rtalloc_scoped_ign(ro, 0, ifscope);
			if ((rt = ro->ro_rt) != NULL)
				RT_LOCK(rt);
		}
	}

	/*
	 * Update MTU discovery determination.  Don't do it if:
	 *	1) it is disabled via the sysctl
	 *	2) the route isn't up
	 *	3) the MTU is locked (if it is, then discovery has been
	 *	   disabled)
	 */

	tp = intotcpcb(inp);

	if (!path_mtu_discovery || ((rt != NULL) &&
	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
		tp->t_flags &= ~TF_PMTUD;
	else
		tp->t_flags |= TF_PMTUD;

#if CONFIG_IFEF_NOWINDOWSCALE
	if (tcp_obey_ifef_nowindowscale &&
	    tp->t_state == TCPS_SYN_SENT && rt != NULL && rt->rt_ifp != NULL &&
	    (rt->rt_ifp->if_eflags & IFEF_NOWINDOWSCALE)) {
		/* Window scaling is not enabled on this interface */
		tp->t_flags &= ~TF_REQ_SCALE;
	}
#endif

	if (rt != NULL && rt->rt_ifp != NULL) {
		somultipages(inp->inp_socket,
		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
		tcp_set_tso(tp, rt->rt_ifp);
	}

	/*
	 * Caller needs to call RT_UNLOCK(rt).
	 */
	return rt;
}
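
/*
 * Illustrative caller pattern (not from this file): because tcp_rtlookup()
 * returns with the rtentry mutex held, every caller must pair the lookup
 * with RT_UNLOCK() once it has finished reading the route metrics, as in
 * this hedged sketch.
 */
#if 0	/* illustrative only, never compiled */
	struct rtentry *rt;

	rt = tcp_rtlookup(inp, IFSCOPE_NONE);
	if (rt != NULL) {
		uint32_t mtu = rt->rt_rmx.rmx_mtu;	/* safe: rt is locked */
		RT_UNLOCK(rt);				/* caller must unlock */
		/* ... use mtu ... */
	}
#endif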

#if INET6
struct rtentry *
tcp_rtlookup6(inp)
	struct inpcb *inp;
{
	struct route_in6 *ro6;
	struct rtentry *rt;
	struct tcpcb *tp;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);

	ro6 = &inp->in6p_route;
	if ((rt = ro6->ro_rt) != NULL)
		RT_LOCK(rt);

	if (rt == NULL || !(rt->rt_flags & RTF_UP) ||
	    rt->generation_id != route_generation) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
			struct sockaddr_in6 *dst6;

			dst6 = (struct sockaddr_in6 *)&ro6->ro_dst;
			dst6->sin6_family = AF_INET6;
			dst6->sin6_len = sizeof(*dst6);
			dst6->sin6_addr = inp->in6p_faddr;
			if (rt != NULL)
				RT_UNLOCK(rt);
			rtalloc_ign((struct route *)ro6, 0);
			if ((rt = ro6->ro_rt) != NULL)
				RT_LOCK(rt);
		}
	}

	tp = intotcpcb(inp);

	/*
	 * Update MTU discovery determination.  Don't do it if:
	 *	1) it is disabled via the sysctl
	 *	2) the route isn't up
	 *	3) the MTU is locked (if it is, then discovery has been
	 *	   disabled)
	 */

	if (!path_mtu_discovery || ((rt != NULL) &&
	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
		tp->t_flags &= ~TF_PMTUD;
	else
		tp->t_flags |= TF_PMTUD;

#if CONFIG_IFEF_NOWINDOWSCALE
	if (tcp_obey_ifef_nowindowscale &&
	    tp->t_state == TCPS_SYN_SENT && rt != NULL && rt->rt_ifp != NULL &&
	    (rt->rt_ifp->if_eflags & IFEF_NOWINDOWSCALE)) {
		/* Window scaling is not enabled on this interface */
		tp->t_flags &= ~TF_REQ_SCALE;
	}
#endif

	if (rt != NULL && rt->rt_ifp != NULL) {
		somultipages(inp->inp_socket,
		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
		tcp_set_tso(tp, rt->rt_ifp);
	}

	/*
	 * Caller needs to call RT_UNLOCK(rt).
	 */
	return rt;
}
#endif /* INET6 */

#if IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#if INET6
	struct ip6_hdr *ip6 = NULL;
#endif /* INET6 */
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);	/* MAC-OK */
	if (!m)
		return 0;

#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
			sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}
	m_free(m);
	return hdrsiz;
}
#endif /* IPSEC */
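
/*
 * Illustrative usage (not from this file): the MSS computation in
 * tcp_input.c can shave the ESP/AH overhead reported above off the
 * advertised segment size, so that encapsulated packets still fit the
 * path MTU.  A hedged sketch of that adjustment:
 */
#if 0	/* illustrative only, never compiled */
	int mss = mtu - sizeof(struct tcpiphdr);
#if IPSEC
	mss -= ipsec_hdrsiz_tcp(tp);	/* leave room for ESP/AH headers */
#endif
#endif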

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
	struct inpcb *inp;
{
	struct rtentry *rt;
	struct rmxp_tao *taop;

#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0)
		rt = tcp_rtlookup6(inp);
	else
#endif /* INET6 */
	rt = tcp_rtlookup(inp, IFSCOPE_NONE);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST)) {
		/* Route locked during lookup above */
		if (rt != NULL)
			RT_UNLOCK(rt);
		return NULL;
	}

	taop = rmx_taop(rt->rt_rmx);
	/* Route locked during lookup above */
	RT_UNLOCK(rt);
	return (taop);
}
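
/*
 * Illustrative usage (not from this file): the TAO (TCP Accelerated Open,
 * RFC 1644) cache is consulted the way tcp_mtudisc() does above -- fetch
 * the entry and use the peer's last-advertised MSS option, which is zero
 * when nothing has been cached yet.  A hedged sketch:
 */
#if 0	/* illustrative only, never compiled */
	struct rmxp_tao *taop;
	int offered = 0;

	if ((taop = tcp_gettaocache(inp)) != NULL)
		offered = taop->tao_mssopt;	/* 0 if never heard from peer */
	if (offered)
		mss = min(mss, offered);
#endif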

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
 * nothing left over in the cache.
 */
static void
tcp_cleartaocache()
{
}

int
tcp_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
	} else {
		panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (refcount)
		so->so_usecount++;
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
	return (0);
}

int
tcp_unlock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

#ifdef MORE_TCPLOCK_DEBUG
	printf("tcp_unlock: so=%p sopcb=%p lock=%p ref=%x lr=%p\n",
	    so, so->so_pcb, ((struct inpcb *)so->so_pcb)->inpcb_mtx,
	    so->so_usecount, lr_saved);
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0) {
		panic("tcp_unlock: so=%p usecount=%x lrh= %s\n",
		    so, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (so->so_pcb == NULL) {
		panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
		    so, so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	} else {
		lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx,
		    LCK_MTX_ASSERT_OWNED);
		so->unlock_lr[so->next_unlock_lr] = lr_saved;
		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
		lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
	}
	return (0);
}
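
/*
 * Illustrative usage (not from this file): callers take and drop the
 * per-socket lock through these entry points, passing a nonzero refcount
 * when the socket must stay pinned across the critical section.  Passing
 * NULL for lr lets each site record its own return address in the
 * lock_lr/unlock_lr debugging rings.  A hedged sketch of the pairing:
 */
#if 0	/* illustrative only, never compiled */
	tcp_lock(so, 1, NULL);		/* lock and take a use-count ref */
	/* ... inspect or modify so->so_pcb under the inpcb mutex ... */
	tcp_unlock(so, 1, NULL);	/* drop the ref and the mutex */
#endif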

lck_mtx_t *
tcp_getlock(
	struct socket *so,
	__unused int locktype)
{
	struct inpcb *inp = sotoinpcb(so);

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("tcp_getlock: so=%p usecount=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
		return (inp->inpcb_mtx);
	} else {
		panic("tcp_getlock: so=%p NULL so_pcb %s\n",
		    so, solockhistory_nr(so));
		return (so->so_proto->pr_domain->dom_mtx);
	}
}
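
/*
 * Illustrative wiring (not from this file): tcp_lock/tcp_unlock/tcp_getlock
 * are installed as the per-socket locking hooks of TCP's protosw entry, so
 * generic socket code can lock a TCP socket without knowing about inpcbs.
 * The field names below assume the usual xnu protosw layout; treat this as
 * a sketch of the registration, not the literal table.
 */
#if 0	/* illustrative only, never compiled */
	struct protosw tcp_proto_entry = {
		/* ... type, domain, protocol, input/output handlers ... */
		.pr_lock	= tcp_lock,
		.pr_unlock	= tcp_unlock,
		.pr_getlock	= tcp_getlock,
	};
#endif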

int32_t
tcp_sbspace(struct tcpcb *tp)
{
	struct sockbuf *sb = &tp->t_inpcb->inp_socket->so_rcv;
	int32_t space, newspace;

	space = ((int32_t) imin((sb->sb_hiwat - sb->sb_cc),
	    (sb->sb_mbmax - sb->sb_mbcnt)));
	if (space < 0)
		space = 0;

#if TRAFFIC_MGT
	if (tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_REGULATE) {
		if (tcp_background_io_enabled &&
		    tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_SUPPRESSED) {
			tp->t_flags |= TF_RXWIN0SENT;
			return 0;	/* advertise no space to force the TCP window closed */
		}
	}
#endif /* TRAFFIC_MGT */

	/*
	 * Avoid increasing the window size if the current window
	 * is already very low; we could be in "persist" mode and
	 * we could break some apps (see rdar://5409343).
	 */
	if (space < tp->t_maxseg)
		return space;

	/* Clip window size for slower links */
	if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0)
		return imin(space, slowlink_wsize);

	/*
	 * Check for resource constraints before over-adjusting the amount
	 * of space we can advertise in the TCP window size updates.
	 */
	if (sbspace_factor && (tp->t_inpcb->inp_pcbinfo->ipi_count < tcp_sockthreshold) &&
	    (total_mb_cnt / 8) < (mbstat.m_clusters / sbspace_factor)) {
		/* don't constrain the window if we have enough resources */
		if (space < (int32_t)(sb->sb_maxused - sb->sb_cc)) {
			space = (int32_t) imax((sb->sb_maxused - sb->sb_cc),
			    tp->rcv_maxbyps);
		}
		newspace = (int32_t) imax(((int32_t)sb->sb_maxused - sb->sb_cc),
		    (int32_t)tp->rcv_maxbyps);

		if (newspace > space)
			space = newspace;
	}
	return space;
}
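
/*
 * Worked example (illustrative, not part of the source): with
 * sb_hiwat = 65536, sb_cc = 16384, sb_mbmax = 131072 and sb_mbcnt = 32768,
 * the first clause yields min(49152, 98304) = 49152 bytes of advertisable
 * space; a TF_SLOWLINK connection with slowlink_wsize = 8192 would then be
 * clipped to 8192.  The fragment restates that arithmetic.
 */
#if 0	/* illustrative only, never compiled */
	int32_t space = imin(65536 - 16384, 131072 - 32768);	/* 49152 */
	space = imin(space, 8192);	/* slow-link clip -> 8192 */
#endif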
/*
 * Check TCP Segmentation Offload (TSO) capability for the given
 * connection and interface pair.
 */
void
tcp_set_tso(tp, ifp)
	struct tcpcb *tp;
	struct ifnet *ifp;
{
#if INET6
	struct inpcb *inp = tp->t_inpcb;
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;

	if (isipv6) {
		/*
		 * Radar 6921834: Disable TSO for IPv6 because there is no
		 * support for TSO & HW checksum in ip6_output yet.
		 */
#if 0
		if (ifp && ifp->if_hwassist & IFNET_TSO_IPV6) {
			tp->t_flags |= TF_TSO;
			if (ifp->if_tso_v6_mtu != 0)
				tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
			else
				tp->tso_max_segment_size = TCP_MAXWIN;
		} else
			tp->t_flags &= ~TF_TSO;
#endif
	} else
#endif /* INET6 */
	{
		if (ifp && ifp->if_hwassist & IFNET_TSO_IPV4) {
			tp->t_flags |= TF_TSO;
			if (ifp->if_tso_v4_mtu != 0)
				tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
			else
				tp->tso_max_segment_size = TCP_MAXWIN;
		} else
			tp->t_flags &= ~TF_TSO;
	}
}
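
/*
 * Illustrative usage (not from this file): once tcp_set_tso() has run, the
 * output path can consult TF_TSO and tso_max_segment_size to decide whether
 * to hand the hardware one large buffer instead of many MSS-sized segments.
 * A hedged sketch of that check:
 */
#if 0	/* illustrative only, never compiled */
	uint32_t chunk;

	if (tp->t_flags & TF_TSO)
		chunk = tp->tso_max_segment_size;	/* offload in one shot */
	else
		chunk = tp->t_maxseg;			/* classic per-MSS send */
#endif
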
/* DSEP Review Done pl-20051213-v02 @3253,@3391,@3400 */