/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.22 2001/08/22 00:59:12 silby Exp $
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#if INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/syslog.h>

#include <net/route.h>
#include <net/if.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#if INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#if IPSEC
#include <netinet6/ipsec.h>
#if INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#include <sys/md5.h>
#include <sys/kdebug.h>

#define DBG_FNC_TCP_CLOSE	NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))

/* temporary: for testing */
#if IPSEC
extern int ipsec_bypass;
#endif

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#if INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif
/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending overly small packets.
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
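
/*
 * Worked example of the attack the MINMSS check guards against: at
 * 10 Mbit/s (1,250,000 bytes/sec), a peer advertising an MSS of 20
 * forces roughly 1,250,000 / (20 + 40) ~= 20,800 packets per second
 * (40 bytes of IP + TCP headers per segment), versus roughly
 * 1,250,000 / (1460 + 40) ~= 830 packets per second at the Ethernet
 * default of 1460.  The per-packet costs, not the byte counts, are
 * what saturate the CPU and interface.
 */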

static int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_strict_rfc1948 = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW,
    &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

static void tcp_cleartaocache __P((void));
static void tcp_notify __P((struct inpcb *, int));

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	4096
#endif
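
/*
 * The power-of-two requirement exists so that hashinit() can hand back
 * a mask (hashmask == hashsize - 1) rather than forcing a modulo on
 * every lookup; a bucket index is then computed as, roughly,
 *
 *	bucket = hash & tcbinfo.hashmask;
 *
 * which matches (hash % hashsize) only when hashsize is a power of
 * two -- hence the fallback to 512 in tcp_init() below.
 */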

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNMENT	32
#define ALIGNM1		(ALIGNMENT - 1)
struct inp_tp {
        union {
                struct inpcb inp;
                char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
        } inp_tp_u;
        struct tcpcb tcb;
#ifndef __APPLE__
        struct callout inp_tp_rexmt, inp_tp_persist, inp_tp_keep, inp_tp_2msl;
        struct callout inp_tp_delack;
#endif
};
#undef ALIGNMENT
#undef ALIGNM1
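
/*
 * The union above rounds the inpcb up to a multiple of ALIGNMENT so
 * that the tcpcb that follows it starts on a 32-byte boundary.  The
 * expression (n + ALIGNM1) & ~ALIGNM1 is the usual round-up idiom for
 * power-of-two alignments; e.g. for a hypothetical 200-byte inpcb:
 *
 *	(200 + 31) & ~31  ==  231 & ~31  ==  224
 */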

static struct tcpcb dummy_tcb;

extern struct inpcbhead time_wait_slots[];
extern int cur_tw_slot;
extern u_long *delack_bitmask;
extern u_long route_generation;


int get_inpcb_str_size()
{
        return sizeof(struct inpcb);
}


int get_tcp_str_size()
{
        return sizeof(struct tcpcb);
}

int tcp_freeq __P((struct tcpcb *tp));


/*
 * Tcp initialization
 */
void
tcp_init()
{
        int hashsize = TCBHASHSIZE;
        vm_size_t str_size;
        int i;

        tcp_ccgen = 1;
        tcp_cleartaocache();

        tcp_delacktime = TCPTV_DELACK;
        tcp_keepinit = TCPTV_KEEP_INIT;
        tcp_keepidle = TCPTV_KEEP_IDLE;
        tcp_keepintvl = TCPTV_KEEPINTVL;
        tcp_maxpersistidle = TCPTV_KEEP_IDLE;
        tcp_msl = TCPTV_MSL;
        read_random(&tcp_now, sizeof(tcp_now));
        tcp_now = tcp_now & 0x7fffffff;	/* Starts tcp internal 500ms clock at a random value */

        LIST_INIT(&tcb);
        tcbinfo.listhead = &tcb;
#ifndef __APPLE__
        TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
#endif
        if (!powerof2(hashsize)) {
                printf("WARNING: TCB hash size not a power of 2\n");
                hashsize = 512; /* safe default */
        }
        tcp_tcbhashsize = hashsize;
        tcbinfo.hashsize = hashsize;
        tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
        tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
            &tcbinfo.porthashmask);
#ifdef __APPLE__
        str_size = (vm_size_t) sizeof(struct inp_tp);
        tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
#else
        tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
            ZONE_INTERRUPT, 0);
#endif

        tcp_reass_maxseg = nmbclusters / 16;
#ifndef __APPLE__
        TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
            &tcp_reass_maxseg);
#endif

#if INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
        if (max_protohdr < TCP_MINPROTOHDR)
                max_protohdr = TCP_MINPROTOHDR;
        if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
                panic("tcp_init");
#undef TCP_MINPROTOHDR
        tcbinfo.last_pcb = 0;
        dummy_tcb.t_state = TCP_NSTATES;
        dummy_tcb.t_flags = 0;
        tcbinfo.dummy_cb = (caddr_t) &dummy_tcb;
        in_pcb_nat_init(&tcbinfo, AF_INET, IPPROTO_TCP, SOCK_STREAM);

        delack_bitmask = _MALLOC((4 * hashsize)/32, M_PCB, M_WAITOK);
        if (delack_bitmask == 0)
                panic("Delack Memory");

        for (i = 0; i < (tcbinfo.hashsize / 32); i++)
                delack_bitmask[i] = 0;

        for (i = 0; i < N_TIME_WAIT_SLOTS; i++) {
                LIST_INIT(&time_wait_slots[i]);
        }
}
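
/*
 * Sizing note on the delack_bitmask allocation above: (4 * hashsize) / 32
 * bytes is hashsize / 8 bytes, i.e. hashsize bits -- apparently one bit
 * per TCB hash bucket -- which matches the hashsize / 32 u_long words
 * (4 bytes each) that the clearing loop touches.
 */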

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(tp, ip_ptr, tcp_ptr)
        struct tcpcb *tp;
        void *ip_ptr;
        void *tcp_ptr;
{
        struct inpcb *inp = tp->t_inpcb;
        struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#if INET6
        if ((inp->inp_vflag & INP_IPV6) != 0) {
                struct ip6_hdr *ip6;

                ip6 = (struct ip6_hdr *)ip_ptr;
                ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
                    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
                ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
                    (IPV6_VERSION & IPV6_VERSION_MASK);
                ip6->ip6_nxt = IPPROTO_TCP;
                ip6->ip6_plen = sizeof(struct tcphdr);
                ip6->ip6_src = inp->in6p_laddr;
                ip6->ip6_dst = inp->in6p_faddr;
                tcp_hdr->th_sum = 0;
        } else
#endif
        {
                struct ip *ip = (struct ip *) ip_ptr;

                ip->ip_vhl = IP_VHL_BORING;
                ip->ip_tos = 0;
                ip->ip_len = 0;
                ip->ip_id = 0;
                ip->ip_off = 0;
                ip->ip_ttl = 0;
                ip->ip_sum = 0;
                ip->ip_p = IPPROTO_TCP;
                ip->ip_src = inp->inp_laddr;
                ip->ip_dst = inp->inp_faddr;
                tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
                    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
        }

        tcp_hdr->th_sport = inp->inp_lport;
        tcp_hdr->th_dport = inp->inp_fport;
        tcp_hdr->th_seq = 0;
        tcp_hdr->th_ack = 0;
        tcp_hdr->th_x2 = 0;
        tcp_hdr->th_off = 5;
        tcp_hdr->th_flags = 0;
        tcp_hdr->th_win = 0;
        tcp_hdr->th_urp = 0;
}
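
/*
 * A note on the th_sum value left behind for IPv4: it is not a finished
 * checksum but a precomputed pseudo-header sum (source address,
 * destination address, and htons(length + protocol), with the length
 * here covering just the TCP header).  Callers that attach data, or
 * that rely on checksum offload (CSUM_TCP), fold the real segment
 * length in before the packet goes out; for IPv6 the field is simply
 * zeroed and the caller computes the checksum from scratch.
 */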

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(tp)
        struct tcpcb *tp;
{
        struct mbuf *m;
        struct tcptemp *n;

        m = m_get(M_DONTWAIT, MT_HEADER);
        if (m == NULL)
                return (0);
        m->m_len = sizeof(struct tcptemp);
        n = mtod(m, struct tcptemp *);

        tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
        return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
        struct tcpcb *tp;
        void *ipgen;
        register struct tcphdr *th;
        register struct mbuf *m;
        tcp_seq ack, seq;
        int flags;
{
        register int tlen;
        int win = 0;
        struct route *ro = 0;
        struct route sro;
        struct ip *ip;
        struct tcphdr *nth;
#if INET6
        struct route_in6 *ro6 = 0;
        struct route_in6 sro6;
        struct ip6_hdr *ip6;
        int isipv6;
#endif /* INET6 */
        int ipflags = 0;

#if INET6
        isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
        ip6 = ipgen;
#endif /* INET6 */
        ip = ipgen;

        if (tp) {
                if (!(flags & TH_RST)) {
                        win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
                        if (win > (long)TCP_MAXWIN << tp->rcv_scale)
                                win = (long)TCP_MAXWIN << tp->rcv_scale;
                }
#if INET6
                if (isipv6)
                        ro6 = &tp->t_inpcb->in6p_route;
                else
#endif /* INET6 */
                ro = &tp->t_inpcb->inp_route;
        } else {
#if INET6
                if (isipv6) {
                        ro6 = &sro6;
                        bzero(ro6, sizeof *ro6);
                } else
#endif /* INET6 */
                {
                        ro = &sro;
                        bzero(ro, sizeof *ro);
                }
        }
        if (m == 0) {
                m = m_gethdr(M_DONTWAIT, MT_HEADER);
                if (m == NULL)
                        return;
                tlen = 0;
                m->m_data += max_linkhdr;
#if INET6
                if (isipv6) {
                        bcopy((caddr_t)ip6, mtod(m, caddr_t),
                            sizeof(struct ip6_hdr));
                        ip6 = mtod(m, struct ip6_hdr *);
                        nth = (struct tcphdr *)(ip6 + 1);
                } else
#endif /* INET6 */
                {
                        bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
                        ip = mtod(m, struct ip *);
                        nth = (struct tcphdr *)(ip + 1);
                }
                bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
                flags = TH_ACK;
        } else {
                m_freem(m->m_next);
                m->m_next = 0;
                m->m_data = (caddr_t)ipgen;
                /* m_len is set later */
                tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#if INET6
                if (isipv6) {
                        xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
                        nth = (struct tcphdr *)(ip6 + 1);
                } else
#endif /* INET6 */
                {
                        xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
                        nth = (struct tcphdr *)(ip + 1);
                }
                if (th != nth) {
                        /*
                         * this is usually a case when an extension header
                         * exists between the IPv6 header and the
                         * TCP header.
                         */
                        nth->th_sport = th->th_sport;
                        nth->th_dport = th->th_dport;
                }
                xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
        }
#if INET6
        if (isipv6) {
                ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
                    tlen));
                tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
        } else
#endif
        {
                tlen += sizeof (struct tcpiphdr);
                ip->ip_len = tlen;
                ip->ip_ttl = ip_defttl;
        }
        m->m_len = tlen;
        m->m_pkthdr.len = tlen;
        m->m_pkthdr.rcvif = (struct ifnet *) 0;
        nth->th_seq = htonl(seq);
        nth->th_ack = htonl(ack);
        nth->th_x2 = 0;
        nth->th_off = sizeof (struct tcphdr) >> 2;
        nth->th_flags = flags;
        if (tp)
                nth->th_win = htons((u_short) (win >> tp->rcv_scale));
        else
                nth->th_win = htons((u_short)win);
        nth->th_urp = 0;
#if INET6
        if (isipv6) {
                nth->th_sum = 0;
                nth->th_sum = in6_cksum(m, IPPROTO_TCP,
                    sizeof(struct ip6_hdr),
                    tlen - sizeof(struct ip6_hdr));
                ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
                    ro6 && ro6->ro_rt ?
                    ro6->ro_rt->rt_ifp :
                    NULL);
        } else
#endif /* INET6 */
        {
                nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
                    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
                m->m_pkthdr.csum_flags = CSUM_TCP;
                m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
        }
#if TCPDEBUG
        if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
                tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#if IPSEC
        if (ipsec_bypass == 0 && ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
                m_freem(m);
                return;
        }
#endif
#if INET6
        if (isipv6) {
                (void)ip6_output(m, NULL, ro6, ipflags, NULL, NULL);
                if (ro6 == &sro6 && ro6->ro_rt) {
                        rtfree(ro6->ro_rt);
                        ro6->ro_rt = NULL;
                }
        } else
#endif /* INET6 */
        {
                (void) ip_output(m, NULL, ro, ipflags, NULL);
                if (ro == &sro && ro->ro_rt) {
                        rtfree(ro->ro_rt);
                        ro->ro_rt = NULL;
                }
        }
}
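
/*
 * For reference, the keepalive path described above calls this routine
 * with a NULL mbuf and the template built by tcp_maketemplate(), along
 * the lines of this sketch (the timer code also frees the template
 * mbuf afterwards):
 *
 *	t = tcp_maketemplate(tp);
 *	if (t != NULL)
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, (struct mbuf *)NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * The ack of rcv_nxt and the stale sequence number snd_una - 1 provoke
 * the peer into answering with an ACK that reports its current state.
 */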

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
        struct inpcb *inp;
{
        struct inp_tp *it;
        register struct tcpcb *tp;
        register struct socket *so = inp->inp_socket;
#if INET6
        int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

        if (so->cached_in_sock_layer == 0) {
                it = (struct inp_tp *)inp;
                tp = &it->tcb;
        } else
                tp = (struct tcpcb *) inp->inp_saved_ppcb;

        bzero((char *) tp, sizeof(struct tcpcb));
        LIST_INIT(&tp->t_segq);
        tp->t_maxseg = tp->t_maxopd =
#if INET6
            isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
            tcp_mssdflt;

#ifndef __APPLE__
        /* Set up our timeouts. */
        callout_init(tp->tt_rexmt = &it->inp_tp_rexmt);
        callout_init(tp->tt_persist = &it->inp_tp_persist);
        callout_init(tp->tt_keep = &it->inp_tp_keep);
        callout_init(tp->tt_2msl = &it->inp_tp_2msl);
        callout_init(tp->tt_delack = &it->inp_tp_delack);
#endif

        if (tcp_do_rfc1323)
                tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
        if (tcp_do_rfc1644)
                tp->t_flags |= TF_REQ_CC;
        tp->t_inpcb = inp;	/* XXX */
        /*
         * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
         * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
         * reasonable initial retransmit time.
         */
        tp->t_srtt = TCPTV_SRTTBASE;
        tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
        tp->t_rttmin = TCPTV_MIN;
        tp->t_rxtcur = TCPTV_RTOBASE;
        tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
        tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
        /*
         * IPv4 TTL initialization is necessary for an IPv6 socket as well,
         * because the socket may be bound to an IPv6 wildcard address,
         * which may match an IPv4-mapped IPv6 address.
         */
        inp->inp_ip_ttl = ip_defttl;
        inp->inp_ppcb = (caddr_t)tp;
        return (tp);		/* XXX */
}
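
/*
 * The srtt/rttvar initialization above is arranged so that the usual
 * retransmit-timeout computation, roughly
 *
 *	RTO = max(t_rttmin, ((t_srtt >> 3) + t_rttvar) >> 2)
 *
 * comes out to exactly TCPTV_RTOBASE before any samples arrive:
 * with TCPTV_SRTTBASE == 0 and TCP_RTTVAR_SHIFT == 4, t_rttvar is
 * (TCPTV_RTOBASE << 4) / 4 == 4 * TCPTV_RTOBASE, and
 * (0 + 4 * TCPTV_RTOBASE) >> 2 == TCPTV_RTOBASE.
 */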

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
        register struct tcpcb *tp;
        int errno;
{
        struct socket *so = tp->t_inpcb->inp_socket;

#ifdef __APPLE__
        switch (tp->t_state) {
        case TCPS_ESTABLISHED:
        case TCPS_FIN_WAIT_1:
        case TCPS_CLOSING:
        case TCPS_CLOSE_WAIT:
        case TCPS_LAST_ACK:
                break;
        }
#endif

        if (TCPS_HAVERCVDSYN(tp->t_state)) {
                tp->t_state = TCPS_CLOSED;
                (void) tcp_output(tp);
                tcpstat.tcps_drops++;
        } else
                tcpstat.tcps_conndrops++;
        if (errno == ETIMEDOUT && tp->t_softerror)
                errno = tp->t_softerror;
        so->so_error = errno;
        return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
        register struct tcpcb *tp;
{
        struct inpcb *inp = tp->t_inpcb;
        struct socket *so = inp->inp_socket;
#if INET6
        int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
        register struct rtentry *rt;
        int dosavessthresh;

        if (inp->inp_ppcb == NULL)	/* tcp_close was called previously, bail */
                return (NULL);

#ifndef __APPLE__
        /*
         * Make sure that all of our timers are stopped before we
         * delete the PCB.
         */
        callout_stop(tp->tt_rexmt);
        callout_stop(tp->tt_persist);
        callout_stop(tp->tt_keep);
        callout_stop(tp->tt_2msl);
        callout_stop(tp->tt_delack);
#else
        /* Clear the timers before we delete the PCB. */
        {
                int i;
                for (i = 0; i < TCPT_NTIMERS; i++) {
                        tp->t_timer[i] = 0;
                }
        }
#endif

        KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
        switch (tp->t_state) {
        case TCPS_ESTABLISHED:
        case TCPS_FIN_WAIT_1:
        case TCPS_CLOSING:
        case TCPS_CLOSE_WAIT:
        case TCPS_LAST_ACK:
                break;
        }

        /*
         * If we got enough samples through the srtt filter,
         * save the rtt and rttvar in the routing entry.
         * 'Enough' is arbitrarily defined as 16 samples.
         * 16 samples is enough for the srtt filter to converge
         * to within 5% of the correct value; fewer samples and
         * we could save a very bogus rtt.
         *
         * Don't update the default route's characteristics and don't
         * update anything that the user "locked".
         */
        if (tp->t_rttupdated >= 16) {
                register u_long i = 0;
#if INET6
                if (isipv6) {
                        struct sockaddr_in6 *sin6;

                        if ((rt = inp->in6p_route.ro_rt) == NULL)
                                goto no_valid_rt;
                        sin6 = (struct sockaddr_in6 *)rt_key(rt);
                        if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
                                goto no_valid_rt;
                } else
#endif /* INET6 */
                rt = inp->inp_route.ro_rt;
                if (rt == NULL ||
                    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr
                    == INADDR_ANY || rt->generation_id != route_generation) {
                        if (tp->t_state >= TCPS_CLOSE_WAIT)
                                tp->t_state = TCPS_CLOSING;

                        goto no_valid_rt;
                }

                if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
                        i = tp->t_srtt *
                            (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
                        if (rt->rt_rmx.rmx_rtt && i)
                                /*
                                 * filter this update to half the old & half
                                 * the new values, converting scale.
                                 * See route.h and tcp_var.h for a
                                 * description of the scaling constants.
                                 */
                                rt->rt_rmx.rmx_rtt =
                                    (rt->rt_rmx.rmx_rtt + i) / 2;
                        else
                                rt->rt_rmx.rmx_rtt = i;
                        tcpstat.tcps_cachedrtt++;
                }
                if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
                        i = tp->t_rttvar *
                            (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
                        if (rt->rt_rmx.rmx_rttvar && i)
                                rt->rt_rmx.rmx_rttvar =
                                    (rt->rt_rmx.rmx_rttvar + i) / 2;
                        else
                                rt->rt_rmx.rmx_rttvar = i;
                        tcpstat.tcps_cachedrttvar++;
                }
                /*
                 * The old comment here said:
                 * update the pipelimit (ssthresh) if it has been updated
                 * already or if a pipesize was specified & the threshold
                 * got below half the pipesize.  I.e., wait for bad news
                 * before we start updating, then update on both good
                 * and bad news.
                 *
                 * But we want to save the ssthresh even if no pipesize is
                 * specified explicitly in the route, because such
                 * connections still have an implicit pipesize specified
                 * by the global tcp_sendspace.  In the absence of a reliable
                 * way to calculate the pipesize, it will have to do.
                 */
                i = tp->snd_ssthresh;
                if (rt->rt_rmx.rmx_sendpipe != 0)
                        dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
                else
                        dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
                if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
                    i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
                    || dosavessthresh) {
                        /*
                         * convert the limit from user data bytes to
                         * packets then to packet data bytes.
                         */
                        i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
                        if (i < 2)
                                i = 2;
                        i *= (u_long)(tp->t_maxseg +
#if INET6
                            (isipv6 ? sizeof (struct ip6_hdr) +
                            sizeof (struct tcphdr) :
#endif
                            sizeof (struct tcpiphdr)
#if INET6
                            )
#endif
                            );
                        if (rt->rt_rmx.rmx_ssthresh)
                                rt->rt_rmx.rmx_ssthresh =
                                    (rt->rt_rmx.rmx_ssthresh + i) / 2;
                        else
                                rt->rt_rmx.rmx_ssthresh = i;
                        tcpstat.tcps_cachedssthresh++;
                }
        }
        rt = inp->inp_route.ro_rt;
        if (rt) {
                /*
                 * mark route for deletion if no information is
                 * cached.
                 */
                if ((tp->t_flags & TF_LQ_OVERFLOW) &&
                    ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0)) {
                        if (rt->rt_rmx.rmx_rtt == 0)
                                rt->rt_flags |= RTF_DELCLONE;
                }
        }
no_valid_rt:
        /* free the reassembly queue, if any */
        (void) tcp_freeq(tp);

#ifdef __APPLE__
        if (so->cached_in_sock_layer)
                inp->inp_saved_ppcb = (caddr_t) tp;
#endif

        inp->inp_ppcb = NULL;
        soisdisconnected(so);
#if INET6
        if (INP_CHECK_SOCKAF(so, AF_INET6))
                in6_pcbdetach(inp);
        else
#endif /* INET6 */
        in_pcbdetach(inp);
        tcpstat.tcps_closed++;
        KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed, 0, 0, 0, 0);
        return ((struct tcpcb *)0);
}

int
tcp_freeq(tp)
        struct tcpcb *tp;
{
        register struct tseg_qent *q;
        int rv = 0;

        while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
                LIST_REMOVE(q, tqe_q);
                m_freem(q->tqe_m);
                FREE(q, M_TSEGQ);
                tcp_reass_qsize--;
                rv = 1;
        }
        return (rv);
}

void
tcp_drain()
{
        if (do_tcpdrain) {
                struct inpcb *inpb;
                struct tcpcb *tcpb;
                struct tseg_qent *te;

                /*
                 * Walk the tcpbs, if existing, and flush the reassembly queue,
                 * if there is one...
                 * XXX: The "Net/3" implementation doesn't imply that the TCP
                 *      reassembly queue should be flushed, but in a situation
                 *      where we're really low on mbufs, this is potentially
                 *      useful.
                 */
                for (inpb = LIST_FIRST(tcbinfo.listhead); inpb;
                    inpb = LIST_NEXT(inpb, inp_list)) {
                        if ((tcpb = intotcpcb(inpb))) {
                                while ((te = LIST_FIRST(&tcpb->t_segq))
                                    != NULL) {
                                        LIST_REMOVE(te, tqe_q);
                                        m_freem(te->tqe_m);
                                        FREE(te, M_TSEGQ);
                                        tcp_reass_qsize--;
                                }
                        }
                }
        }
}

/*
 * Notify a tcp user of an asynchronous error;
 * store the error as a soft error.
 *
 * Do not wake up the user, since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(inp, error)
        struct inpcb *inp;
        int error;
{
        struct tcpcb *tp;

        if (inp == NULL)
                return; /* pcb is gone already */

        tp = (struct tcpcb *)inp->inp_ppcb;

        /*
         * Ignore some errors if we are hooked up.
         * If connection hasn't completed, has retransmitted several times,
         * and receives a second error, give up now.  This is better
         * than waiting a long time to establish a connection that
         * can never complete.
         */
        if (tp->t_state == TCPS_ESTABLISHED &&
            (error == EHOSTUNREACH || error == ENETUNREACH ||
            error == EHOSTDOWN)) {
                return;
        } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
            tp->t_softerror)
                tcp_drop(tp, error);
        else
                tp->t_softerror = error;
#if 0
        wakeup((caddr_t) &so->so_timeo);
        sorwakeup(so);
        sowwakeup(so);
#endif
}

static int
tcp_pcblist SYSCTL_HANDLER_ARGS
{
        int error, i, n, s;
        struct inpcb *inp, **inp_list;
        inp_gen_t gencnt;
        struct xinpgen xig;

        /*
         * The process of preparing the TCB list is too time-consuming and
         * resource-intensive to repeat twice on every request.
         */
        if (req->oldptr == 0) {
                n = tcbinfo.ipi_count;
                req->oldidx = 2 * (sizeof xig)
                    + (n + n/8) * sizeof(struct xtcpcb);
                return 0;
        }

        if (req->newptr != 0)
                return EPERM;

        /*
         * OK, now we're committed to doing something.
         */
        s = splnet();
        gencnt = tcbinfo.ipi_gencnt;
        n = tcbinfo.ipi_count;
        splx(s);

        xig.xig_len = sizeof xig;
        xig.xig_count = n;
        xig.xig_gen = gencnt;
        xig.xig_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xig, sizeof xig);
        if (error)
                return error;
        /*
         * We are done if there is no pcb
         */
        if (n == 0)
                return 0;

        inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
        if (inp_list == 0)
                return ENOMEM;

        s = splnet();
        for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
            inp = LIST_NEXT(inp, inp_list)) {
#ifdef __APPLE__
                if (inp->inp_gencnt <= gencnt)
#else
                if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
#endif
                        inp_list[i++] = inp;
        }
        splx(s);
        n = i;

        error = 0;
        for (i = 0; i < n; i++) {
                inp = inp_list[i];
                if (inp->inp_gencnt <= gencnt) {
                        struct xtcpcb xt;
                        caddr_t inp_ppcb;
                        xt.xt_len = sizeof xt;
                        /* XXX should avoid extra copy */
                        bcopy(inp, &xt.xt_inp, sizeof *inp);
                        inp_ppcb = inp->inp_ppcb;
                        if (inp_ppcb != NULL)
                                bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
                        else
                                bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
                        if (inp->inp_socket)
                                sotoxsocket(inp->inp_socket, &xt.xt_socket);
                        error = SYSCTL_OUT(req, &xt, sizeof xt);
                }
        }
        if (!error) {
                /*
                 * Give the user an updated idea of our state.
                 * If the generation differs from what we told
                 * her before, she knows that something happened
                 * while we were processing this request, and it
                 * might be necessary to retry.
                 */
                s = splnet();
                xig.xig_gen = tcbinfo.ipi_gencnt;
                xig.xig_sogen = so_gencnt;
                xig.xig_count = tcbinfo.ipi_count;
                splx(s);
                error = SYSCTL_OUT(req, &xig, sizeof xig);
        }
        FREE(inp_list, M_TEMP);
        return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

#ifndef __APPLE__
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
        struct sockaddr_in addrs[2];
        struct inpcb *inp;
        int error, s;

        error = suser(req->p);
        if (error)
                return (error);
        error = SYSCTL_IN(req, addrs, sizeof(addrs));
        if (error)
                return (error);
        s = splnet();
        inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
            addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
        if (inp == NULL || inp->inp_socket == NULL) {
                error = ENOENT;
                goto out;
        }
        error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
        splx(s);
        return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#if INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
        struct sockaddr_in6 addrs[2];
        struct inpcb *inp;
        int error, s, mapped = 0;

        error = suser(req->p);
        if (error)
                return (error);
        error = SYSCTL_IN(req, addrs, sizeof(addrs));
        if (error)
                return (error);
        if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
                if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
                        mapped = 1;
                else
                        return (EINVAL);
        }
        s = splnet();
        if (mapped == 1)
                inp = in_pcblookup_hash(&tcbinfo,
                    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
                    addrs[1].sin6_port,
                    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
                    addrs[0].sin6_port,
                    0, NULL);
        else
                inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
                    addrs[1].sin6_port,
                    &addrs[0].sin6_addr, addrs[0].sin6_port,
                    0, NULL);
        if (inp == NULL || inp->inp_socket == NULL) {
                error = ENOENT;
                goto out;
        }
        error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
            sizeof(struct ucred));
out:
        splx(s);
        return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif
#endif /* __APPLE__ */

void
tcp_ctlinput(cmd, sa, vip)
        int cmd;
        struct sockaddr *sa;
        void *vip;
{
        struct ip *ip = vip;
        struct tcphdr *th;
        struct in_addr faddr;
        struct inpcb *inp;
        struct tcpcb *tp;
        void (*notify) __P((struct inpcb *, int)) = tcp_notify;
        tcp_seq icmp_seq;
        int s;

        faddr = ((struct sockaddr_in *)sa)->sin_addr;
        if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
                return;

        if (cmd == PRC_QUENCH)
                notify = tcp_quench;
        else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
            cmd == PRC_UNREACH_PORT) && ip)
                notify = tcp_drop_syn_sent;
        else if (cmd == PRC_MSGSIZE)
                notify = tcp_mtudisc;
        else if (PRC_IS_REDIRECT(cmd)) {
                ip = 0;
                notify = in_rtchange;
        } else if (cmd == PRC_HOSTDEAD)
                ip = 0;
        else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
                return;
        if (ip) {
                s = splnet();
                th = (struct tcphdr *)((caddr_t)ip
                    + (IP_VHL_HL(ip->ip_vhl) << 2));
                inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
                    ip->ip_src, th->th_sport, 0, NULL);
                if (inp != NULL && inp->inp_socket != NULL) {
                        icmp_seq = htonl(th->th_seq);
                        tp = intotcpcb(inp);
                        if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
                            SEQ_LT(icmp_seq, tp->snd_max))
                                (*notify)(inp, inetctlerrmap[cmd]);
                }
                splx(s);
        } else
                in_pcbnotifyall(&tcb, faddr, inetctlerrmap[cmd], notify);
}

#if INET6
void
tcp6_ctlinput(cmd, sa, d)
        int cmd;
        struct sockaddr *sa;
        void *d;
{
        struct tcphdr th;
        void (*notify) __P((struct inpcb *, int)) = tcp_notify;
        struct ip6_hdr *ip6;
        struct mbuf *m;
        struct ip6ctlparam *ip6cp = NULL;
        const struct sockaddr_in6 *sa6_src = NULL;
        int off;
        struct tcp_portonly {
                u_int16_t th_sport;
                u_int16_t th_dport;
        } *thp;

        if (sa->sa_family != AF_INET6 ||
            sa->sa_len != sizeof(struct sockaddr_in6))
                return;

        if (cmd == PRC_QUENCH)
                notify = tcp_quench;
        else if (cmd == PRC_MSGSIZE)
                notify = tcp_mtudisc;
        else if (!PRC_IS_REDIRECT(cmd) &&
            ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
                return;

        /* if the parameter is from icmp6, decode it. */
        if (d != NULL) {
                ip6cp = (struct ip6ctlparam *)d;
                m = ip6cp->ip6c_m;
                ip6 = ip6cp->ip6c_ip6;
                off = ip6cp->ip6c_off;
                sa6_src = ip6cp->ip6c_src;
        } else {
                m = NULL;
                ip6 = NULL;
                off = 0;	/* fool gcc */
                sa6_src = &sa6_any;
        }

        if (ip6) {
                /*
                 * XXX: We assume that when ip6 is non-NULL,
                 * m and off are valid.
                 */

                /* check if we can safely examine src and dst ports */
                if (m->m_pkthdr.len < off + sizeof(*thp))
                        return;

                bzero(&th, sizeof(th));
                m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

                in6_pcbnotify(&tcb, sa, th.th_dport,
                    (struct sockaddr *)ip6cp->ip6c_src,
                    th.th_sport, cmd, notify);
        } else
                in6_pcbnotify(&tcb, sa, 0, (struct sockaddr *)sa6_src,
                    0, cmd, notify);
}
#endif /* INET6 */


/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * The ISNs in SYN-ACK packets have no monotonicity requirement,
 * and should be as unpredictable as possible to avoid the possibility
 * of spoofing and/or connection hijacking.  To satisfy this
 * requirement, SYN-ACK ISNs are generated via the arc4random()
 * function.  If exact RFC 1948 compliance is requested via sysctl,
 * these ISNs will be generated just like those in SYN packets.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * For more information on the theory of operation, please see
 * RFC 1948.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * Two sysctls control the generation of ISNs:
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * net.inet.tcp.strict_rfc1948 controls whether RFC 1948 is followed
 * strictly.  When strict compliance is requested, reseeding is
 * disabled and SYN-ACKs will be generated in the same manner as
 * SYNs.  Strict mode is disabled by default.
 *
 */
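
/*
 * In the notation of RFC 1948, the scheme implemented by tcp_new_isn()
 * below is
 *
 *	ISN = M(t) + F(localhost, localport, remotehost, remoteport, secret)
 *
 * where F is MD5 over the connection 4-tuple plus isn_secret, and M(t)
 * is the monotonic component meant to advance at ISN_BYTES_PER_SECOND
 * (one megabyte per second, as described above).
 */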

#define ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
        struct tcpcb *tp;
{
        u_int32_t md5_buffer[4];
        tcp_seq new_isn;
        struct timeval timenow;

        /* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. */
        if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT))
            && tcp_strict_rfc1948 == 0)
#ifdef __APPLE__
                return random();
#else
                return arc4random();
#endif

        microtime(&timenow);	/* sample the clock for M(t) and the reseed check */

        /* Seed if this is the first use, reseed if requested. */
        if ((isn_last_reseed == 0) ||
            ((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) &&
            (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
            < (u_int)timenow.tv_sec))) {
#ifdef __APPLE__
                read_random(&isn_secret, sizeof(isn_secret));
#else
                read_random_unlimited(&isn_secret, sizeof(isn_secret));
#endif
                isn_last_reseed = timenow.tv_sec;
        }

        /* Compute the md5 hash and return the ISN. */
        MD5Init(&isn_ctx);
        MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
        MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#if INET6
        if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
                MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
                    sizeof(struct in6_addr));
                MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
                    sizeof(struct in6_addr));
        } else
#endif
        {
                MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
                    sizeof(struct in_addr));
                MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
                    sizeof(struct in_addr));
        }
        MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
        MD5Final((u_char *) &md5_buffer, &isn_ctx);
        new_isn = (tcp_seq) md5_buffer[0];
        new_isn += timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz);
        return new_isn;
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
        struct inpcb *inp;
        int errno;
{
        struct tcpcb *tp = intotcpcb(inp);

        if (tp)
                tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(inp, errno)
        struct inpcb *inp;
        int errno;
{
        struct tcpcb *tp = intotcpcb(inp);

        if (tp && tp->t_state == TCPS_SYN_SENT)
                tcp_drop(tp, errno);
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(inp, errno)
        struct inpcb *inp;
        int errno;
{
        struct tcpcb *tp = intotcpcb(inp);
        struct rtentry *rt;
        struct rmxp_tao *taop;
        struct socket *so = inp->inp_socket;
        int offered;
        int mss;
#if INET6
        int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

        if (tp) {
#if INET6
                if (isipv6)
                        rt = tcp_rtlookup6(inp);
                else
#endif /* INET6 */
                rt = tcp_rtlookup(inp);
                if (!rt || !rt->rt_rmx.rmx_mtu) {
                        tp->t_maxopd = tp->t_maxseg =
#if INET6
                            isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
                            tcp_mssdflt;
                        return;
                }
                taop = rmx_taop(rt->rt_rmx);
                offered = taop->tao_mssopt;
                mss = rt->rt_rmx.rmx_mtu -
#if INET6
                    (isipv6 ?
                    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
                    sizeof(struct tcpiphdr)
#if INET6
                    )
#endif /* INET6 */
                    ;

                if (offered)
                        mss = min(mss, offered);
                /*
                 * XXX - The above conditional probably violates the TCP
                 * spec.  The problem is that, since we don't know the
                 * other end's MSS, we are supposed to use a conservative
                 * default.  But, if we do that, then MTU discovery will
                 * never actually take place, because the conservative
                 * default is much less than the MTUs typically seen
                 * on the Internet today.  For the moment, we'll sweep
                 * this under the carpet.
                 *
                 * The conservative default might not actually be a problem
                 * if the only case this occurs is when sending an initial
                 * SYN with options and data to a host we've never talked
                 * to before.  Then, they will reply with an MSS value which
                 * will get recorded and the new parameters should get
                 * recomputed.  For Further Study.
                 */
                if (tp->t_maxopd <= mss)
                        return;
                tp->t_maxopd = mss;

                if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
                    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
                        mss -= TCPOLEN_TSTAMP_APPA;
                if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
                    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
                        mss -= TCPOLEN_CC_APPA;

                if (so->so_snd.sb_hiwat < mss)
                        mss = so->so_snd.sb_hiwat;

                tp->t_maxseg = mss;

                tcpstat.tcps_mturesent++;
                tp->t_rtttime = 0;
                tp->snd_nxt = tp->snd_una;
                tcp_output(tp);
        }
}
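
/*
 * Worked example of the MSS recomputation above, for a plain IPv4 path
 * whose route reports rmx_mtu == 1500: mss starts at 1500 - 40 == 1460
 * (sizeof(struct tcpiphdr) being the 40 bytes of IP + TCP headers); if
 * both sides negotiated timestamps, TCPOLEN_TSTAMP_APPA trims a further
 * 12 bytes, leaving a 1448-byte t_maxseg while t_maxopd keeps the full
 * 1460.
 */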

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inp)
        struct inpcb *inp;
{
        struct route *ro;
        struct rtentry *rt;

        ro = &inp->inp_route;
        if (ro == NULL)
                return (NULL);
        rt = ro->ro_rt;
        if (rt == NULL || !(rt->rt_flags & RTF_UP) || rt->generation_id != route_generation) {
                /* No route yet, so try to acquire one */
                if (inp->inp_faddr.s_addr != INADDR_ANY) {
                        ro->ro_dst.sa_family = AF_INET;
                        ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
                        ((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
                            inp->inp_faddr;
                        rtalloc(ro);
                        rt = ro->ro_rt;
                }
        }
        return rt;
}

#if INET6
struct rtentry *
tcp_rtlookup6(inp)
        struct inpcb *inp;
{
        struct route_in6 *ro6;
        struct rtentry *rt;

        ro6 = &inp->in6p_route;
        rt = ro6->ro_rt;
        if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
                /* No route yet, so try to acquire one */
                if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
                        struct sockaddr_in6 *dst6;

                        dst6 = (struct sockaddr_in6 *)&ro6->ro_dst;
                        dst6->sin6_family = AF_INET6;
                        dst6->sin6_len = sizeof(*dst6);
                        dst6->sin6_addr = inp->in6p_faddr;
                        rtalloc((struct route *)ro6);
                        rt = ro6->ro_rt;
                }
        }
        return rt;
}
#endif /* INET6 */

#if IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
        struct tcpcb *tp;
{
        struct inpcb *inp;
        struct mbuf *m;
        size_t hdrsiz;
        struct ip *ip;
#if INET6
        struct ip6_hdr *ip6 = NULL;
#endif /* INET6 */
        struct tcphdr *th;

        if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
                return 0;
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (!m)
                return 0;

#if INET6
        if ((inp->inp_vflag & INP_IPV6) != 0) {
                ip6 = mtod(m, struct ip6_hdr *);
                th = (struct tcphdr *)(ip6 + 1);
                m->m_pkthdr.len = m->m_len =
                    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
                tcp_fillheaders(tp, ip6, th);
                hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
        } else
#endif /* INET6 */
        {
                ip = mtod(m, struct ip *);
                th = (struct tcphdr *)(ip + 1);
                m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
                tcp_fillheaders(tp, ip, th);
                hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
        }

        m_free(m);
        return hdrsiz;
}
#endif /*IPSEC*/

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
        struct inpcb *inp;
{
        struct rtentry *rt;

#if INET6
        if ((inp->inp_vflag & INP_IPV6) != 0)
                rt = tcp_rtlookup6(inp);
        else
#endif /* INET6 */
        rt = tcp_rtlookup(inp);

        /* Make sure this is a host route and is up. */
        if (rt == NULL ||
            (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
                return NULL;

        return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
 * nothing in the cache left over.
 */
static void
tcp_cleartaocache()
{
}