/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.22 2001/08/22 00:59:12 silby Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <kern/locks.h>
#include <kern/zalloc.h>

#include <net/route.h>
#include <net/if.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#if INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#if IPSEC
#include <netinet6/ipsec.h>
#if INET6
#include <netinet6/ipsec6.h>
#endif
#endif /* IPSEC */

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

#include <libkern/crypto/md5.h>
#include <sys/kdebug.h>

#define DBG_FNC_TCP_CLOSE	NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))

extern int tcp_lq_overflow;

/* temporary: for testing */
#if IPSEC
extern int ipsec_bypass;
#endif

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#if INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one. The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending. Set to zero to disable MINMSS
 * checking. This setting prevents us from sending too small packets.
 */
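/*
 * For example (illustrative arithmetic only): a 64 KB send that fits
 * in 46 segments at an MSS of 1448 needs roughly 3,277 segments at a
 * forced MSS of 20, about 70 times the per-packet work for the same
 * payload.
 */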
int	tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

/*
 * Number of TCP segments per second we accept from remote host
 * before we start to calculate average segment size. If average
 * segment size drops below the minimum TCP MSS we assume a DoS
 * attack and reset+drop the connection. Care has to be taken not to
 * set this value so small that it kills interactive connections
 * (telnet, SSH) which send many small packets.
 */
#ifdef FIX_WORKAROUND_FOR_3894301
__private_extern__ int	tcp_minmssoverload = TCP_MINMSSOVERLOAD;
#else
__private_extern__ int	tcp_minmssoverload = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
    &tcp_minmssoverload, 0, "Number of TCP Segments per Second allowed to "
    "be under the MINMSS Size");

static int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_strict_rfc1948 = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW,
    &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

static int	tcp_background_io_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_enabled, CTLFLAG_RW,
    &tcp_background_io_enabled, 0, "Background IO Enabled");

int	tcp_TCPTV_MIN = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_min, CTLFLAG_RW,
    &tcp_TCPTV_MIN, 0, "min rtt value allowed");

static void	tcp_cleartaocache(void);
static void	tcp_notify(struct inpcb *, int);
struct zone	*sack_hole_zone;

extern unsigned int total_mb_cnt;
extern unsigned int total_cl_cnt;
extern int sbspace_factor;
extern int tcp_sockthreshold;
extern int slowlink_wsize;	/* window correction for slow links */
extern int path_mtu_discovery;


/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	CONFIG_TCBHASHSIZE
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator. Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately. By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNMENT	32
#define ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
};
#undef ALIGNMENT
#undef ALIGNM1
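/*
 * Illustrative only: if sizeof(struct inpcb) were, say, 620 bytes,
 * the align member would size the union to (620 + 31) & ~31 == 640
 * bytes, so the tcpcb that follows always starts on a 32-byte
 * boundary and a single zone allocation yields both structures.
 */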

static struct tcpcb dummy_tcb;


extern struct	inpcbhead	time_wait_slots[];
extern int	cur_tw_slot;
extern u_long	*delack_bitmask;
extern u_long	route_generation;

int get_inpcb_str_size(void);
int get_tcp_str_size(void);


int get_inpcb_str_size(void)
{
	return sizeof(struct inpcb);
}


int get_tcp_str_size(void)
{
	return sizeof(struct tcpcb);
}

int	tcp_freeq(struct tcpcb *tp);


/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;
	vm_size_t str_size;
	int i;
	struct inpcbinfo *pcbinfo;

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	read_random(&tcp_now, sizeof(tcp_now));
	tcp_now = tcp_now & 0x3fffffff; /* Starts tcp internal 100ms clock at a random value */


	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	pcbinfo = &tcbinfo;
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.porthashmask);
	str_size = (vm_size_t) sizeof(struct inp_tp);
	tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
	sack_hole_zone = zinit(str_size, 120000*str_size, 8192, "sack_hole zone");
	tcp_reass_maxseg = nmbclusters / 16;

#if INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	dummy_tcb.t_state = TCP_NSTATES;
	dummy_tcb.t_flags = 0;
	tcbinfo.dummy_cb = (caddr_t) &dummy_tcb;

	/*
	 * allocate lock group attribute and group for tcp pcb mutexes
	 */
	pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->mtx_grp = lck_grp_alloc_init("tcppcb", pcbinfo->mtx_grp_attr);

	/*
	 * allocate the lock attribute for tcp pcb mutexes
	 */
	pcbinfo->mtx_attr = lck_attr_alloc_init();

	if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL) {
		printf("tcp_init: mutex not alloced!\n");
		return;	/* pretty much dead if this fails... */
	}


	in_pcb_nat_init(&tcbinfo, AF_INET, IPPROTO_TCP, SOCK_STREAM);

	delack_bitmask = _MALLOC((4 * hashsize)/32, M_PCB, M_WAITOK);
	if (delack_bitmask == 0)
		panic("Delack Memory");

	for (i = 0; i < (tcbinfo.hashsize / 32); i++)
		delack_bitmask[i] = 0;

	for (i = 0; i < N_TIME_WAIT_SLOTS; i++) {
		LIST_INIT(&time_wait_slots[i]);
	}

	timeout(tcp_fasttimo, NULL, hz/TCP_RETRANSHZ);
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(tp, ip_ptr, tcp_ptr)
	struct tcpcb *tp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
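		/*
		 * Pre-seed the checksum field with the IPv4 pseudo-header
		 * sum (source, destination, protocol and TCP length) so
		 * the output path, or offloading hardware, only has to
		 * add the TCP header and payload.  Because the Internet
		 * checksum is an additive one's-complement sum, the
		 * 16-bit protocol and length words of the pseudo-header
		 * may be pre-added into the single htons() value below.
		 */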
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(tp)
	struct tcpcb *tp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(
	struct tcpcb *tp,
	void *ipgen,
	register struct tcphdr *th,
	register struct mbuf *m,
	tcp_seq ack,
	tcp_seq seq,
	int flags,
#if CONFIG_FORCE_OUT_IFP
	ifnet_t ifp
#else
	__unused ifnet_t ifp
#endif
	)
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;
	struct ip *ip;
	struct tcphdr *nth;
#if INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;

#if INET6
	isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp) {
		if (!(flags & TH_RST)) {
			win = tcp_sbspace(tp);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
#if INET6
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
#endif /* INET6 */
		ro = &tp->t_inpcb->inp_route;
	} else {
#if INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else
#endif /* INET6 */
		{
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#if INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#if INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#if INET6
	if (isipv6) {
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = 0;
#if CONFIG_MACF_NET
	if (tp != NULL && tp->t_inpcb != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		mac_mbuf_label_associate_inpcb(tp->t_inpcb, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_netinet_tcp_reply(m);
	}
#endif

#if CONFIG_IP_EDGEHOLE
	if (tp && tp->t_inpcb)
		ip_edgehole_mbuf_tag(tp->t_inpcb, m);
#endif

	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#if INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
		    ro6 && ro6->ro_rt ?
		    ro6->ro_rt->rt_ifp :
		    NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#if TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#if IPSEC
	if (ipsec_bypass == 0 && ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
		m_freem(m);
		return;
	}
#endif
#if INET6
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, ipflags, NULL, NULL, 0);
		if (ro6 == &sro6 && ro6->ro_rt) {
			rtfree(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else
#endif /* INET6 */
	{
#if CONFIG_FORCE_OUT_IFP
		ifp = (tp && tp->t_inpcb) ? tp->t_inpcb->pdp_ifp :
		    (ifp && (ifp->if_flags & IFF_POINTOPOINT) != 0) ? ifp : NULL;
#endif
		(void) ip_output_list(m, 0, NULL, ro, ipflags, NULL, ifp);
		if (ro == &sro && ro->ro_rt) {
			rtfree(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct inp_tp *it;
	register struct tcpcb *tp;
	register struct socket *so = inp->inp_socket;
#if INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (so->cached_in_sock_layer == 0) {
		it = (struct inp_tp *)inp;
		tp = &it->tcb;
	}
	else
		tp = (struct tcpcb *) inp->inp_saved_ppcb;

	bzero((char *) tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd =
#if INET6
	    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
	    tcp_mssdflt;

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	tp->sack_enable = tcp_do_sack;
	TAILQ_INIT(&tp->snd_holes);
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
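	/*
	 * With srtt == 0, srtt + 4 * rttvar works out to exactly
	 * TCPTV_RTOBASE once the TCP_RTTVAR_SHIFT scaling is undone,
	 * matching the t_rxtcur assignment below.
	 */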
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_TCPTV_MIN;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = 0;
	tp->t_bw_rtttime = 0;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	register struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#if INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	register struct rtentry *rt;
	int dosavessthresh;

	if (inp->inp_ppcb == NULL)	/* tcp_close was called previously, bail */
		return NULL;

	/* Clear the timers before we delete the PCB. */
	{
		int i;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			tp->t_timer[i] = 0;
		}
	}

	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
	switch (tp->t_state) {
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_CLOSING:
	case TCPS_CLOSE_WAIT:
	case TCPS_LAST_ACK:
		break;
	}

	/*
	 * If another thread for this tcp is currently in ip (indicated by
	 * the TF_SENDINPROG flag), defer the cleanup until after it returns
	 * back to tcp.  This is done to serialize the close until after all
	 * pending output is finished, in order to avoid having the PCB be
	 * detached and the cached route cleaned, only for ip to cache the
	 * route back into the PCB again.  Note that we've cleared all the
	 * timers at this point.  Set TF_CLOSING to indicate to tcp_output()
	 * that it should call us again once it returns from ip; at that
	 * point both flags should be cleared and we can proceed further
	 * with the cleanup.
	 */
	if (tp->t_flags & (TF_CLOSING|TF_SENDINPROG)) {
		tp->t_flags |= TF_CLOSING;
		return (NULL);
	}

	lck_mtx_lock(rt_mtx);
	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		register u_long i = 0;

#if INET6
		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		}
		else
#endif /* INET6 */
		rt = inp->inp_route.ro_rt;
		if (rt == NULL ||
		    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr
		    == INADDR_ANY || rt->generation_id != route_generation) {
			if (tp->t_state >= TCPS_CLOSE_WAIT)
				tp->t_state = TCPS_CLOSING;

			goto no_valid_rt;
		}

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
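		/*
		 * The conversion above takes t_srtt from its internal
		 * fixed-point representation (TCP_RETRANSHZ ticks scaled
		 * by TCP_RTT_SCALE) to the routing layer's RTM_RTTUNIT
		 * units per second; the rttvar conversion below is
		 * analogous, differing only in its scale factor.
		 */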
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
#if INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
			    sizeof (struct tcphdr) :
#endif
			    sizeof (struct tcpiphdr)
#if INET6
			    )
#endif
			    );
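			/*
			 * For example (IPv4, illustrative numbers only):
			 * an ssthresh of 8960 bytes with t_maxseg == 1448
			 * rounds to (8960 + 724) / 1448 == 6 packets, and
			 * then becomes 6 * (1448 + 40) == 8928 bytes of
			 * on-the-wire data including headers.
			 */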
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
	rt = inp->inp_route.ro_rt;
	if (rt) {
		/*
		 * mark route for deletion if no information is
		 * cached.
		 */
		if ((so->so_flags & SOF_OVERFLOW) && tcp_lq_overflow &&
		    ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0)) {
			if (rt->rt_rmx.rmx_rtt == 0)
				rt->rt_flags |= RTF_DELCLONE;
		}
	}
no_valid_rt:
	/* free the reassembly queue, if any */
	lck_mtx_unlock(rt_mtx);

	(void) tcp_freeq(tp);

	tcp_free_sackholes(tp);

	/* Free the packet list */
	if (tp->t_pktlist_head != NULL)
		m_freem_list(tp->t_pktlist_head);
	TCP_PKTLIST_CLEAR(tp);

#ifdef __APPLE__
	if (so->cached_in_sock_layer)
		inp->inp_saved_ppcb = (caddr_t) tp;
#endif

	soisdisconnected(so);
#if INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif /* INET6 */
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed, 0, 0, 0, 0);
	return ((struct tcpcb *)0);
}

int
tcp_freeq(tp)
	struct tcpcb *tp;
{
	register struct tseg_qent *q;
	int rv = 0;

	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
		tcp_reass_qsize--;
		rv = 1;
	}
	return (rv);
}

void
tcp_drain()
{
	if (do_tcpdrain)
	{
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 *	reassembly queue should be flushed, but in a situation
		 *	where we're really low on mbufs, this is potentially
		 *	useful.
		 */
		if (!lck_rw_try_lock_exclusive(tcbinfo.mtx)) /* do it next time if the lock is in use */
			return;

		for (inpb = LIST_FIRST(tcbinfo.listhead); inpb;
		    inpb = LIST_NEXT(inpb, inp_list)) {
			if ((tcpb = intotcpcb(inpb))) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					FREE(te, M_TSEGQ);
					tcp_reass_qsize--;
				}
			}
		}
		lck_rw_done(tcbinfo.mtx);

	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp;

	if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD))
		return; /* pcb is gone already */

	tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	int slot;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_shared(tcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(tcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(tcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(tcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(tcbinfo.mtx);
		return ENOMEM;
	}

	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	    inp = LIST_NEXT(inp, inp_list)) {
#ifdef __APPLE__
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
#else
		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
#endif
			inp_list[i++] = inp;
	}

	for (slot = 0; slot < N_TIME_WAIT_SLOTS; slot++) {
		struct inpcb *inpnxt;

		for (inp = time_wait_slots[slot].lh_first; inp && i < n; inp = inpnxt) {
			inpnxt = inp->inp_list.le_next;
			if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
				inp_list[i++] = inp;
		}
	}

	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xt.xt_inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL) {
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			}
			else
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(tcbinfo.mtx);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

#ifndef __APPLE__
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error, s;

	error = suser(req->p);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(*(kauth_cred_t)0));
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#if INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s, mapped = 0;

	error = suser(req->p);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	s = splnet();
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
		    addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
	    sizeof(*(kauth_cred_t)0));
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif
#endif /* __APPLE__ */

void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT) && ip)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip
		    + (IP_VHL_HL(ip->ip_vhl) << 2));
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			tcp_lock(inp->inp_socket, 1, 0);
			if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
				tcp_unlock(inp->inp_socket, 1, 0);
				return;
			}
			icmp_seq = htonl(th->th_seq);
			tp = intotcpcb(inp);
			if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
			    SEQ_LT(icmp_seq, tp->snd_max))
				(*notify)(inp, inetctlerrmap[cmd]);
			tcp_unlock(inp->inp_socket, 1, 0);
		}
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#if INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	void (*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6) {
		/*
		 * XXX: We assume that when IPV6 is non-NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcbinfo, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);
	} else
		in6_pcbnotify(&tcbinfo, sa, 0, (struct sockaddr *)sa6_src,
		    0, cmd, notify);
}
#endif /* INET6 */


/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * The ISNs in SYN-ACK packets have no monotonicity requirement,
 * and should be as unpredictable as possible to avoid the possibility
 * of spoofing and/or connection hijacking.  To satisfy this
 * requirement, SYN-ACK ISNs are generated via the arc4random()
 * function.  If exact RFC 1948 compliance is requested via sysctl,
 * these ISNs will be generated just like those in SYN packets.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * For more information on the theory of operation, please see
 * RFC 1948.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * Two sysctls control the generation of ISNs:
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 * net.inet.tcp.strict_rfc1948 controls whether RFC 1948 is followed
 * strictly.  When strict compliance is requested, reseeding is
 * disabled and SYN-ACKs will be generated in the same manner as
 * SYNs.  Strict mode is disabled by default.
 *
 */

#define ISN_BYTES_PER_SECOND 1048576
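/*
 * In outline (a sketch of tcp_new_isn() below, not a normative
 * statement of RFC 1948):
 *
 *	ISN = MD5(fport, lport, faddr, laddr, secret)[0]
 *	      + time component
 *
 * where the secret is filled from the kernel RNG as controlled by
 * the sysctls above, and the time component supplies the monotonic
 * increase required for a given connection 4-tuple.
 */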

tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;
	struct timeval timenow;
	static u_char isn_secret[32];
	static int isn_last_reseed = 0;
	MD5_CTX isn_ctx;

	/* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. */
	if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT))
	    && tcp_strict_rfc1948 == 0)
#ifdef __APPLE__
		return random();
#else
		return arc4random();
#endif
	getmicrotime(&timenow);

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) ||
	    ((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	    < (u_int)timenow.tv_sec))) {
#ifdef __APPLE__
		read_random(&isn_secret, sizeof(isn_secret));
#else
		read_random_unlimited(&isn_secret, sizeof(isn_secret));
#endif
		isn_last_reseed = timenow.tv_sec;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#if INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz);
	return new_isn;
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(
	struct inpcb *inp,
	__unused int errno
	)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp) {
		tp->snd_cwnd = tp->t_maxseg;
		tp->t_bytes_acked = 0;
	}
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp && tp->t_state == TCPS_SYN_SENT)
		tcp_drop(tp, errno);
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(
	struct inpcb *inp,
	__unused int errno
	)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#if INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (tp) {
		lck_mtx_lock(rt_mtx);
#if INET6
		if (isipv6)
			rt = tcp_rtlookup6(inp);
		else
#endif /* INET6 */
		rt = tcp_rtlookup(inp);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg =
#if INET6
			    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
			    tcp_mssdflt;
			lck_mtx_unlock(rt_mtx);
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu -
#if INET6
		    (isipv6 ?
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
		    sizeof(struct tcpiphdr)
#if INET6
		    )
#endif /* INET6 */
		    ;

		lck_mtx_unlock(rt_mtx);
		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
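		/*
		 * When both sides negotiated timestamps, every segment
		 * carries TCPOLEN_TSTAMP_APPA (12) option bytes, so that
		 * much is subtracted from the per-segment payload here.
		 */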

		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inp)
	struct inpcb *inp;
{
	struct route *ro;
	struct rtentry *rt;
	struct tcpcb *tp;

	ro = &inp->inp_route;
	if (ro == NULL)
		return (NULL);
	rt = ro->ro_rt;

	lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);

	if (rt == NULL || !(rt->rt_flags & RTF_UP) || rt->generation_id != route_generation) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inp->inp_faddr;
			rtalloc_ign_locked(ro, 0UL);
			rt = ro->ro_rt;
		}
	}
	if (rt != NULL && rt->rt_ifp != NULL)
		somultipages(inp->inp_socket,
		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));

	/*
	 * Update MTU discovery determination. Don't do it if:
	 *	1) it is disabled via the sysctl
	 *	2) the route isn't up
	 *	3) the MTU is locked (if it is, then discovery has been
	 *	   disabled)
	 */

	tp = intotcpcb(inp);

	if (!path_mtu_discovery || ((rt != NULL) &&
	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
		tp->t_flags &= ~TF_PMTUD;
	else
		tp->t_flags |= TF_PMTUD;

#ifdef IFEF_NOWINDOWSCALE
	if (tp->t_state == TCPS_SYN_SENT && rt != NULL && rt->rt_ifp != NULL &&
	    (rt->rt_ifp->if_eflags & IFEF_NOWINDOWSCALE) != 0)
	{
		// Window scaling is not enabled on this interface
		tp->t_flags &= ~(TF_REQ_SCALE);
	}
#endif

	return rt;
}

#if INET6
struct rtentry *
tcp_rtlookup6(inp)
	struct inpcb *inp;
{
	struct route_in6 *ro6;
	struct rtentry *rt;
	struct tcpcb *tp;

	lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);

	ro6 = &inp->in6p_route;
	rt = ro6->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
			struct sockaddr_in6 *dst6;

			dst6 = (struct sockaddr_in6 *)&ro6->ro_dst;
			dst6->sin6_family = AF_INET6;
			dst6->sin6_len = sizeof(*dst6);
			dst6->sin6_addr = inp->in6p_faddr;
			rtalloc_ign_locked((struct route *)ro6, 0UL);
			rt = ro6->ro_rt;
		}
	}
	if (rt != NULL && rt->rt_ifp != NULL)
		somultipages(inp->inp_socket,
		    (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));

	tp = intotcpcb(inp);

	/*
	 * Update MTU discovery determination. Don't do it if:
	 *	1) it is disabled via the sysctl
	 *	2) the route isn't up
	 *	3) the MTU is locked (if it is, then discovery has been
	 *	   disabled)
	 */

	if (!path_mtu_discovery || ((rt != NULL) &&
	    (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
		tp->t_flags &= ~TF_PMTUD;
	else
		tp->t_flags |= TF_PMTUD;

	return rt;
}
#endif /* INET6 */

#if IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#if INET6
	struct ip6_hdr *ip6 = NULL;
#endif /* INET6 */
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);	/* MAC-OK */
	if (!m)
		return 0;

#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}
	m_free(m);
	return hdrsiz;
}
#endif /* IPSEC */

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
	struct inpcb *inp;
{
	struct rtentry *rt;
	struct rmxp_tao *taop;

	lck_mtx_lock(rt_mtx);
#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0)
		rt = tcp_rtlookup6(inp);
	else
#endif /* INET6 */
	rt = tcp_rtlookup(inp);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST)) {
		lck_mtx_unlock(rt_mtx);
		return NULL;
	}

	taop = rmx_taop(rt->rt_rmx);
	lck_mtx_unlock(rt_mtx);
	return (taop);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the routing
 * tables are initialized at the same time as TCP, so there is nothing
 * left over in the cache.
 */
static void
tcp_cleartaocache()
{
}

int
tcp_lock(so, refcount, lr)
	struct socket *so;
	int refcount;
	int lr;
{
	int lr_saved;

	if (lr == 0)
		lr_saved = (unsigned int) __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb) {
		lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
	}
	else {
		panic("tcp_lock: so=%p NO PCB! lr=%x\n", so, lr_saved);
		lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
	}

	if (so->so_usecount < 0)
		panic("tcp_lock: so=%p so_pcb=%p lr=%x ref=%x\n",
		    so, so->so_pcb, lr_saved, so->so_usecount);

	if (refcount)
		so->so_usecount++;
	so->lock_lr[so->next_lock_lr] = (u_int32_t)lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}

int
tcp_unlock(so, refcount, lr)
	struct socket *so;
	int refcount;
	int lr;
{
	int lr_saved;

	if (lr == 0)
		lr_saved = (unsigned int) __builtin_return_address(0);
	else
		lr_saved = lr;

#ifdef MORE_TCPLOCK_DEBUG
	printf("tcp_unlock: so=%p sopcb=%x lock=%x ref=%x lr=%x\n",
	    so, so->so_pcb, ((struct inpcb *)so->so_pcb)->inpcb_mtx, so->so_usecount, lr_saved);
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0)
		panic("tcp_unlock: so=%p usecount=%x\n", so, so->so_usecount);
	if (so->so_pcb == NULL)
		panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
	else {
		lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
		so->unlock_lr[so->next_unlock_lr] = (u_int32_t)lr_saved;
		so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
		lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
	}
	return (0);
}

lck_mtx_t *
tcp_getlock(
	struct socket *so,
	__unused int locktype)
{
	struct inpcb *inp = sotoinpcb(so);

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("tcp_getlock: so=%p usecount=%x\n", so, so->so_usecount);
		return (inp->inpcb_mtx);
	}
	else {
		panic("tcp_getlock: so=%p NULL so_pcb\n", so);
		return (so->so_proto->pr_domain->dom_mtx);
	}
}

long
tcp_sbspace(struct tcpcb *tp)
{
	struct sockbuf *sb = &tp->t_inpcb->inp_socket->so_rcv;
	long space, newspace;

	space = ((long) lmin((sb->sb_hiwat - sb->sb_cc),
	    (sb->sb_mbmax - sb->sb_mbcnt)));
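	/*
	 * The receive window is limited both by the configured byte
	 * high-water mark (sb_hiwat) and by the mbuf memory actually
	 * available to the socket buffer (sb_mbmax); the smaller of
	 * the two remaining amounts governs.
	 */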

#if TRAFFIC_MGT
	if (tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) {
		if (tcp_background_io_enabled &&
		    tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_SUPPRESSED) {
			tp->t_flags |= TF_RXWIN0SENT;
			return 0;	/* Triggers TCP window closing by reporting there is no space */
		}
	}
#endif /* TRAFFIC_MGT */

	/* Avoid increasing window size if the current window
	 * is already very low, we could be in "persist" mode and
	 * we could break some apps (see rdar://5409343)
	 */

	if (space < tp->t_maxseg)
		return space;

	/* Clip window size for slower link */

	if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0)
		return lmin(space, slowlink_wsize);

	/*
	 * Check for resource constraints before over-adjusting the amount
	 * of space we can advertise in the TCP window size updates.
	 */

	if (sbspace_factor && (tp->t_inpcb->inp_pcbinfo->ipi_count < tcp_sockthreshold) &&
	    (total_mb_cnt / 8) < (mbstat.m_clusters / sbspace_factor)) {
		if (space < (long)(sb->sb_maxused - sb->sb_cc)) {	/* make sure we don't constrain the window if we have enough resources */
			space = (long) lmax((sb->sb_maxused - sb->sb_cc), tp->rcv_maxbyps);
		}
		newspace = (long) lmax(((long)sb->sb_maxused - sb->sb_cc), (long)tp->rcv_maxbyps);

		if (newspace > space)
			space = newspace;
	}
	return space;
}
/* DSEP Review Done pl-20051213-v02 @3253,@3391,@3400 */