1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
61 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.22 2001/08/22 00:59:12 silby Exp $
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/callout.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/domain.h>
78 #include <sys/proc.h>
79 #include <sys/kauth.h>
80 #include <sys/socket.h>
81 #include <sys/socketvar.h>
82 #include <sys/protosw.h>
83 #include <sys/random.h>
84 #include <sys/syslog.h>
85 #include <kern/locks.h>
86 #include <kern/zalloc.h>
87
88 #include <net/route.h>
89 #include <net/if.h>
90
91 #define _IP_VHL
92 #include <netinet/in.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #if INET6
96 #include <netinet/ip6.h>
97 #endif
98 #include <netinet/in_pcb.h>
99 #if INET6
100 #include <netinet6/in6_pcb.h>
101 #endif
102 #include <netinet/in_var.h>
103 #include <netinet/ip_var.h>
104 #if INET6
105 #include <netinet6/ip6_var.h>
106 #endif
107 #include <netinet/tcp.h>
108 #include <netinet/tcp_fsm.h>
109 #include <netinet/tcp_seq.h>
110 #include <netinet/tcp_timer.h>
111 #include <netinet/tcp_var.h>
112 #if INET6
113 #include <netinet6/tcp6_var.h>
114 #endif
115 #include <netinet/tcpip.h>
116 #if TCPDEBUG
117 #include <netinet/tcp_debug.h>
118 #endif
119 #include <netinet6/ip6protosw.h>
120
121 #if IPSEC
122 #include <netinet6/ipsec.h>
123 #if INET6
124 #include <netinet6/ipsec6.h>
125 #endif
126 #endif /*IPSEC*/
127
128 #if CONFIG_MACF_NET
129 #include <security/mac_framework.h>
130 #endif /* CONFIG_MACF_NET */
131
132 #include <libkern/crypto/md5.h>
133 #include <sys/kdebug.h>
134
135 #define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
136
137 extern int tcp_lq_overflow;
138
139 /* temporary: for testing */
140 #if IPSEC
141 extern int ipsec_bypass;
142 #endif
143
144 int tcp_mssdflt = TCP_MSS;
145 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
146 &tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
147
148 #if INET6
149 int tcp_v6mssdflt = TCP6_MSS;
150 SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
151 CTLFLAG_RW, &tcp_v6mssdflt , 0,
152 "Default TCP Maximum Segment Size for IPv6");
153 #endif
154
155 /*
156 * Minimum MSS we accept and use. This prevents DoS attacks where
157  * we are forced to a ridiculously low MSS like 20 and send hundreds
158 * of packets instead of one. The effect scales with the available
159 * bandwidth and quickly saturates the CPU and network interface
160 * with packet generation and sending. Set to zero to disable MINMSS
161 * checking. This setting prevents us from sending too small packets.
162 */
163 int tcp_minmss = TCP_MINMSS;
164 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
165     &tcp_minmss , 0, "Minimum TCP Maximum Segment Size");
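
/*
 * Illustrative sketch only -- the actual clamp is applied where the MSS
 * option is processed (tcp_input.c / tcp_mss()), not in this file: an
 * advertised MSS below tcp_minmss is raised to the floor, unless the
 * floor is 0 (checking disabled).
 */
#if 0
	if (tcp_minmss != 0 && mss < tcp_minmss)
		mss = tcp_minmss;	/* refuse ridiculously small segments */
#endif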
166
167 /*
168 * Number of TCP segments per second we accept from remote host
169 * before we start to calculate average segment size. If average
170 * segment size drops below the minimum TCP MSS we assume a DoS
171  * attack and reset+drop the connection.  Take care not to set this
172  * value so small that it kills interactive connections
173  * (telnet, SSH), which send many small packets.
174 */
175 #ifdef FIX_WORKAROUND_FOR_3894301
176 __private_extern__ int tcp_minmssoverload = TCP_MINMSSOVERLOAD;
177 #else
178 __private_extern__ int tcp_minmssoverload = 0;
179 #endif
180 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
181     &tcp_minmssoverload , 0, "Number of TCP Segments per Second allowed to "
182     "be under the MINMSS Size");
183
184 static int tcp_do_rfc1323 = 1;
185 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
186 &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
187
188 static int tcp_do_rfc1644 = 0;
189 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
190 &tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");
191
192 static int tcp_tcbhashsize = 0;
193 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
194 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
195
196 static int do_tcpdrain = 0;
197 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
198 "Enable tcp_drain routine for extra help when low on mbufs");
199
200 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
201 &tcbinfo.ipi_count, 0, "Number of active PCBs");
202
203 static int icmp_may_rst = 1;
204 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
205 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
206
207 static int tcp_strict_rfc1948 = 0;
208 SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW,
209 &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");
210
211 static int tcp_isn_reseed_interval = 0;
212 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
213 &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
214 static int tcp_background_io_enabled = 1;
215 SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_enabled, CTLFLAG_RW,
216 &tcp_background_io_enabled, 0, "Background IO Enabled");
217
218 int tcp_TCPTV_MIN = 1;
219 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_min, CTLFLAG_RW,
220 &tcp_TCPTV_MIN, 0, "min rtt value allowed");
221
222 static void tcp_cleartaocache(void);
223 static void tcp_notify(struct inpcb *, int);
224 struct zone *sack_hole_zone;
225
226 extern unsigned int total_mb_cnt;
227 extern unsigned int total_cl_cnt;
228 extern int sbspace_factor;
229 extern int tcp_sockthreshold;
230 extern int slowlink_wsize; /* window correction for slow links */
231 extern int path_mtu_discovery;
232
233
234 /*
235 * Target size of TCP PCB hash tables. Must be a power of two.
236 *
237 * Note that this can be overridden by the kernel environment
238 * variable net.inet.tcp.tcbhashsize
239 */
240 #ifndef TCBHASHSIZE
241 #define TCBHASHSIZE CONFIG_TCBHASHSIZE
242 #endif
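
/*
 * Why a power of two: hashinit() returns hashmask == hashsize - 1, so a
 * bucket can be chosen with a cheap mask instead of a modulo.  A minimal
 * sketch (hashval stands for any precomputed hash of the 4-tuple):
 */
#if 0
	head = &tcbinfo.hashbase[hashval & tcbinfo.hashmask]; /* == hashval % hashsize */
#endif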
243
244 /*
245 * This is the actual shape of what we allocate using the zone
246 * allocator. Doing it this way allows us to protect both structures
247 * using the same generation count, and also eliminates the overhead
248 * of allocating tcpcbs separately. By hiding the structure here,
249 * we avoid changing most of the rest of the code (although it needs
250 * to be changed, eventually, for greater efficiency).
251 */
252 #define ALIGNMENT 32
253 #define ALIGNM1 (ALIGNMENT - 1)
254 struct inp_tp {
255 union {
256 struct inpcb inp;
257 char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
258 } inp_tp_u;
259 struct tcpcb tcb;
260 };
261 #undef ALIGNMENT
262 #undef ALIGNM1
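
/*
 * A minimal sketch of what the combined allocation buys (illustrative
 * only, not a code path in this file): a single zalloc() from
 * tcbinfo.ipi_zone yields both control blocks, at fixed offsets, with
 * one lifetime.
 */
#if 0
	struct inp_tp *it = (struct inp_tp *)zalloc(tcbinfo.ipi_zone);
	struct inpcb *inp = &it->inp_tp_u.inp;	/* protocol-independent part */
	struct tcpcb *tp = &it->tcb;		/* TCP state, same allocation */
#endif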
263
264 static struct tcpcb dummy_tcb;
265
266
267 extern struct inpcbhead time_wait_slots[];
268 extern int cur_tw_slot;
269 extern u_long *delack_bitmask;
270 extern u_long route_generation;
271
272 int get_inpcb_str_size(void);
273 int get_tcp_str_size(void);
274
275
276 int get_inpcb_str_size(void)
277 {
278 return sizeof(struct inpcb);
279 }
280
281
282 int get_tcp_str_size(void)
283 {
284 return sizeof(struct tcpcb);
285 }
286
287 int tcp_freeq(struct tcpcb *tp);
288
289
290 /*
291 * Tcp initialization
292 */
293 void
294 tcp_init()
295 {
296 int hashsize = TCBHASHSIZE;
297 vm_size_t str_size;
298 int i;
299 struct inpcbinfo *pcbinfo;
300
301 tcp_ccgen = 1;
302 tcp_cleartaocache();
303
304 tcp_keepinit = TCPTV_KEEP_INIT;
305 tcp_keepidle = TCPTV_KEEP_IDLE;
306 tcp_keepintvl = TCPTV_KEEPINTVL;
307 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
308 tcp_msl = TCPTV_MSL;
309 read_random(&tcp_now, sizeof(tcp_now));
310 tcp_now = tcp_now & 0x3fffffff; /* Starts tcp internal 100ms clock at a random value */
311
312
313 LIST_INIT(&tcb);
314 tcbinfo.listhead = &tcb;
315 pcbinfo = &tcbinfo;
316 if (!powerof2(hashsize)) {
317 printf("WARNING: TCB hash size not a power of 2\n");
318 hashsize = 512; /* safe default */
319 }
320 tcp_tcbhashsize = hashsize;
321 tcbinfo.hashsize = hashsize;
322 tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
323 tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
324 &tcbinfo.porthashmask);
325 str_size = (vm_size_t) sizeof(struct inp_tp);
326 tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
327 sack_hole_zone = zinit(str_size, 120000*str_size, 8192, "sack_hole zone");
328 tcp_reass_maxseg = nmbclusters / 16;
329
330 #if INET6
331 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
332 #else /* INET6 */
333 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
334 #endif /* INET6 */
335 if (max_protohdr < TCP_MINPROTOHDR)
336 max_protohdr = TCP_MINPROTOHDR;
337 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
338 panic("tcp_init");
339 #undef TCP_MINPROTOHDR
340 dummy_tcb.t_state = TCP_NSTATES;
341 dummy_tcb.t_flags = 0;
342 tcbinfo.dummy_cb = (caddr_t) &dummy_tcb;
343
344 /*
345 * allocate lock group attribute and group for tcp pcb mutexes
346 */
347 pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
348 pcbinfo->mtx_grp = lck_grp_alloc_init("tcppcb", pcbinfo->mtx_grp_attr);
349
350 /*
351 * allocate the lock attribute for tcp pcb mutexes
352 */
353 pcbinfo->mtx_attr = lck_attr_alloc_init();
354
355 if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL) {
356 printf("tcp_init: mutex not alloced!\n");
357 return; /* pretty much dead if this fails... */
358 }
359
360
361 in_pcb_nat_init(&tcbinfo, AF_INET, IPPROTO_TCP, SOCK_STREAM);
362
363 delack_bitmask = _MALLOC((4 * hashsize)/32, M_PCB, M_WAITOK);
364 if (delack_bitmask == 0)
365 panic("Delack Memory");
366
367 for (i=0; i < (tcbinfo.hashsize / 32); i++)
368 delack_bitmask[i] = 0;
369
370 for (i=0; i < N_TIME_WAIT_SLOTS; i++) {
371 LIST_INIT(&time_wait_slots[i]);
372 }
373
374 timeout(tcp_fasttimo, NULL, hz/TCP_RETRANSHZ);
375 }
376
377 /*
378 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
379 * tcp_template used to store this data in mbufs, but we now recopy it out
380 * of the tcpcb each time to conserve mbufs.
381 */
382 void
383 tcp_fillheaders(tp, ip_ptr, tcp_ptr)
384 struct tcpcb *tp;
385 void *ip_ptr;
386 void *tcp_ptr;
387 {
388 struct inpcb *inp = tp->t_inpcb;
389 struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
390
391 #if INET6
392 if ((inp->inp_vflag & INP_IPV6) != 0) {
393 struct ip6_hdr *ip6;
394
395 ip6 = (struct ip6_hdr *)ip_ptr;
396 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
397 (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
398 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
399 (IPV6_VERSION & IPV6_VERSION_MASK);
400 ip6->ip6_nxt = IPPROTO_TCP;
401 ip6->ip6_plen = sizeof(struct tcphdr);
402 ip6->ip6_src = inp->in6p_laddr;
403 ip6->ip6_dst = inp->in6p_faddr;
404 tcp_hdr->th_sum = 0;
405 } else
406 #endif
407 {
408 struct ip *ip = (struct ip *) ip_ptr;
409
410 ip->ip_vhl = IP_VHL_BORING;
411 ip->ip_tos = 0;
412 ip->ip_len = 0;
413 ip->ip_id = 0;
414 ip->ip_off = 0;
415 ip->ip_ttl = 0;
416 ip->ip_sum = 0;
417 ip->ip_p = IPPROTO_TCP;
418 ip->ip_src = inp->inp_laddr;
419 ip->ip_dst = inp->inp_faddr;
420 tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
421 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
422 }
423
424 tcp_hdr->th_sport = inp->inp_lport;
425 tcp_hdr->th_dport = inp->inp_fport;
426 tcp_hdr->th_seq = 0;
427 tcp_hdr->th_ack = 0;
428 tcp_hdr->th_x2 = 0;
429 tcp_hdr->th_off = 5;
430 tcp_hdr->th_flags = 0;
431 tcp_hdr->th_win = 0;
432 tcp_hdr->th_urp = 0;
433 }
434
435 /*
436 * Create template to be used to send tcp packets on a connection.
437 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
438 * use for this function is in keepalives, which use tcp_respond.
439 */
440 struct tcptemp *
441 tcp_maketemplate(tp)
442 struct tcpcb *tp;
443 {
444 struct mbuf *m;
445 struct tcptemp *n;
446
447 m = m_get(M_DONTWAIT, MT_HEADER);
448 if (m == NULL)
449 return (0);
450 m->m_len = sizeof(struct tcptemp);
451 n = mtod(m, struct tcptemp *);
452
453 tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
454 return (n);
455 }
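
/*
 * Sketch of the keepalive path that consumes this template (modeled on
 * the keepalive timer in tcp_timer.c; treat the exact call sequence as
 * an assumption of this sketch):
 */
#if 0
	struct tcptemp *t = tcp_maketemplate(tp);
	if (t != NULL) {
		tcp_respond(tp, t->tt_ipgen, &t->tt_t, (struct mbuf *)NULL,
		    tp->rcv_nxt, tp->snd_una - 1, 0, NULL);
		(void) m_free(dtom(t));	/* template lives in a single mbuf */
	}
#endif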
456
457 /*
458 * Send a single message to the TCP at address specified by
459 * the given TCP/IP header. If m == 0, then we make a copy
460  * of the tcpiphdr at th and send directly to the addressed host.
461  * This is used to force keep alive messages out using the TCP
462  * template for a connection.  If flags are given then we send
463  * a message back to the TCP which originated the segment th,
464 * and discard the mbuf containing it and any other attached mbufs.
465 *
466 * In any case the ack and sequence number of the transmitted
467 * segment are as specified by the parameters.
468 *
469 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
470 */
471 void
472 tcp_respond(
473 struct tcpcb *tp,
474 void *ipgen,
475 register struct tcphdr *th,
476 register struct mbuf *m,
477 tcp_seq ack,
478 tcp_seq seq,
479 int flags,
480 #if CONFIG_FORCE_OUT_IFP
481 ifnet_t ifp
482 #else
483 __unused ifnet_t ifp
484 #endif
485 )
486 {
487 register int tlen;
488 int win = 0;
489 struct route *ro = 0;
490 struct route sro;
491 struct ip *ip;
492 struct tcphdr *nth;
493 #if INET6
494 struct route_in6 *ro6 = 0;
495 struct route_in6 sro6;
496 struct ip6_hdr *ip6;
497 int isipv6;
498 #endif /* INET6 */
499 int ipflags = 0;
500
501 #if INET6
502 isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
503 ip6 = ipgen;
504 #endif /* INET6 */
505 ip = ipgen;
506
507 if (tp) {
508 if (!(flags & TH_RST)) {
509 win = tcp_sbspace(tp);
510 if (win > (long)TCP_MAXWIN << tp->rcv_scale)
511 win = (long)TCP_MAXWIN << tp->rcv_scale;
512 }
513 #if INET6
514 if (isipv6)
515 ro6 = &tp->t_inpcb->in6p_route;
516 else
517 #endif /* INET6 */
518 ro = &tp->t_inpcb->inp_route;
519 } else {
520 #if INET6
521 if (isipv6) {
522 ro6 = &sro6;
523 bzero(ro6, sizeof *ro6);
524 } else
525 #endif /* INET6 */
526 {
527 ro = &sro;
528 bzero(ro, sizeof *ro);
529 }
530 }
531 if (m == 0) {
532 m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */
533 if (m == NULL)
534 return;
535 tlen = 0;
536 m->m_data += max_linkhdr;
537 #if INET6
538 if (isipv6) {
539 bcopy((caddr_t)ip6, mtod(m, caddr_t),
540 sizeof(struct ip6_hdr));
541 ip6 = mtod(m, struct ip6_hdr *);
542 nth = (struct tcphdr *)(ip6 + 1);
543 } else
544 #endif /* INET6 */
545 {
546 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
547 ip = mtod(m, struct ip *);
548 nth = (struct tcphdr *)(ip + 1);
549 }
550 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
551 flags = TH_ACK;
552 } else {
553 m_freem(m->m_next);
554 m->m_next = 0;
555 m->m_data = (caddr_t)ipgen;
556 /* m_len is set later */
557 tlen = 0;
558 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
559 #if INET6
560 if (isipv6) {
561 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
562 nth = (struct tcphdr *)(ip6 + 1);
563 } else
564 #endif /* INET6 */
565 {
566 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
567 nth = (struct tcphdr *)(ip + 1);
568 }
569 if (th != nth) {
570 /*
571 * this is usually a case when an extension header
572 * exists between the IPv6 header and the
573 * TCP header.
574 */
575 nth->th_sport = th->th_sport;
576 nth->th_dport = th->th_dport;
577 }
578 xchg(nth->th_dport, nth->th_sport, n_short);
579 #undef xchg
580 }
581 #if INET6
582 if (isipv6) {
583 ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
584 tlen));
585 tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
586 } else
587 #endif
588 {
589 tlen += sizeof (struct tcpiphdr);
590 ip->ip_len = tlen;
591 ip->ip_ttl = ip_defttl;
592 }
593 m->m_len = tlen;
594 m->m_pkthdr.len = tlen;
595 m->m_pkthdr.rcvif = 0;
596 #if CONFIG_MACF_NET
597 if (tp != NULL && tp->t_inpcb != NULL) {
598 /*
599 * Packet is associated with a socket, so allow the
600 * label of the response to reflect the socket label.
601 */
602 mac_mbuf_label_associate_inpcb(tp->t_inpcb, m);
603 } else {
604 /*
605 * Packet is not associated with a socket, so possibly
606 * update the label in place.
607 */
608 mac_netinet_tcp_reply(m);
609 }
610 #endif
611 nth->th_seq = htonl(seq);
612 nth->th_ack = htonl(ack);
613 nth->th_x2 = 0;
614 nth->th_off = sizeof (struct tcphdr) >> 2;
615 nth->th_flags = flags;
616 if (tp)
617 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
618 else
619 nth->th_win = htons((u_short)win);
620 nth->th_urp = 0;
621 #if INET6
622 if (isipv6) {
623 nth->th_sum = 0;
624 nth->th_sum = in6_cksum(m, IPPROTO_TCP,
625 sizeof(struct ip6_hdr),
626 tlen - sizeof(struct ip6_hdr));
627 ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
628 ro6 && ro6->ro_rt ?
629 ro6->ro_rt->rt_ifp :
630 NULL);
631 } else
632 #endif /* INET6 */
633 {
634 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
635 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
636 m->m_pkthdr.csum_flags = CSUM_TCP;
637 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
638 }
639 #if TCPDEBUG
640 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
641 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
642 #endif
643 #if IPSEC
644 if (ipsec_bypass == 0 && ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
645 m_freem(m);
646 return;
647 }
648 #endif
649 #if INET6
650 if (isipv6) {
651 (void)ip6_output(m, NULL, ro6, ipflags, NULL, NULL, 0);
652 if (ro6 == &sro6 && ro6->ro_rt) {
653 rtfree(ro6->ro_rt);
654 ro6->ro_rt = NULL;
655 }
656 } else
657 #endif /* INET6 */
658 {
659 #if CONFIG_FORCE_OUT_IFP
660 ifp = (tp && tp->t_inpcb) ? tp->t_inpcb->pdp_ifp :
661 (ifp && (ifp->if_flags & IFF_POINTOPOINT) != 0) ? ifp : NULL;
662 #endif
663 (void) ip_output_list(m, 0, NULL, ro, ipflags, NULL, ifp);
664 if (ro == &sro && ro->ro_rt) {
665 rtfree(ro->ro_rt);
666 ro->ro_rt = NULL;
667 }
668 }
669 }
670
671 /*
672 * Create a new TCP control block, making an
673 * empty reassembly queue and hooking it to the argument
674 * protocol control block. The `inp' parameter must have
675 * come from the zone allocator set up in tcp_init().
676 */
677 struct tcpcb *
678 tcp_newtcpcb(inp)
679 struct inpcb *inp;
680 {
681 struct inp_tp *it;
682 register struct tcpcb *tp;
683 register struct socket *so = inp->inp_socket;
684 #if INET6
685 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
686 #endif /* INET6 */
687
688 if (so->cached_in_sock_layer == 0) {
689 it = (struct inp_tp *)inp;
690 tp = &it->tcb;
691 }
692 else
693 tp = (struct tcpcb *) inp->inp_saved_ppcb;
694
695 bzero((char *) tp, sizeof(struct tcpcb));
696 LIST_INIT(&tp->t_segq);
697 tp->t_maxseg = tp->t_maxopd =
698 #if INET6
699 isipv6 ? tcp_v6mssdflt :
700 #endif /* INET6 */
701 tcp_mssdflt;
702
703 if (tcp_do_rfc1323)
704 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
705 tp->sack_enable = tcp_do_sack;
706 TAILQ_INIT(&tp->snd_holes);
707 tp->t_inpcb = inp; /* XXX */
708 /*
709 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
710  * rtt estimate.  Set rttvar so that srtt + 4 * rttvar (the standard
711  * RTO formula) descales to TCPTV_RTOBASE for the initial retransmit time.
712 */
713 tp->t_srtt = TCPTV_SRTTBASE;
714 tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
715 tp->t_rttmin = tcp_TCPTV_MIN;
716 tp->t_rxtcur = TCPTV_RTOBASE;
717 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
718 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
719 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
720 tp->t_rcvtime = 0;
721 tp->t_bw_rtttime = 0;
722 /*
723 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
724 * because the socket may be bound to an IPv6 wildcard address,
725 * which may match an IPv4-mapped IPv6 address.
726 */
727 inp->inp_ip_ttl = ip_defttl;
728 inp->inp_ppcb = (caddr_t)tp;
729 return (tp); /* XXX */
730 }
731
732 /*
733 * Drop a TCP connection, reporting
734 * the specified error. If connection is synchronized,
735 * then send a RST to peer.
736 */
737 struct tcpcb *
738 tcp_drop(tp, errno)
739 register struct tcpcb *tp;
740 int errno;
741 {
742 struct socket *so = tp->t_inpcb->inp_socket;
743
744 if (TCPS_HAVERCVDSYN(tp->t_state)) {
745 tp->t_state = TCPS_CLOSED;
746 (void) tcp_output(tp);
747 tcpstat.tcps_drops++;
748 } else
749 tcpstat.tcps_conndrops++;
750 if (errno == ETIMEDOUT && tp->t_softerror)
751 errno = tp->t_softerror;
752 so->so_error = errno;
753 return (tcp_close(tp));
754 }
755
756 /*
757 * Close a TCP control block:
758 * discard all space held by the tcp
759 * discard internet protocol block
760 * wake up any sleepers
761 */
762 struct tcpcb *
763 tcp_close(tp)
764 register struct tcpcb *tp;
765 {
766 struct inpcb *inp = tp->t_inpcb;
767 struct socket *so = inp->inp_socket;
768 #if INET6
769 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
770 #endif /* INET6 */
771 register struct rtentry *rt;
772 int dosavessthresh;
773
774 if ( inp->inp_ppcb == NULL) /* tcp_close was called previously, bail */
775 return NULL;
776
777 /* Clear the timers before we delete the PCB. */
778 {
779 int i;
780 for (i = 0; i < TCPT_NTIMERS; i++) {
781 tp->t_timer[i] = 0;
782 }
783 }
784
785 KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp,0,0,0,0);
786 switch (tp->t_state)
787 {
788 case TCPS_ESTABLISHED:
789 case TCPS_FIN_WAIT_1:
790 case TCPS_CLOSING:
791 case TCPS_CLOSE_WAIT:
792 case TCPS_LAST_ACK:
793 break;
794 }
795
796 /*
797 * If another thread for this tcp is currently in ip (indicated by
798 * the TF_SENDINPROG flag), defer the cleanup until after it returns
799 * back to tcp. This is done to serialize the close until after all
800 * pending output is finished, in order to avoid having the PCB be
801 * detached and the cached route cleaned, only for ip to cache the
802 * route back into the PCB again. Note that we've cleared all the
803 * timers at this point. Set TF_CLOSING to indicate to tcp_output()
804  * that it should call us again once it returns from ip; at that
805 * point both flags should be cleared and we can proceed further
806 * with the cleanup.
807 */
808 if (tp->t_flags & (TF_CLOSING|TF_SENDINPROG)) {
809 tp->t_flags |= TF_CLOSING;
810 return (NULL);
811 }
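
	/*
	 * Sketch of the counterpart expected in tcp_output() (assumption:
	 * the exact code lives in tcp_output.c): after returning from ip,
	 * it drops TF_SENDINPROG and retries any close we deferred above.
	 */
#if 0
	tp->t_flags &= ~TF_SENDINPROG;
	if (tp->t_flags & TF_CLOSING) {
		tp->t_flags &= ~TF_CLOSING;
		(void) tcp_close(tp);	/* finish the deferred teardown */
	}
#endif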
812
813 lck_mtx_lock(rt_mtx);
814 /*
815 * If we got enough samples through the srtt filter,
816 * save the rtt and rttvar in the routing entry.
817  * 'Enough' is arbitrarily defined as 16 samples.
818 * 16 samples is enough for the srtt filter to converge
819 * to within 5% of the correct value; fewer samples and
820 * we could save a very bogus rtt.
821 *
822 * Don't update the default route's characteristics and don't
823 * update anything that the user "locked".
824 */
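
	/*
	 * Worked conversion for the caching below: t_srtt is kept in units
	 * of (1/TCP_RTT_SCALE) ticks of the TCP_RETRANSHZ clock, while
	 * rmx_rtt wants RTM_RTTUNIT units per second, hence
	 *
	 *	rmx_rtt = t_srtt * (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE))
	 *
	 * (and analogously for rttvar with TCP_RTTVAR_SCALE).
	 */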
825 if (tp->t_rttupdated >= 16) {
826 register u_long i = 0;
827
828 #if INET6
829 if (isipv6) {
830 struct sockaddr_in6 *sin6;
831
832 if ((rt = inp->in6p_route.ro_rt) == NULL)
833 goto no_valid_rt;
834 sin6 = (struct sockaddr_in6 *)rt_key(rt);
835 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
836 goto no_valid_rt;
837 }
838 else
839 #endif /* INET6 */
840 rt = inp->inp_route.ro_rt;
841 if (rt == NULL ||
842 ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr
843 == INADDR_ANY || rt->generation_id != route_generation) {
844 if (tp->t_state >= TCPS_CLOSE_WAIT)
845 tp->t_state = TCPS_CLOSING;
846
847 goto no_valid_rt;
848 }
849
850 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
851 i = tp->t_srtt *
852 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
853 if (rt->rt_rmx.rmx_rtt && i)
854 /*
855 * filter this update to half the old & half
856 * the new values, converting scale.
857 * See route.h and tcp_var.h for a
858 * description of the scaling constants.
859 */
860 rt->rt_rmx.rmx_rtt =
861 (rt->rt_rmx.rmx_rtt + i) / 2;
862 else
863 rt->rt_rmx.rmx_rtt = i;
864 tcpstat.tcps_cachedrtt++;
865 }
866 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
867 i = tp->t_rttvar *
868 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
869 if (rt->rt_rmx.rmx_rttvar && i)
870 rt->rt_rmx.rmx_rttvar =
871 (rt->rt_rmx.rmx_rttvar + i) / 2;
872 else
873 rt->rt_rmx.rmx_rttvar = i;
874 tcpstat.tcps_cachedrttvar++;
875 }
876 /*
877 * The old comment here said:
878 * update the pipelimit (ssthresh) if it has been updated
879  * already or if a pipesize was specified & the threshold
880 * got below half the pipesize. I.e., wait for bad news
881 * before we start updating, then update on both good
882 * and bad news.
883 *
884 * But we want to save the ssthresh even if no pipesize is
885 * specified explicitly in the route, because such
886 * connections still have an implicit pipesize specified
887 * by the global tcp_sendspace. In the absence of a reliable
888 * way to calculate the pipesize, it will have to do.
889 */
890 i = tp->snd_ssthresh;
891 if (rt->rt_rmx.rmx_sendpipe != 0)
892 dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
893 else
894 dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
895 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
896 i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
897 || dosavessthresh) {
898 /*
899 * convert the limit from user data bytes to
900 * packets then to packet data bytes.
901 */
902 i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
903 if (i < 2)
904 i = 2;
905 i *= (u_long)(tp->t_maxseg +
906 #if INET6
907 (isipv6 ? sizeof (struct ip6_hdr) +
908 sizeof (struct tcphdr) :
909 #endif
910 sizeof (struct tcpiphdr)
911 #if INET6
912 )
913 #endif
914 );
915 if (rt->rt_rmx.rmx_ssthresh)
916 rt->rt_rmx.rmx_ssthresh =
917 (rt->rt_rmx.rmx_ssthresh + i) / 2;
918 else
919 rt->rt_rmx.rmx_ssthresh = i;
920 tcpstat.tcps_cachedssthresh++;
921 }
922 }
923 rt = inp->inp_route.ro_rt;
924 if (rt) {
925 /*
926 * mark route for deletion if no information is
927 * cached.
928 */
929 if ((so->so_flags & SOF_OVERFLOW) && tcp_lq_overflow &&
930 ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0)){
931 if (rt->rt_rmx.rmx_rtt == 0)
932 rt->rt_flags |= RTF_DELCLONE;
933 }
934 }
935 no_valid_rt:
936 /* free the reassembly queue, if any */
937 lck_mtx_unlock(rt_mtx);
938
939 (void) tcp_freeq(tp);
940
941 tcp_free_sackholes(tp);
942
943 /* Free the packet list */
944 if (tp->t_pktlist_head != NULL)
945 m_freem_list(tp->t_pktlist_head);
946 TCP_PKTLIST_CLEAR(tp);
947
948 #ifdef __APPLE__
949 if (so->cached_in_sock_layer)
950 inp->inp_saved_ppcb = (caddr_t) tp;
951 #endif
952
953 soisdisconnected(so);
954 #if INET6
955 if (INP_CHECK_SOCKAF(so, AF_INET6))
956 in6_pcbdetach(inp);
957 else
958 #endif /* INET6 */
959 in_pcbdetach(inp);
960 tcpstat.tcps_closed++;
961 KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed,0,0,0,0);
962 return ((struct tcpcb *)0);
963 }
964
965 int
966 tcp_freeq(tp)
967 struct tcpcb *tp;
968 {
969
970 register struct tseg_qent *q;
971 int rv = 0;
972
973 while((q = LIST_FIRST(&tp->t_segq)) != NULL) {
974 LIST_REMOVE(q, tqe_q);
975 m_freem(q->tqe_m);
976 FREE(q, M_TSEGQ);
977 tcp_reass_qsize--;
978 rv = 1;
979 }
980 return (rv);
981 }
982
983 void
984 tcp_drain()
985 {
986 if (do_tcpdrain)
987 {
988 struct inpcb *inpb;
989 struct tcpcb *tcpb;
990 struct tseg_qent *te;
991
992 /*
993  * Walk the tcpcbs, if any exist, and flush the reassembly queue,
994 * if there is one...
995 * XXX: The "Net/3" implementation doesn't imply that the TCP
996 * reassembly queue should be flushed, but in a situation
997 * where we're really low on mbufs, this is potentially
998  * useful.
999 */
1000 if (!lck_rw_try_lock_exclusive(tcbinfo.mtx)) /* do it next time if the lock is in use */
1001 return;
1002
1003 for (inpb = LIST_FIRST(tcbinfo.listhead); inpb;
1004 inpb = LIST_NEXT(inpb, inp_list)) {
1005 if ((tcpb = intotcpcb(inpb))) {
1006 while ((te = LIST_FIRST(&tcpb->t_segq))
1007 != NULL) {
1008 LIST_REMOVE(te, tqe_q);
1009 m_freem(te->tqe_m);
1010 FREE(te, M_TSEGQ);
1011 tcp_reass_qsize--;
1012 }
1013 }
1014 }
1015 lck_rw_done(tcbinfo.mtx);
1016
1017 }
1018 }
1019
1020 /*
1021  * Notify a tcp user of an asynchronous error:
1022  * store the error as a soft error, but do not wake up the user,
1023  * since there is currently no mechanism for reporting soft
1024  * errors (yet - a kqueue filter may be added).
1027 */
1028 static void
1029 tcp_notify(inp, error)
1030 struct inpcb *inp;
1031 int error;
1032 {
1033 struct tcpcb *tp;
1034
1035 if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD))
1036 return; /* pcb is gone already */
1037
1038 tp = (struct tcpcb *)inp->inp_ppcb;
1039
1040 /*
1041 * Ignore some errors if we are hooked up.
1042 * If connection hasn't completed, has retransmitted several times,
1043 * and receives a second error, give up now. This is better
1044 * than waiting a long time to establish a connection that
1045 * can never complete.
1046 */
1047 if (tp->t_state == TCPS_ESTABLISHED &&
1048 (error == EHOSTUNREACH || error == ENETUNREACH ||
1049 error == EHOSTDOWN)) {
1050 return;
1051 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1052 tp->t_softerror)
1053 tcp_drop(tp, error);
1054 else
1055 tp->t_softerror = error;
1056 #if 0
1057 wakeup((caddr_t) &so->so_timeo);
1058 sorwakeup(so);
1059 sowwakeup(so);
1060 #endif
1061 }
1062
1063 static int
1064 tcp_pcblist SYSCTL_HANDLER_ARGS
1065 {
1066 #pragma unused(oidp, arg1, arg2)
1067 int error, i, n;
1068 struct inpcb *inp, **inp_list;
1069 inp_gen_t gencnt;
1070 struct xinpgen xig;
1071 int slot;
1072
1073 /*
1074 * The process of preparing the TCB list is too time-consuming and
1075 * resource-intensive to repeat twice on every request.
1076 */
1077 lck_rw_lock_shared(tcbinfo.mtx);
1078 if (req->oldptr == USER_ADDR_NULL) {
1079 n = tcbinfo.ipi_count;
1080 req->oldidx = 2 * (sizeof xig)
1081 + (n + n/8) * sizeof(struct xtcpcb);
1082 lck_rw_done(tcbinfo.mtx);
1083 return 0;
1084 }
1085
1086 if (req->newptr != USER_ADDR_NULL) {
1087 lck_rw_done(tcbinfo.mtx);
1088 return EPERM;
1089 }
1090
1091 /*
1092 * OK, now we're committed to doing something.
1093 */
1094 gencnt = tcbinfo.ipi_gencnt;
1095 n = tcbinfo.ipi_count;
1096
1097 bzero(&xig, sizeof(xig));
1098 xig.xig_len = sizeof xig;
1099 xig.xig_count = n;
1100 xig.xig_gen = gencnt;
1101 xig.xig_sogen = so_gencnt;
1102 error = SYSCTL_OUT(req, &xig, sizeof xig);
1103 if (error) {
1104 lck_rw_done(tcbinfo.mtx);
1105 return error;
1106 }
1107 /*
1108 * We are done if there is no pcb
1109 */
1110 if (n == 0) {
1111 lck_rw_done(tcbinfo.mtx);
1112 return 0;
1113 }
1114
1115 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1116 if (inp_list == 0) {
1117 lck_rw_done(tcbinfo.mtx);
1118 return ENOMEM;
1119 }
1120
1121 for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
1122 inp = LIST_NEXT(inp, inp_list)) {
1123 #ifdef __APPLE__
1124 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
1125 #else
1126 if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
1127 #endif
1128 inp_list[i++] = inp;
1129 }
1130
1131 for (slot = 0; slot < N_TIME_WAIT_SLOTS; slot++) {
1132 struct inpcb *inpnxt;
1133
1134 for (inp = time_wait_slots[slot].lh_first; inp && i < n; inp = inpnxt) {
1135 inpnxt = inp->inp_list.le_next;
1136 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
1137 inp_list[i++] = inp;
1138 }
1139 }
1140
1141 n = i;
1142
1143 error = 0;
1144 for (i = 0; i < n; i++) {
1145 inp = inp_list[i];
1146 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
1147 struct xtcpcb xt;
1148 caddr_t inp_ppcb;
1149
1150 bzero(&xt, sizeof(xt));
1151 xt.xt_len = sizeof xt;
1152 /* XXX should avoid extra copy */
1153 inpcb_to_compat(inp, &xt.xt_inp);
1154 inp_ppcb = inp->inp_ppcb;
1155 if (inp_ppcb != NULL) {
1156 bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
1157 }
1158 else
1159 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
1160 if (inp->inp_socket)
1161 sotoxsocket(inp->inp_socket, &xt.xt_socket);
1162 error = SYSCTL_OUT(req, &xt, sizeof xt);
1163 }
1164 }
1165 if (!error) {
1166 /*
1167 * Give the user an updated idea of our state.
1168 * If the generation differs from what we told
1169 * her before, she knows that something happened
1170 * while we were processing this request, and it
1171 * might be necessary to retry.
1172 */
1173 bzero(&xig, sizeof(xig));
1174 xig.xig_len = sizeof xig;
1175 xig.xig_gen = tcbinfo.ipi_gencnt;
1176 xig.xig_sogen = so_gencnt;
1177 xig.xig_count = tcbinfo.ipi_count;
1178 error = SYSCTL_OUT(req, &xig, sizeof xig);
1179 }
1180 FREE(inp_list, M_TEMP);
1181 lck_rw_done(tcbinfo.mtx);
1182 return error;
1183 }
1184
1185 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
1186 tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
1187
1188 #ifndef __APPLE__
1189 static int
1190 tcp_getcred(SYSCTL_HANDLER_ARGS)
1191 {
1192 struct sockaddr_in addrs[2];
1193 struct inpcb *inp;
1194 int error, s;
1195
1196 error = suser(req->p);
1197 if (error)
1198 return (error);
1199 error = SYSCTL_IN(req, addrs, sizeof(addrs));
1200 if (error)
1201 return (error);
1202 s = splnet();
1203 inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
1204 addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
1205 if (inp == NULL || inp->inp_socket == NULL) {
1206 error = ENOENT;
1207 goto out;
1208 }
1209 	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(*(kauth_cred_t)0));
1210 out:
1211 splx(s);
1212 return (error);
1213 }
1214
1215 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
1216 0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");
1217
1218 #if INET6
1219 static int
1220 tcp6_getcred(SYSCTL_HANDLER_ARGS)
1221 {
1222 struct sockaddr_in6 addrs[2];
1223 struct inpcb *inp;
1224 int error, s, mapped = 0;
1225
1226 error = suser(req->p);
1227 if (error)
1228 return (error);
1229 error = SYSCTL_IN(req, addrs, sizeof(addrs));
1230 if (error)
1231 return (error);
1232 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
1233 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
1234 mapped = 1;
1235 else
1236 return (EINVAL);
1237 }
1238 s = splnet();
1239 if (mapped == 1)
1240 inp = in_pcblookup_hash(&tcbinfo,
1241 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
1242 addrs[1].sin6_port,
1243 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
1244 addrs[0].sin6_port,
1245 0, NULL);
1246 else
1247 inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
1248 addrs[1].sin6_port,
1249 &addrs[0].sin6_addr, addrs[0].sin6_port,
1250 0, NULL);
1251 if (inp == NULL || inp->inp_socket == NULL) {
1252 error = ENOENT;
1253 goto out;
1254 }
1255 	error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
1256 	    sizeof(*(kauth_cred_t)0));
1257 out:
1258 splx(s);
1259 return (error);
1260 }
1261
1262 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
1263 0, 0,
1264 tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
1265 #endif
1266 #endif /* __APPLE__*/
1267
1268 void
1269 tcp_ctlinput(cmd, sa, vip)
1270 int cmd;
1271 struct sockaddr *sa;
1272 void *vip;
1273 {
1274 struct ip *ip = vip;
1275 struct tcphdr *th;
1276 struct in_addr faddr;
1277 struct inpcb *inp;
1278 struct tcpcb *tp;
1279 void (*notify)(struct inpcb *, int) = tcp_notify;
1280 tcp_seq icmp_seq;
1281
1282 faddr = ((struct sockaddr_in *)sa)->sin_addr;
1283 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
1284 return;
1285
1286 if (cmd == PRC_QUENCH)
1287 notify = tcp_quench;
1288 else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
1289 cmd == PRC_UNREACH_PORT) && ip)
1290 notify = tcp_drop_syn_sent;
1291 else if (cmd == PRC_MSGSIZE)
1292 notify = tcp_mtudisc;
1293 else if (PRC_IS_REDIRECT(cmd)) {
1294 ip = 0;
1295 notify = in_rtchange;
1296 } else if (cmd == PRC_HOSTDEAD)
1297 ip = 0;
1298 else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
1299 return;
1300 if (ip) {
1301 th = (struct tcphdr *)((caddr_t)ip
1302 + (IP_VHL_HL(ip->ip_vhl) << 2));
1303 inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
1304 ip->ip_src, th->th_sport, 0, NULL);
1305 if (inp != NULL && inp->inp_socket != NULL) {
1306 tcp_lock(inp->inp_socket, 1, 0);
1307 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
1308 tcp_unlock(inp->inp_socket, 1, 0);
1309 return;
1310 }
1311 			icmp_seq = ntohl(th->th_seq);	/* host order for the SEQ_* compares below */
1312 tp = intotcpcb(inp);
1313 if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
1314 SEQ_LT(icmp_seq, tp->snd_max))
1315 (*notify)(inp, inetctlerrmap[cmd]);
1316 tcp_unlock(inp->inp_socket, 1, 0);
1317 }
1318 } else
1319 in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
1320 }
1321
1322 #if INET6
1323 void
1324 tcp6_ctlinput(cmd, sa, d)
1325 int cmd;
1326 struct sockaddr *sa;
1327 void *d;
1328 {
1329 struct tcphdr th;
1330 void (*notify)(struct inpcb *, int) = tcp_notify;
1331 struct ip6_hdr *ip6;
1332 struct mbuf *m;
1333 struct ip6ctlparam *ip6cp = NULL;
1334 const struct sockaddr_in6 *sa6_src = NULL;
1335 int off;
1336 struct tcp_portonly {
1337 u_int16_t th_sport;
1338 u_int16_t th_dport;
1339 } *thp;
1340
1341 if (sa->sa_family != AF_INET6 ||
1342 sa->sa_len != sizeof(struct sockaddr_in6))
1343 return;
1344
1345 if (cmd == PRC_QUENCH)
1346 notify = tcp_quench;
1347 else if (cmd == PRC_MSGSIZE)
1348 notify = tcp_mtudisc;
1349 else if (!PRC_IS_REDIRECT(cmd) &&
1350 ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
1351 return;
1352
1353 /* if the parameter is from icmp6, decode it. */
1354 if (d != NULL) {
1355 ip6cp = (struct ip6ctlparam *)d;
1356 m = ip6cp->ip6c_m;
1357 ip6 = ip6cp->ip6c_ip6;
1358 off = ip6cp->ip6c_off;
1359 sa6_src = ip6cp->ip6c_src;
1360 } else {
1361 m = NULL;
1362 ip6 = NULL;
1363 off = 0; /* fool gcc */
1364 sa6_src = &sa6_any;
1365 }
1366
1367 if (ip6) {
1368 /*
1369 		 * XXX: We assume that when ip6 is non-NULL,
1370 		 * m and off are valid.
1371 */
1372
1373 /* check if we can safely examine src and dst ports */
1374 if (m->m_pkthdr.len < off + sizeof(*thp))
1375 return;
1376
1377 bzero(&th, sizeof(th));
1378 m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
1379
1380 in6_pcbnotify(&tcbinfo, sa, th.th_dport,
1381 (struct sockaddr *)ip6cp->ip6c_src,
1382 th.th_sport, cmd, notify);
1383 } else
1384 in6_pcbnotify(&tcbinfo, sa, 0, (struct sockaddr *)sa6_src,
1385 0, cmd, notify);
1386 }
1387 #endif /* INET6 */
1388
1389
1390 /*
1391 * Following is where TCP initial sequence number generation occurs.
1392 *
1393 * There are two places where we must use initial sequence numbers:
1394 * 1. In SYN-ACK packets.
1395 * 2. In SYN packets.
1396 *
1397 * The ISNs in SYN-ACK packets have no monotonicity requirement,
1398 * and should be as unpredictable as possible to avoid the possibility
1399 * of spoofing and/or connection hijacking. To satisfy this
1400 * requirement, SYN-ACK ISNs are generated via the arc4random()
1401 * function. If exact RFC 1948 compliance is requested via sysctl,
1402 * these ISNs will be generated just like those in SYN packets.
1403 *
1404 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
1405 * depends on this property. In addition, these ISNs should be
1406 * unguessable so as to prevent connection hijacking. To satisfy
1407 * the requirements of this situation, the algorithm outlined in
1408 * RFC 1948 is used to generate sequence numbers.
1409 *
1410 * For more information on the theory of operation, please see
1411 * RFC 1948.
1412 *
1413 * Implementation details:
1414 *
1415 * Time is based off the system timer, and is corrected so that it
1416 * increases by one megabyte per second. This allows for proper
1417 * recycling on high speed LANs while still leaving over an hour
1418 * before rollover.
1419 *
1420 * Two sysctls control the generation of ISNs:
1421 *
1422 * net.inet.tcp.isn_reseed_interval controls the number of seconds
1423 * between seeding of isn_secret. This is normally set to zero,
1424 * as reseeding should not be necessary.
1425 *
1426 * net.inet.tcp.strict_rfc1948 controls whether RFC 1948 is followed
1427 * strictly. When strict compliance is requested, reseeding is
1428 * disabled and SYN-ACKs will be generated in the same manner as
1429 * SYNs. Strict mode is disabled by default.
1430 *
1431 */
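
/*
 * In RFC 1948 terms, the SYN ISNs computed below are
 *
 *	ISN = M + F(localhost, localport, remotehost, remoteport, secret)
 *
 * where M is the time component (advancing at ISN_BYTES_PER_SECOND)
 * and F is MD5 over the connection 4-tuple and isn_secret.
 */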
1432
1433 #define ISN_BYTES_PER_SECOND 1048576
1434
1435 //PWC - md5 routines cause alignment exceptions. Need to figure out why. For now use lame incremental
1436 // isn. how's that for not easily guessable!?
1437
1438 int pwc_bogus;
1439
1440 tcp_seq
1441 tcp_new_isn(tp)
1442 struct tcpcb *tp;
1443 {
1444 u_int32_t md5_buffer[4];
1445 tcp_seq new_isn;
1446 struct timeval timenow;
1447 	static u_char isn_secret[32];	/* static: the secret must persist across calls */
1448 	static int isn_last_reseed = 0;	/* static: stays 0 only until the first seeding below */
1449 MD5_CTX isn_ctx;
1450
1451 /* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. */
1452 if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT))
1453 && tcp_strict_rfc1948 == 0)
1454 #ifdef __APPLE__
1455 return random();
1456 #else
1457 return arc4random();
1458 #endif
1459 getmicrotime(&timenow);
1460
1461 /* Seed if this is the first use, reseed if requested. */
1462 if ((isn_last_reseed == 0) ||
1463 ((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) &&
1464 	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval)	/* both sides in seconds */
1465 < (u_int)timenow.tv_sec))) {
1466 #ifdef __APPLE__
1467 read_random(&isn_secret, sizeof(isn_secret));
1468 #else
1469 read_random_unlimited(&isn_secret, sizeof(isn_secret));
1470 #endif
1471 isn_last_reseed = timenow.tv_sec;
1472 }
1473
1474 /* Compute the md5 hash and return the ISN. */
1475 MD5Init(&isn_ctx);
1476 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
1477 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
1478 #if INET6
1479 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
1480 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
1481 sizeof(struct in6_addr));
1482 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
1483 sizeof(struct in6_addr));
1484 } else
1485 #endif
1486 {
1487 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
1488 sizeof(struct in_addr));
1489 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
1490 sizeof(struct in_addr));
1491 }
1492 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
1493 MD5Final((u_char *) &md5_buffer, &isn_ctx);
1494 new_isn = (tcp_seq) md5_buffer[0];
1495 	new_isn += timenow.tv_sec * ISN_BYTES_PER_SECOND;	/* 1MB/sec, per the comment above */
1496 return new_isn;
1497 }
1498
1499 /*
1500 * When a source quench is received, close congestion window
1501 * to one segment. We will gradually open it again as we proceed.
1502 */
1503 void
1504 tcp_quench(
1505 struct inpcb *inp,
1506 __unused int errno
1507 )
1508 {
1509 struct tcpcb *tp = intotcpcb(inp);
1510
1511 if (tp) {
1512 tp->snd_cwnd = tp->t_maxseg;
1513 tp->t_bytes_acked = 0;
1514 }
1515 }
1516
1517 /*
1518 * When a specific ICMP unreachable message is received and the
1519 * connection state is SYN-SENT, drop the connection. This behavior
1520 * is controlled by the icmp_may_rst sysctl.
1521 */
1522 void
1523 tcp_drop_syn_sent(inp, errno)
1524 struct inpcb *inp;
1525 int errno;
1526 {
1527 struct tcpcb *tp = intotcpcb(inp);
1528
1529 if (tp && tp->t_state == TCPS_SYN_SENT)
1530 tcp_drop(tp, errno);
1531 }
1532
1533 /*
1534 * When `need fragmentation' ICMP is received, update our idea of the MSS
1535 * based on the new value in the route. Also nudge TCP to send something,
1536 * since we know the packet we just sent was dropped.
1537 * This duplicates some code in the tcp_mss() function in tcp_input.c.
1538 */
1539 void
1540 tcp_mtudisc(
1541 struct inpcb *inp,
1542 __unused int errno
1543 )
1544 {
1545 struct tcpcb *tp = intotcpcb(inp);
1546 struct rtentry *rt;
1547 struct rmxp_tao *taop;
1548 struct socket *so = inp->inp_socket;
1549 int offered;
1550 int mss;
1551 #if INET6
1552 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
1553 #endif /* INET6 */
1554
1555 if (tp) {
1556 lck_mtx_lock(rt_mtx);
1557 #if INET6
1558 if (isipv6)
1559 rt = tcp_rtlookup6(inp);
1560 else
1561 #endif /* INET6 */
1562 rt = tcp_rtlookup(inp);
1563 if (!rt || !rt->rt_rmx.rmx_mtu) {
1564 tp->t_maxopd = tp->t_maxseg =
1565 #if INET6
1566 isipv6 ? tcp_v6mssdflt :
1567 #endif /* INET6 */
1568 tcp_mssdflt;
1569 lck_mtx_unlock(rt_mtx);
1570 return;
1571 }
1572 taop = rmx_taop(rt->rt_rmx);
1573 offered = taop->tao_mssopt;
1574 mss = rt->rt_rmx.rmx_mtu -
1575 #if INET6
1576 (isipv6 ?
1577 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1578 #endif /* INET6 */
1579 sizeof(struct tcpiphdr)
1580 #if INET6
1581 )
1582 #endif /* INET6 */
1583 ;
1584
1585 lck_mtx_unlock(rt_mtx);
1586 if (offered)
1587 mss = min(mss, offered);
1588 /*
1589 * XXX - The above conditional probably violates the TCP
1590 * spec. The problem is that, since we don't know the
1591 * other end's MSS, we are supposed to use a conservative
1592 * default. But, if we do that, then MTU discovery will
1593 * never actually take place, because the conservative
1594 * default is much less than the MTUs typically seen
1595 * on the Internet today. For the moment, we'll sweep
1596 * this under the carpet.
1597 *
1598 * The conservative default might not actually be a problem
1599 * if the only case this occurs is when sending an initial
1600 * SYN with options and data to a host we've never talked
1601 * to before. Then, they will reply with an MSS value which
1602 * will get recorded and the new parameters should get
1603 * recomputed. For Further Study.
1604 */
1605 if (tp->t_maxopd <= mss)
1606 return;
1607 tp->t_maxopd = mss;
1608
1609 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
1610 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
1611 mss -= TCPOLEN_TSTAMP_APPA;
1612
1613 if (so->so_snd.sb_hiwat < mss)
1614 mss = so->so_snd.sb_hiwat;
1615
1616 tp->t_maxseg = mss;
1617
1618 tcpstat.tcps_mturesent++;
1619 tp->t_rtttime = 0;
1620 tp->snd_nxt = tp->snd_una;
1621 tcp_output(tp);
1622 }
1623 }
1624
1625 /*
1626  * Look up the routing entry to the peer of this inpcb.  If no route
1627  * is found and one cannot be allocated, NULL is returned.  This routine
1628 * is called by TCP routines that access the rmx structure and by tcp_mss
1629 * to get the interface MTU.
1630 */
1631 struct rtentry *
1632 tcp_rtlookup(inp)
1633 struct inpcb *inp;
1634 {
1635 struct route *ro;
1636 struct rtentry *rt;
1637 struct tcpcb *tp;
1638
1639 ro = &inp->inp_route;
1640 if (ro == NULL)
1641 return (NULL);
1642 rt = ro->ro_rt;
1643
1644 lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
1645
1646 if (rt == NULL || !(rt->rt_flags & RTF_UP) || rt->generation_id != route_generation) {
1647 /* No route yet, so try to acquire one */
1648 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1649 ro->ro_dst.sa_family = AF_INET;
1650 ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
1651 ((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
1652 inp->inp_faddr;
1653 rtalloc_ign_locked(ro, 0UL);
1654 rt = ro->ro_rt;
1655 }
1656 }
1657 if (rt != NULL && rt->rt_ifp != NULL)
1658 somultipages(inp->inp_socket,
1659 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
1660
1661 /*
1662 * Update MTU discovery determination. Don't do it if:
1663 * 1) it is disabled via the sysctl
1664 * 2) the route isn't up
1665 * 3) the MTU is locked (if it is, then discovery has been
1666 * disabled)
1667 */
1668
1669 tp = intotcpcb(inp);
1670
1671 if (!path_mtu_discovery || ((rt != NULL) &&
1672 (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
1673 tp->t_flags &= ~TF_PMTUD;
1674 else
1675 tp->t_flags |= TF_PMTUD;
1676
1677 return rt;
1678 }
1679
1680 #if INET6
1681 struct rtentry *
1682 tcp_rtlookup6(inp)
1683 struct inpcb *inp;
1684 {
1685 struct route_in6 *ro6;
1686 struct rtentry *rt;
1687 struct tcpcb *tp;
1688
1689 lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
1690
1691 ro6 = &inp->in6p_route;
1692 rt = ro6->ro_rt;
1693 if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
1694 /* No route yet, so try to acquire one */
1695 if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
1696 struct sockaddr_in6 *dst6;
1697
1698 dst6 = (struct sockaddr_in6 *)&ro6->ro_dst;
1699 dst6->sin6_family = AF_INET6;
1700 dst6->sin6_len = sizeof(*dst6);
1701 dst6->sin6_addr = inp->in6p_faddr;
1702 rtalloc_ign_locked((struct route *)ro6, 0UL);
1703 rt = ro6->ro_rt;
1704 }
1705 }
1706 if (rt != NULL && rt->rt_ifp != NULL)
1707 somultipages(inp->inp_socket,
1708 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
1717
1718 tp = intotcpcb(inp);
1719
1720 /*
1721 * Update MTU discovery determination. Don't do it if:
1722 * 1) it is disabled via the sysctl
1723 * 2) the route isn't up
1724 * 3) the MTU is locked (if it is, then discovery has been
1725 * disabled)
1726 */
1727
1728 if (!path_mtu_discovery || ((rt != NULL) &&
1729 (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU))))
1730 tp->t_flags &= ~TF_PMTUD;
1731 else
1732 tp->t_flags |= TF_PMTUD;
1733
1734 return rt;
1735 }
1736 #endif /* INET6 */
1737
1738 #if IPSEC
1739 /* compute ESP/AH header size for TCP, including outer IP header. */
1740 size_t
1741 ipsec_hdrsiz_tcp(tp)
1742 struct tcpcb *tp;
1743 {
1744 struct inpcb *inp;
1745 struct mbuf *m;
1746 size_t hdrsiz;
1747 struct ip *ip;
1748 #if INET6
1749 struct ip6_hdr *ip6 = NULL;
1750 #endif /* INET6 */
1751 struct tcphdr *th;
1752
1753 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
1754 return 0;
1755 MGETHDR(m, M_DONTWAIT, MT_DATA); /* MAC-OK */
1756 if (!m)
1757 return 0;
1758
1759 #if INET6
1760 if ((inp->inp_vflag & INP_IPV6) != 0) {
1761 ip6 = mtod(m, struct ip6_hdr *);
1762 th = (struct tcphdr *)(ip6 + 1);
1763 m->m_pkthdr.len = m->m_len =
1764 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1765 tcp_fillheaders(tp, ip6, th);
1766 hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1767 } else
1768 #endif /* INET6 */
1769 {
1770 ip = mtod(m, struct ip *);
1771 th = (struct tcphdr *)(ip + 1);
1772 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
1773 tcp_fillheaders(tp, ip, th);
1774 hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1775 }
1776 m_free(m);
1777 return hdrsiz;
1778 }
1779 #endif /*IPSEC*/
1780
1781 /*
1782 * Return a pointer to the cached information about the remote host.
1783 * The cached information is stored in the protocol specific part of
1784 * the route metrics.
1785 */
1786 struct rmxp_tao *
1787 tcp_gettaocache(inp)
1788 struct inpcb *inp;
1789 {
1790 struct rtentry *rt;
1791 struct rmxp_tao *taop;
1792
1793 lck_mtx_lock(rt_mtx);
1794 #if INET6
1795 if ((inp->inp_vflag & INP_IPV6) != 0)
1796 rt = tcp_rtlookup6(inp);
1797 else
1798 #endif /* INET6 */
1799 rt = tcp_rtlookup(inp);
1800
1801 /* Make sure this is a host route and is up. */
1802 if (rt == NULL ||
1803 (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST)) {
1804 lck_mtx_unlock(rt_mtx);
1805 return NULL;
1806 }
1807
1808 taop = rmx_taop(rt->rt_rmx);
1809 lck_mtx_unlock(rt_mtx);
1810 return (taop);
1811 }
1812
1813 /*
1814 * Clear all the TAO cache entries, called from tcp_init.
1815 *
1816 * XXX
1817  * This routine is just an empty one, because we assume that the
1818  * routing tables are initialized at the same time as TCP, so there is
1819 * nothing in the cache left over.
1820 */
1821 static void
1822 tcp_cleartaocache()
1823 {
1824 }
1825
1826 int
1827 tcp_lock(so, refcount, lr)
1828 struct socket *so;
1829 int refcount;
1830 int lr;
1831 {
1832 int lr_saved;
1833 if (lr == 0)
1834 lr_saved = (unsigned int) __builtin_return_address(0);
1835 else lr_saved = lr;
1836
1837 if (so->so_pcb) {
1838 lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1839 }
1840 else {
1841 panic("tcp_lock: so=%p NO PCB! lr=%x\n", so, lr_saved);
1842 lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
1843 }
1844
1845 if (so->so_usecount < 0)
1846 panic("tcp_lock: so=%p so_pcb=%p lr=%x ref=%x\n",
1847 so, so->so_pcb, lr_saved, so->so_usecount);
1848
1849 if (refcount)
1850 so->so_usecount++;
1851 so->lock_lr[so->next_lock_lr] = (u_int32_t)lr_saved;
1852 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
1853 return (0);
1854 }
1855
1856 int
1857 tcp_unlock(so, refcount, lr)
1858 struct socket *so;
1859 int refcount;
1860 int lr;
1861 {
1862 int lr_saved;
1863 if (lr == 0)
1864 lr_saved = (unsigned int) __builtin_return_address(0);
1865 else lr_saved = lr;
1866
1867 #ifdef MORE_TCPLOCK_DEBUG
1868 printf("tcp_unlock: so=%p sopcb=%x lock=%x ref=%x lr=%x\n",
1869 so, so->so_pcb, ((struct inpcb *)so->so_pcb)->inpcb_mtx, so->so_usecount, lr_saved);
1870 #endif
1871 if (refcount)
1872 so->so_usecount--;
1873
1874 if (so->so_usecount < 0)
1875 panic("tcp_unlock: so=%p usecount=%x\n", so, so->so_usecount);
1876 if (so->so_pcb == NULL)
1877 panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
1878 else {
1879 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1880 so->unlock_lr[so->next_unlock_lr] = (u_int32_t)lr_saved;
1881 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
1882 lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1883 }
1884 return (0);
1885 }
1886
1887 lck_mtx_t *
1888 tcp_getlock(
1889 struct socket *so,
1890 __unused int locktype)
1891 {
1892 struct inpcb *inp = sotoinpcb(so);
1893
1894 if (so->so_pcb) {
1895 if (so->so_usecount < 0)
1896 panic("tcp_getlock: so=%p usecount=%x\n", so, so->so_usecount);
1897 return(inp->inpcb_mtx);
1898 }
1899 else {
1900 panic("tcp_getlock: so=%p NULL so_pcb\n", so);
1901 return (so->so_proto->pr_domain->dom_mtx);
1902 }
1903 }
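
/*
 * Typical use of the locking trio above (illustrative only; the real
 * callers live in the socket layer): take the per-pcb mutex with a use
 * count, operate on the tcpcb, then release both.
 */
#if 0
	tcp_lock(so, 1, 0);		/* lock and take a use count */
	/* ... examine or modify sototcpcb(so) ... */
	tcp_unlock(so, 1, 0);		/* drop the use count and unlock */
#endif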
1904 long
1905 tcp_sbspace(struct tcpcb *tp)
1906 {
1907 struct sockbuf *sb = &tp->t_inpcb->inp_socket->so_rcv;
1908 long space, newspace;
1909
1910 space = ((long) lmin((sb->sb_hiwat - sb->sb_cc),
1911 (sb->sb_mbmax - sb->sb_mbcnt)));
1912
1913 #if TRAFFIC_MGT
1914 if (tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) {
1915 if (tcp_background_io_enabled &&
1916 tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_SUPPRESSED) {
1917 tp->t_flags |= TF_RXWIN0SENT;
1918 return 0; /* Triggers TCP window closing by responding there is no space */
1919 }
1920 }
1921 #endif /* TRAFFIC_MGT */
1922
1923 	/* Avoid increasing the window size if the current window
1924 	 * is already very low; we could be in "persist" mode and
1925 	 * could break some apps (see rdar://5409343)
1926 */
1927
1928 if (space < tp->t_maxseg)
1929 return space;
1930
1931 /* Clip window size for slower link */
1932
1933 if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0 )
1934 return lmin(space, slowlink_wsize);
1935
1936 /*
1937  * Check for resource constraints before over-adjusting the amount of
1938  * space we can advertise in TCP window size updates.
1939 */
1940
1941 if (sbspace_factor && (tp->t_inpcb->inp_pcbinfo->ipi_count < tcp_sockthreshold) &&
1942 (total_mb_cnt / 8) < (mbstat.m_clusters / sbspace_factor)) {
1943 		if (space < (long)(sb->sb_maxused - sb->sb_cc)) {	/* make sure we don't constrain the window if we have enough resources */
1944 space = (long) lmax((sb->sb_maxused - sb->sb_cc), tp->rcv_maxbyps);
1945 }
1946 newspace = (long) lmax(((long)sb->sb_maxused - sb->sb_cc), (long)tp->rcv_maxbyps);
1947
1948 if (newspace > space)
1949 space = newspace;
1950 }
1951 return space;
1952 }
1953 /* DSEP Review Done pl-20051213-v02 @3253,@3391,@3400 */