1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
61 * $FreeBSD: src/sys/netinet/udp_usrreq.c,v 1.64.2.13 2001/08/08 18:59:54 ghelmer Exp $
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/domain.h>
70 #include <sys/protosw.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/sysctl.h>
74 #include <sys/syslog.h>
75 #include <sys/mcache.h>
76 #include <net/ntstat.h>
77
78 #include <kern/zalloc.h>
79
80 #include <net/if.h>
81 #include <net/if_types.h>
82 #include <net/route.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #if INET6
88 #include <netinet/ip6.h>
89 #endif
90 #include <netinet/in_pcb.h>
91 #include <netinet/in_var.h>
92 #include <netinet/ip_var.h>
93 #if INET6
94 #include <netinet6/in6_pcb.h>
95 #include <netinet6/ip6_var.h>
96 #endif
97 #include <netinet/ip_icmp.h>
98 #include <netinet/icmp_var.h>
99 #include <netinet/udp.h>
100 #include <netinet/udp_var.h>
101 #include <sys/kdebug.h>
102
103 #if IPSEC
104 #include <netinet6/ipsec.h>
105 #include <netinet6/esp.h>
106 extern int ipsec_bypass;
107 #endif /*IPSEC*/
108
109
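/*
 * kdebug trace codes for the KERNEL_DEBUG() points in udp_input() and
 * udp_output() below (layer begin/end and per-function begin/end).
 */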
110 #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0)
111 #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2)
112 #define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1)
113 #define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3)
114 #define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8))
115 #define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)
116
117 /*
118 * UDP protocol implementation.
119 * Per RFC 768, August, 1980.
120 */
121 #ifndef COMPAT_42
122 static int udpcksum = 1;
123 #else
124 static int udpcksum = 0; /* XXX */
125 #endif
126 SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW | CTLFLAG_LOCKED,
127 &udpcksum, 0, "");
128
129 static u_int32_t udps_in_sw_cksum;
130 SYSCTL_UINT(_net_inet_udp, OID_AUTO, in_sw_cksum, CTLFLAG_RD | CTLFLAG_LOCKED,
131 &udps_in_sw_cksum, 0,
132 "Number of received packets checksummed in software");
133
134 static u_int64_t udps_in_sw_cksum_bytes;
135 SYSCTL_QUAD(_net_inet_udp, OID_AUTO, in_sw_cksum_bytes, CTLFLAG_RD | CTLFLAG_LOCKED,
136 &udps_in_sw_cksum_bytes,
137 "Amount of received data checksummed in software");
138
139 static u_int32_t udps_out_sw_cksum;
140 SYSCTL_UINT(_net_inet_udp, OID_AUTO, out_sw_cksum, CTLFLAG_RD | CTLFLAG_LOCKED,
141 &udps_out_sw_cksum, 0,
142 "Number of transmitted packets checksummed in software");
143
144 static u_int64_t udps_out_sw_cksum_bytes;
145 SYSCTL_QUAD(_net_inet_udp, OID_AUTO, out_sw_cksum_bytes, CTLFLAG_RD | CTLFLAG_LOCKED,
146 &udps_out_sw_cksum_bytes,
147 "Amount of transmitted data checksummed in software");
148
149 int log_in_vain = 0;
150 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
151 &log_in_vain, 0, "Log all incoming UDP packets");
152
153 static int blackhole = 0;
154 SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
155 &blackhole, 0, "Do not send port unreachables for refused connects");
156
157 struct inpcbhead udb; /* from udp_var.h */
158 #define udb6 udb /* for KAME src sync over BSD*'s */
159 struct inpcbinfo udbinfo;
160
161 #ifndef UDBHASHSIZE
162 #define UDBHASHSIZE 16
163 #endif
164
165 extern int esp_udp_encap_port;
166
167 extern void ipfwsyslog( int level, const char *format,...);
168
169 extern int fw_verbose;
170 static int udp_gc_done = FALSE; /* Garbage collection performed last slowtimo */
171
172 #if IPFIREWALL
173 #define log_in_vain_log( a ) { \
174 if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
175 ipfwsyslog a ; \
176 } \
177 else log a ; \
178 }
179 #else
180 #define log_in_vain_log( a ) { log a; }
181 #endif
182
183 struct udpstat udpstat; /* from udp_var.h */
184 SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
185 &udpstat, udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
186 SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
187 &udbinfo.ipi_count, 0, "Number of active PCBs");
188
189 __private_extern__ int udp_use_randomport = 1;
190 SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
191 &udp_use_randomport, 0, "Randomize UDP port numbers");
192
193 #if INET6
194 struct udp_in6 {
195 struct sockaddr_in6 uin6_sin;
196 u_char uin6_init_done : 1;
197 };
198 struct udp_ip6 {
199 struct ip6_hdr uip6_ip6;
200 u_char uip6_init_done : 1;
201 };
202 static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
203 static void udp_append(struct inpcb *last, struct ip *ip,
204 struct mbuf *n, int off, struct sockaddr_in *pudp_in,
205 struct udp_in6 *pudp_in6, struct udp_ip6 *pudp_ip6);
206 #else
207 static void udp_append(struct inpcb *last, struct ip *ip,
208 struct mbuf *n, int off, struct sockaddr_in *pudp_in);
209 #endif
210
211 static int udp_detach(struct socket *so);
212 static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
213 struct mbuf *, struct proc *);
214 extern int ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr );
215
216 void
217 udp_init()
218 {
219 vm_size_t str_size;
220 struct inpcbinfo *pcbinfo;
221
222
223 LIST_INIT(&udb);
224 udbinfo.listhead = &udb;
225 udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
226 udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB,
227 &udbinfo.porthashmask);
228 #ifdef __APPLE__
229 str_size = (vm_size_t) sizeof(struct inpcb);
230 udbinfo.ipi_zone = (void *) zinit(str_size, 80000*str_size, 8192, "udpcb");
231
232 pcbinfo = &udbinfo;
233 /*
234 * allocate lock group attribute and group for udp pcb mutexes
235 */
236 pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
237
238 pcbinfo->mtx_grp = lck_grp_alloc_init("udppcb", pcbinfo->mtx_grp_attr);
239
240 pcbinfo->mtx_attr = lck_attr_alloc_init();
241
242 if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
243 return; /* pretty much dead if this fails... */
244 #else
245 udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets,
246 ZONE_INTERRUPT, 0);
247 #endif
248 }
249
250 void
251 udp_input(m, iphlen)
252 register struct mbuf *m;
253 int iphlen;
254 {
255 register struct ip *ip;
256 register struct udphdr *uh;
257 register struct inpcb *inp;
258 struct mbuf *opts = 0;
259 int len, isbroadcast;
260 struct ip save_ip;
261 struct sockaddr *append_sa;
262 struct inpcbinfo *pcbinfo = &udbinfo;
263 struct sockaddr_in udp_in = {
264 sizeof (udp_in), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }
265 };
266 struct ip_moptions *imo = NULL;
267 int foundmembership = 0, ret = 0;
268 #if INET6
269 struct udp_in6 udp_in6 = {
270 { sizeof (udp_in6.uin6_sin), AF_INET6, 0, 0,
271 IN6ADDR_ANY_INIT, 0 },
272 0
273 };
274 struct udp_ip6 udp_ip6;
275 #endif /* INET6 */
276 struct ifnet *ifp = (m->m_pkthdr.rcvif != NULL) ? m->m_pkthdr.rcvif: NULL;
277
278 udpstat.udps_ipackets++;
279
280 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0,0,0,0,0);
281 if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
282 m->m_pkthdr.csum_flags = 0; /* invalidate hwcksum for UDP */
283
284 /* Expect 32-bit aligned data pointer on strict-align platforms */
285 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
286
287 /*
288 * Strip IP options, if any; should skip this,
289 * make available to user, and use on returned packets,
290 * but we don't yet have a way to check the checksum
291 * with options still present.
292 */
293 if (iphlen > sizeof (struct ip)) {
294 ip_stripoptions(m, (struct mbuf *)0);
295 iphlen = sizeof(struct ip);
296 }
297
298 /*
299 * Get IP and UDP header together in first mbuf.
300 */
301 ip = mtod(m, struct ip *);
302 if (m->m_len < iphlen + sizeof(struct udphdr)) {
303 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
304 udpstat.udps_hdrops++;
305 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
306 return;
307 }
308 ip = mtod(m, struct ip *);
309 }
310 uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
311
312 /* destination port of 0 is illegal, based on RFC768. */
313 if (uh->uh_dport == 0) {
314
315 if (ifp->if_udp_stat != NULL)
316 atomic_add_64(&ifp->if_udp_stat->port0, 1);
317
318 goto bad;
319 }
320
321 KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
322 ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);
323
324 /*
325 * Make mbuf data length reflect UDP length.
326 * If not enough data to reflect UDP length, drop.
327 */
328 len = ntohs((u_short)uh->uh_ulen);
329 if (ip->ip_len != len) {
330 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
331 udpstat.udps_badlen++;
332
333 if (ifp->if_udp_stat != NULL)
334 atomic_add_64(&ifp->if_udp_stat->badlength, 1);
335
336 goto bad;
337 }
338 m_adj(m, len - ip->ip_len);
339 /* ip->ip_len = len; */
340 }
341 /*
342 * Save a copy of the IP header in case we want to restore it
343 * for sending an ICMP error message in response.
344 */
345 save_ip = *ip;
346
347 /*
348 * Checksum extended UDP header and data.
349 */
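	/*
	 * Two cases below: if the driver validated the checksum including
	 * the pseudo-header (CSUM_DATA_VALID | CSUM_PSEUDO_HDR), use the
	 * value it left in csum_data; otherwise compute it in software by
	 * temporarily overlaying the IP header as a struct ipovly
	 * pseudo-header (saving, zeroing, then restoring the ih_x1 bytes)
	 * and running in_cksum() over the pseudo-header, UDP header and
	 * payload, per RFC 768.  A non-zero result means a bad checksum.
	 */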
350 if (uh->uh_sum) {
351 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
352 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
353 uh->uh_sum = m->m_pkthdr.csum_data;
354 else
355 goto doudpcksum;
356 uh->uh_sum ^= 0xffff;
357 } else {
358 char b[9];
359 doudpcksum:
360 bcopy(((struct ipovly *)ip)->ih_x1, b,
361 sizeof (((struct ipovly *)ip)->ih_x1));
362 bzero(((struct ipovly *)ip)->ih_x1,
363 sizeof (((struct ipovly *)ip)->ih_x1));
364 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
365 uh->uh_sum = in_cksum(m, len + sizeof (struct ip));
366 bcopy(b, ((struct ipovly *)ip)->ih_x1,
367 sizeof (((struct ipovly *)ip)->ih_x1));
368
369 udp_in_cksum_stats(len);
370 }
371 if (uh->uh_sum) {
372 udpstat.udps_badsum++;
373
374 if (ifp->if_udp_stat != NULL)
375 atomic_add_64(&ifp->if_udp_stat->badchksum, 1);
376
377 m_freem(m);
378 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
379 return;
380 }
381 }
382 #ifndef __APPLE__
383 else
384 udpstat.udps_nosum++;
385 #endif
386
387 isbroadcast = in_broadcast(ip->ip_dst, ifp);
388
389 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || isbroadcast) {
390
391 int reuse_sock = 0, mcast_delivered = 0;
392
393 lck_rw_lock_shared(pcbinfo->mtx);
394 /*
395 * Deliver a multicast or broadcast datagram to *all* sockets
396 * for which the local and remote addresses and ports match
397 * those of the incoming datagram. This allows more than
398 * one process to receive multi/broadcasts on the same port.
399 * (This really ought to be done for unicast datagrams as
400 * well, but that would cause problems with existing
401 * applications that open both address-specific sockets and
402 * a wildcard socket listening to the same port -- they would
403 * end up receiving duplicates of every unicast datagram.
404 * Those applications open the multiple sockets to overcome an
405 * inadequacy of the UDP socket interface, but for backwards
406 * compatibility we avoid the problem here rather than
407 * fixing the interface. Maybe 4.5BSD will remedy this?)
408 */
409
410
411 /*
412 * Construct sockaddr format source address.
413 */
414 udp_in.sin_port = uh->uh_sport;
415 udp_in.sin_addr = ip->ip_src;
416 /*
417 * Locate pcb(s) for datagram.
418 * (Algorithm copied from raw_intr().)
419 */
420 #if INET6
421 udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
422 #endif
423 LIST_FOREACH(inp, &udb, inp_list) {
424 if (inp->inp_socket == NULL)
425 continue;
426 if (inp != sotoinpcb(inp->inp_socket))
427 panic("udp_input: bad so back ptr inp=%p\n", inp);
428 #if INET6
429 if ((inp->inp_vflag & INP_IPV4) == 0)
430 continue;
431 #endif
432 if (ip_restrictrecvif && ifp != NULL &&
433 (ifp->if_eflags & IFEF_RESTRICTED_RECV) &&
434 !(inp->inp_flags & INP_RECV_ANYIF))
435 continue;
436
437 if ((inp->inp_moptions == NULL) &&
438 (ntohl(ip->ip_dst.s_addr) != INADDR_ALLHOSTS_GROUP) &&
439 (isbroadcast == 0) )
440 continue;
441
442
443 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
444 continue;
445 }
446
447 udp_lock(inp->inp_socket, 1, 0);
448
449 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
450 udp_unlock(inp->inp_socket, 1, 0);
451 continue;
452 }
453
454 if (inp->inp_lport != uh->uh_dport) {
455 udp_unlock(inp->inp_socket, 1, 0);
456 continue;
457 }
458 if (inp->inp_laddr.s_addr != INADDR_ANY) {
459 if (inp->inp_laddr.s_addr !=
460 ip->ip_dst.s_addr) {
461 udp_unlock(inp->inp_socket, 1, 0);
462 continue;
463 }
464 }
465 if (inp->inp_faddr.s_addr != INADDR_ANY) {
466 if (inp->inp_faddr.s_addr !=
467 ip->ip_src.s_addr ||
468 inp->inp_fport != uh->uh_sport) {
469 udp_unlock(inp->inp_socket, 1, 0);
470 continue;
471 }
472 }
473
474 if (isbroadcast == 0 && (ntohl(ip->ip_dst.s_addr) != INADDR_ALLHOSTS_GROUP)) {
475 if((imo = inp->inp_moptions) == NULL) {
476 udp_unlock(inp->inp_socket, 1, 0);
477 continue;
478 } else {
479 struct sockaddr_in group;
480 int blocked;
481
482 IMO_LOCK(imo);
483
484 bzero(&group, sizeof(struct sockaddr_in));
485 group.sin_len = sizeof(struct sockaddr_in);
486 group.sin_family = AF_INET;
487 group.sin_addr = ip->ip_dst;
488
489 blocked = imo_multi_filter(imo, ifp,
490 (struct sockaddr *)&group,
491 (struct sockaddr *)&udp_in);
492 if (blocked == MCAST_PASS)
493 foundmembership = 1;
494
495 IMO_UNLOCK(imo);
496 if (!foundmembership) {
497 udp_unlock(inp->inp_socket, 1, 0);
498 continue;
499 }
500 foundmembership = 0;
501 }
502 }
503 reuse_sock = inp->inp_socket->so_options & (SO_REUSEPORT|SO_REUSEADDR);
504 {
505 #if IPSEC
506 int skipit = 0;
507 /* check AH/ESP integrity. */
508 if (ipsec_bypass == 0) {
509 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
510 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
511 /* do not inject data to pcb */
512 skipit = 1;
513 }
514 }
515 if (skipit == 0)
516 #endif /*IPSEC*/
517 {
518 struct mbuf *n = NULL;
519
520 if (reuse_sock)
521 n = m_copy(m, 0, M_COPYALL);
522 #if INET6
523 udp_append(inp, ip, m,
524 iphlen + sizeof(struct udphdr),
525 &udp_in, &udp_in6, &udp_ip6);
526 #else
527 udp_append(inp, ip, m,
528 iphlen + sizeof(struct udphdr),
529 &udp_in);
530 #endif /* INET6 */
531 mcast_delivered++;
532
533 m = n;
534 }
535 udp_unlock(inp->inp_socket, 1, 0);
536 }
537 /*
538 * Don't look for additional matches if this one does
539 * not have either the SO_REUSEPORT or SO_REUSEADDR
540 * socket options set. This heuristic avoids searching
541 * through all pcbs in the common case of a non-shared
542 * port. It assumes that an application will never
543 * clear these options after setting them.
544 */
545 if (reuse_sock == 0 || m == NULL)
546 break;
547
548 /*
549 * Expect 32-bit aligned data pointer on strict-align
550 * platforms.
551 */
552 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
553 /*
554 * Recompute IP and UDP header pointers for new mbuf
555 */
556 ip = mtod(m, struct ip *);
557 uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
558 }
559 lck_rw_done(pcbinfo->mtx);
560
561 if (mcast_delivered == 0) {
562 /*
563 * No matching pcb found; discard datagram.
564 * (No need to send an ICMP Port Unreachable
565 * for a broadcast or multicast datagram.)
566 */
567 udpstat.udps_noportbcast++;
568
569 if (ifp->if_udp_stat != NULL)
570 atomic_add_64(&ifp->if_udp_stat->port_unreach, 1);
571
572 goto bad;
573 }
574
575 if (m != NULL) /* free the extra mbuf copy, or the one skipped by IPSec */
576 m_freem(m);
577 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
578 return;
579 }
580
581 #if IPSEC
582 /*
583 * UDP to port 4500 with a payload where the first four bytes are
584 * not zero is a UDP encapsulated IPSec packet. Packets where
585 * the payload is one byte and that byte is 0xFF are NAT keepalive
586 * packets. Decapsulate the ESP packet and carry on with IPSec input
587 * or discard the NAT keep-alive.
588 */
589 if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
590 uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) {
591 int payload_len = len - sizeof(struct udphdr) > 4 ? 4 : len - sizeof(struct udphdr);
592 if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
593 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) + payload_len)) == 0) {
594 udpstat.udps_hdrops++;
595 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
596 return;
597 }
598 /*
599 * Expect 32-bit aligned data pointer on strict-align
600 * platforms.
601 */
602 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
603
604 ip = mtod(m, struct ip *);
605 uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
606 }
607 /* Check for NAT keepalive packet */
608 if (payload_len == 1 && *(u_int8_t*)((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
609 m_freem(m);
610 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
611 return;
612 }
613 else if (payload_len == 4 && *(u_int32_t*)(void *)((caddr_t)uh + sizeof(struct udphdr)) != 0) {
614 /* UDP encapsulated IPSec packet to pass through NAT */
615 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
616 /* preserve the udp header */
617 esp4_input(m, iphlen + sizeof(struct udphdr));
618 return;
619 }
620 }
621 #endif
622
623 /*
624 * Locate pcb for datagram.
625 */
626 inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
627 ip->ip_dst, uh->uh_dport, 1, ifp);
628 if (inp == NULL) {
629
630 if (ifp->if_udp_stat != NULL)
631 atomic_add_64(&ifp->if_udp_stat->port_unreach, 1);
632
633 if (log_in_vain) {
634 char buf[MAX_IPv4_STR_LEN];
635 char buf2[MAX_IPv4_STR_LEN];
636
637 /* check src and dst address */
638 if (log_in_vain != 3)
639 log(LOG_INFO,
640 "Connection attempt to UDP %s:%d from %s:%d\n",
641 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
642 ntohs(uh->uh_dport),
643 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
644 ntohs(uh->uh_sport));
645 else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
646 ip->ip_dst.s_addr != ip->ip_src.s_addr)
647 log_in_vain_log((LOG_INFO,
648 "Stealth Mode connection attempt to UDP %s:%d from %s:%d\n",
649 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
650 ntohs(uh->uh_dport),
651 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
652 ntohs(uh->uh_sport)))
653 }
654 udpstat.udps_noport++;
655 if (m->m_flags & (M_BCAST | M_MCAST)) {
656 udpstat.udps_noportbcast++;
657 goto bad;
658 }
659 #if ICMP_BANDLIM
660 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
661 goto bad;
662 #endif
663 if (blackhole)
664 if (ifp && ifp->if_type != IFT_LOOP)
665 goto bad;
666 *ip = save_ip;
667 ip->ip_len += iphlen;
668 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
669 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
670 return;
671 }
672 udp_lock(inp->inp_socket, 1, 0);
673
674 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
675 udp_unlock(inp->inp_socket, 1, 0);
676
677 if (ifp->if_udp_stat != NULL)
678 atomic_add_64(&ifp->if_udp_stat->cleanup, 1);
679
680 goto bad;
681 }
682 #if IPSEC
683 if (ipsec_bypass == 0 && inp != NULL) {
684 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
685 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
686 udp_unlock(inp->inp_socket, 1, 0);
687
688 if (ifp->if_udp_stat != NULL)
689 atomic_add_64(&ifp->if_udp_stat->badipsec, 1);
690
691 goto bad;
692 }
693 }
694 #endif /*IPSEC*/
695
696 /*
697 * Construct sockaddr format source address.
698 * Stuff source address and datagram in user buffer.
699 */
700 udp_in.sin_port = uh->uh_sport;
701 udp_in.sin_addr = ip->ip_src;
702 if ((inp->inp_flags & INP_CONTROLOPTS) != 0
703 || (inp->inp_socket->so_options & SO_TIMESTAMP) != 0
704 || (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
705 #if INET6
706 if (inp->inp_vflag & INP_IPV6) {
707 int savedflags;
708
709 ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
710 savedflags = inp->inp_flags;
711 inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
712 ret = ip6_savecontrol(inp, m, &opts);
713 inp->inp_flags = savedflags;
714 } else
715 #endif
716 {
717 ret = ip_savecontrol(inp, &opts, ip, m);
718 }
719 if (ret != 0) {
720 udp_unlock(inp->inp_socket, 1, 0);
721 goto bad;
722 }
723 }
724 m_adj(m, iphlen + sizeof(struct udphdr));
725
726 KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
727 save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);
728
729 #if INET6
730 if (inp->inp_vflag & INP_IPV6) {
731 in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
732 append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
733 } else
734 #endif
735 append_sa = (struct sockaddr *)&udp_in;
736 if (nstat_collect) {
737 locked_add_64(&inp->inp_stat->rxpackets, 1);
738 locked_add_64(&inp->inp_stat->rxbytes, m->m_pkthdr.len);
739 }
740 so_recv_data_stat(inp->inp_socket, m, 0);
741 if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa, m, opts, NULL) == 0) {
742 udpstat.udps_fullsock++;
743 } else {
744 sorwakeup(inp->inp_socket);
745 }
746 udp_unlock(inp->inp_socket, 1, 0);
747 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
748 return;
749 bad:
750 m_freem(m);
751 if (opts)
752 m_freem(opts);
753 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
754 return;
755 }
756
757 #if INET6
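/*
 * Synthesize a minimal IPv6 header from an IPv4 one so that datagrams
 * delivered to an AF_INET6 (v4-mapped) socket can be fed to
 * ip6_savecontrol(); the IPv4 source and destination are carried as
 * IPv4-mapped IPv6 addresses.
 */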
758 static void
759 ip_2_ip6_hdr(ip6, ip)
760 struct ip6_hdr *ip6;
761 struct ip *ip;
762 {
763 bzero(ip6, sizeof(*ip6));
764
765 ip6->ip6_vfc = IPV6_VERSION;
766 ip6->ip6_plen = ip->ip_len;
767 ip6->ip6_nxt = ip->ip_p;
768 ip6->ip6_hlim = ip->ip_ttl;
769 if (ip->ip_src.s_addr) {
770 ip6->ip6_src.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
771 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
772 }
773 if (ip->ip_dst.s_addr) {
774 ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
775 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
776 }
777 }
778 #endif
779
780 /*
781 * subroutine of udp_input(), mainly for source code readability.
782 */
783 static void
784 #if INET6
785 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
786 struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
787 struct udp_ip6 *pudp_ip6)
788 #else
789 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
790 struct sockaddr_in *pudp_in)
791 #endif
792 {
793 struct sockaddr *append_sa;
794 struct mbuf *opts = 0;
795 int ret = 0;
796
797 #if CONFIG_MACF_NET
798 if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
799 m_freem(n);
800 return;
801 }
802 #endif
803 if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
804 (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
805 (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
806 #if INET6
807 if (last->inp_vflag & INP_IPV6) {
808 int savedflags;
809
810 if (pudp_ip6->uip6_init_done == 0) {
811 ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
812 pudp_ip6->uip6_init_done = 1;
813 }
814 savedflags = last->inp_flags;
815 last->inp_flags &= ~INP_UNMAPPABLEOPTS;
816 ret = ip6_savecontrol(last, n, &opts);
817 if (ret != 0) {
818 last->inp_flags = savedflags;
819 goto error;
820 }
821 last->inp_flags = savedflags;
822 } else
823 #endif
824 {
825 ret = ip_savecontrol(last, &opts, ip, n);
826 if (ret != 0) {
827 goto error;
828 }
829 }
830 }
831 #if INET6
832 if (last->inp_vflag & INP_IPV6) {
833 if (pudp_in6->uin6_init_done == 0) {
834 in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
835 pudp_in6->uin6_init_done = 1;
836 }
837 append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
838 } else
839 #endif
840 append_sa = (struct sockaddr *)pudp_in;
841 if (nstat_collect) {
842 locked_add_64(&last->inp_stat->rxpackets, 1);
843 locked_add_64(&last->inp_stat->rxbytes, n->m_pkthdr.len);
844 }
845 so_recv_data_stat(last->inp_socket, n, 0);
846 m_adj(n, off);
847 if (sbappendaddr(&last->inp_socket->so_rcv, append_sa, n, opts, NULL) == 0) {
848 udpstat.udps_fullsock++;
849 } else {
850 sorwakeup(last->inp_socket);
851 }
852 return;
853 error:
854 m_freem(n);
855 m_freem(opts);
856 return;
857 }
858
859 /*
860 * Notify a udp user of an asynchronous error;
861 * just wake up so that he can collect error status.
862 */
863 void
864 udp_notify(inp, errno)
865 register struct inpcb *inp;
866 int errno;
867 {
868 inp->inp_socket->so_error = errno;
869 sorwakeup(inp->inp_socket);
870 sowwakeup(inp->inp_socket);
871 }
872
873 void
874 udp_ctlinput(cmd, sa, vip)
875 int cmd;
876 struct sockaddr *sa;
877 void *vip;
878 {
879 struct ip *ip = vip;
880 void (*notify)(struct inpcb *, int) = udp_notify;
881 struct in_addr faddr;
882 struct inpcb *inp;
883
884 faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
885 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
886 return;
887
888 if (PRC_IS_REDIRECT(cmd)) {
889 ip = 0;
890 notify = in_rtchange;
891 } else if (cmd == PRC_HOSTDEAD)
892 ip = 0;
893 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
894 return;
895 if (ip) {
896 struct udphdr uh;
897
898 bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof (uh));
899 inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport,
900 ip->ip_src, uh.uh_sport, 0, NULL);
901 if (inp != NULL && inp->inp_socket != NULL) {
902 udp_lock(inp->inp_socket, 1, 0);
903 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
904 WNT_STOPUSING) {
905 udp_unlock(inp->inp_socket, 1, 0);
906 return;
907 }
908 (*notify)(inp, inetctlerrmap[cmd]);
909 udp_unlock(inp->inp_socket, 1, 0);
910 }
911 } else
912 in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
913 }
914
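/*
 * UDP-level socket options.  UDP_NOCKSUM disables the outbound UDP
 * checksum on an IPv4 socket; <SOL_SOCKET,SO_FLUSH> is accepted here and
 * passed to inp_flush() to discard queued packets for this socket's flow.
 * From userland the former would be set roughly as follows (illustrative
 * sketch only, not part of this file):
 *
 *	int one = 1;
 *	setsockopt(s, IPPROTO_UDP, UDP_NOCKSUM, &one, sizeof (one));
 */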
915 int
916 udp_ctloutput(struct socket *so, struct sockopt *sopt)
917 {
918 int error, optval;
919 struct inpcb *inp;
920
921 /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
922 if (sopt->sopt_level != IPPROTO_UDP &&
923 !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH))
924 return (ip_ctloutput(so, sopt));
925
926 error = 0;
927 inp = sotoinpcb(so);
928
929 switch (sopt->sopt_dir) {
930 case SOPT_SET:
931 switch (sopt->sopt_name) {
932 case UDP_NOCKSUM:
933 /* This option is settable only for UDP over IPv4 */
934 if (!(inp->inp_vflag & INP_IPV4)) {
935 error = EINVAL;
936 break;
937 }
938
939 if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
940 sizeof (optval))) != 0)
941 break;
942
943 if (optval != 0)
944 inp->inp_flags |= INP_UDP_NOCKSUM;
945 else
946 inp->inp_flags &= ~INP_UDP_NOCKSUM;
947 break;
948
949 case SO_FLUSH:
950 if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
951 sizeof (optval))) != 0)
952 break;
953
954 error = inp_flush(inp, optval);
955 break;
956
957 default:
958 error = ENOPROTOOPT;
959 break;
960 }
961 break;
962
963 case SOPT_GET:
964 switch (sopt->sopt_name) {
965 case UDP_NOCKSUM:
966 optval = inp->inp_flags & INP_UDP_NOCKSUM;
967 break;
968
969 default:
970 error = ENOPROTOOPT;
971 break;
972 }
973 if (error == 0)
974 error = sooptcopyout(sopt, &optval, sizeof (optval));
975 break;
976 }
977 return (error);
978 }
979
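/*
 * Sysctl handler for net.inet.udp.pcblist: the usual two-pass protocol.
 * With a NULL oldptr it only estimates the required buffer size;
 * otherwise it emits an xinpgen header, one xinpcb per live PCB, and a
 * trailing xinpgen whose generation count lets userland detect changes
 * made while the list was being copied out.
 */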
980 static int
981 udp_pcblist SYSCTL_HANDLER_ARGS
982 {
983 #pragma unused(oidp, arg1, arg2)
984 int error, i, n;
985 struct inpcb *inp, **inp_list;
986 inp_gen_t gencnt;
987 struct xinpgen xig;
988
989 /*
990 * The process of preparing the PCB list is too time-consuming and
991 * resource-intensive to repeat twice on every request.
992 */
993 lck_rw_lock_exclusive(udbinfo.mtx);
994 if (req->oldptr == USER_ADDR_NULL) {
995 n = udbinfo.ipi_count;
996 req->oldidx = 2 * (sizeof xig)
997 + (n + n/8) * sizeof(struct xinpcb);
998 lck_rw_done(udbinfo.mtx);
999 return 0;
1000 }
1001
1002 if (req->newptr != USER_ADDR_NULL) {
1003 lck_rw_done(udbinfo.mtx);
1004 return EPERM;
1005 }
1006
1007 /*
1008 * OK, now we're committed to doing something.
1009 */
1010 gencnt = udbinfo.ipi_gencnt;
1011 n = udbinfo.ipi_count;
1012
1013 bzero(&xig, sizeof(xig));
1014 xig.xig_len = sizeof xig;
1015 xig.xig_count = n;
1016 xig.xig_gen = gencnt;
1017 xig.xig_sogen = so_gencnt;
1018 error = SYSCTL_OUT(req, &xig, sizeof xig);
1019 if (error) {
1020 lck_rw_done(udbinfo.mtx);
1021 return error;
1022 }
1023 /*
1024 * We are done if there is no pcb
1025 */
1026 if (n == 0) {
1027 lck_rw_done(udbinfo.mtx);
1028 return 0;
1029 }
1030
1031 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1032 if (inp_list == 0) {
1033 lck_rw_done(udbinfo.mtx);
1034 return ENOMEM;
1035 }
1036
1037 for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
1038 inp = LIST_NEXT(inp, inp_list)) {
1039 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
1040 inp_list[i++] = inp;
1041 }
1042 n = i;
1043
1044 error = 0;
1045 for (i = 0; i < n; i++) {
1046 inp = inp_list[i];
1047 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
1048 struct xinpcb xi;
1049
1050 bzero(&xi, sizeof(xi));
1051 xi.xi_len = sizeof xi;
1052 /* XXX should avoid extra copy */
1053 inpcb_to_compat(inp, &xi.xi_inp);
1054 if (inp->inp_socket)
1055 sotoxsocket(inp->inp_socket, &xi.xi_socket);
1056 error = SYSCTL_OUT(req, &xi, sizeof xi);
1057 }
1058 }
1059 if (!error) {
1060 /*
1061 * Give the user an updated idea of our state.
1062 * If the generation differs from what we told
1063 * her before, she knows that something happened
1064 * while we were processing this request, and it
1065 * might be necessary to retry.
1066 */
1067 bzero(&xig, sizeof(xig));
1068 xig.xig_len = sizeof xig;
1069 xig.xig_gen = udbinfo.ipi_gencnt;
1070 xig.xig_sogen = so_gencnt;
1071 xig.xig_count = udbinfo.ipi_count;
1072 error = SYSCTL_OUT(req, &xig, sizeof xig);
1073 }
1074 FREE(inp_list, M_TEMP);
1075 lck_rw_done(udbinfo.mtx);
1076 return error;
1077 }
1078
1079 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
1080 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
1081
1082 #if !CONFIG_EMBEDDED
1083
1084 static int
1085 udp_pcblist64 SYSCTL_HANDLER_ARGS
1086 {
1087 #pragma unused(oidp, arg1, arg2)
1088 int error, i, n;
1089 struct inpcb *inp, **inp_list;
1090 inp_gen_t gencnt;
1091 struct xinpgen xig;
1092
1093 /*
1094 * The process of preparing the PCB list is too time-consuming and
1095 * resource-intensive to repeat twice on every request.
1096 */
1097 lck_rw_lock_shared(udbinfo.mtx);
1098 if (req->oldptr == USER_ADDR_NULL) {
1099 n = udbinfo.ipi_count;
1100 req->oldidx = 2 * (sizeof xig)
1101 + (n + n/8) * sizeof(struct xinpcb64);
1102 lck_rw_done(udbinfo.mtx);
1103 return 0;
1104 }
1105
1106 if (req->newptr != USER_ADDR_NULL) {
1107 lck_rw_done(udbinfo.mtx);
1108 return EPERM;
1109 }
1110
1111 /*
1112 * OK, now we're committed to doing something.
1113 */
1114 gencnt = udbinfo.ipi_gencnt;
1115 n = udbinfo.ipi_count;
1116
1117 bzero(&xig, sizeof(xig));
1118 xig.xig_len = sizeof xig;
1119 xig.xig_count = n;
1120 xig.xig_gen = gencnt;
1121 xig.xig_sogen = so_gencnt;
1122 error = SYSCTL_OUT(req, &xig, sizeof xig);
1123 if (error) {
1124 lck_rw_done(udbinfo.mtx);
1125 return error;
1126 }
1127 /*
1128 * We are done if there is no pcb
1129 */
1130 if (n == 0) {
1131 lck_rw_done(udbinfo.mtx);
1132 return 0;
1133 }
1134
1135 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1136 if (inp_list == 0) {
1137 lck_rw_done(udbinfo.mtx);
1138 return ENOMEM;
1139 }
1140
1141 for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
1142 inp = LIST_NEXT(inp, inp_list)) {
1143 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
1144 inp_list[i++] = inp;
1145 }
1146 n = i;
1147
1148 error = 0;
1149 for (i = 0; i < n; i++) {
1150 inp = inp_list[i];
1151 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
1152 struct xinpcb64 xi;
1153
1154 bzero(&xi, sizeof(xi));
1155 xi.xi_len = sizeof xi;
1156 inpcb_to_xinpcb64(inp, &xi);
1157 if (inp->inp_socket)
1158 sotoxsocket64(inp->inp_socket, &xi.xi_socket);
1159 error = SYSCTL_OUT(req, &xi, sizeof xi);
1160 }
1161 }
1162 if (!error) {
1163 /*
1164 * Give the user an updated idea of our state.
1165 * If the generation differs from what we told
1166 * her before, she knows that something happened
1167 * while we were processing this request, and it
1168 * might be necessary to retry.
1169 */
1170 bzero(&xig, sizeof(xig));
1171 xig.xig_len = sizeof xig;
1172 xig.xig_gen = udbinfo.ipi_gencnt;
1173 xig.xig_sogen = so_gencnt;
1174 xig.xig_count = udbinfo.ipi_count;
1175 error = SYSCTL_OUT(req, &xig, sizeof xig);
1176 }
1177 FREE(inp_list, M_TEMP);
1178 lck_rw_done(udbinfo.mtx);
1179 return error;
1180 }
1181
1182 SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
1183 udp_pcblist64, "S,xinpcb64", "List of active UDP sockets");
1184
1185 #endif /* !CONFIG_EMBEDDED */
1186
1187 static int
1188 udp_pcblist_n SYSCTL_HANDLER_ARGS
1189 {
1190 #pragma unused(oidp, arg1, arg2)
1191 int error = 0;
1192
1193 error = get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
1194
1195 return error;
1196 }
1197
1198 SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
1199 udp_pcblist_n, "S,xinpcb_n", "List of active UDP sockets");
1200
1201
1202 __private_extern__ void
1203 udp_get_ports_used(unsigned int ifindex, uint8_t *bitfield)
1204 {
1205 inpcb_get_ports_used(ifindex, bitfield, &udbinfo);
1206 }
1207
1208 __private_extern__ uint32_t
1209 udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
1210 {
1211 return inpcb_count_opportunistic(ifindex, &udbinfo, flags);
1212 }
1213
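/*
 * Derive a non-zero 16-bit identifier from a socket pointer; a value of
 * zero is remapped to 0xffff so that 0 can mean "no socket".
 * udp_output() stores the result in m_pkthdr.socket_id.
 */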
1214 static __inline__ u_int16_t
1215 get_socket_id(struct socket * s)
1216 {
1217 u_int16_t val;
1218
1219 if (s == NULL) {
1220 return (0);
1221 }
1222 val = (u_int16_t)(((uintptr_t)s) / sizeof(struct socket));
1223 if (val == 0) {
1224 val = 0xffff;
1225 }
1226 return (val);
1227 }
1228
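/*
 * Scan a control mbuf for an IP_PKTINFO cmsg.  If ipi_ifindex is set,
 * return the corresponding ifnet in *outif and clear *laddr; otherwise
 * copy ipi_spec_dst into *laddr for use as a temporary source address.
 * Returns 0 on success or an errno on malformed input.
 */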
1229 static int
1230 udp_check_pktinfo(struct mbuf *control, struct ifnet **outif, struct in_addr *laddr)
1231 {
1232 struct cmsghdr *cm = 0;
1233 struct in_pktinfo *pktinfo;
1234 struct ifnet *ifp;
1235
1236 /*
1237 * XXX: Currently, we assume all the optional information is stored
1238 * in a single mbuf.
1239 */
1240 if (control->m_next)
1241 return (EINVAL);
1242
1243 if (control->m_len < CMSG_LEN(0))
1244 return (EINVAL);
1245
1246 for (cm = M_FIRST_CMSGHDR(control); cm; cm = M_NXT_CMSGHDR(control, cm)) {
1247 if (cm->cmsg_len < sizeof(struct cmsghdr) || cm->cmsg_len > control->m_len)
1248 return (EINVAL);
1249
1250 if (cm->cmsg_level != IPPROTO_IP || cm->cmsg_type != IP_PKTINFO)
1251 continue;
1252
1253 if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
1254 return (EINVAL);
1255
1256 pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm);
1257
1258 /* Check for a valid ifindex in pktinfo */
1259 ifnet_head_lock_shared();
1260
1261 if (pktinfo->ipi_ifindex > if_index) {
1262 ifnet_head_done();
1263 return (ENXIO);
1264 }
1265
1266 /* If ipi_ifindex is specified it takes precedence over ipi_spec_dst */
1267
1268 if (pktinfo->ipi_ifindex) {
1269 ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
1270 if (ifp == NULL) {
1271 ifnet_head_done();
1272 return (ENXIO);
1273 }
1274
1275 ifnet_head_done();
1276
1277 if (outif != NULL)
1278 *outif = ifp;
1279 laddr->s_addr = INADDR_ANY;
1280 break;
1281 }
1282
1283 ifnet_head_done();
1284
1285 /* Use the provided ipi_spec_dst address for temp source address */
1286 if (outif != NULL)
1287 *outif = NULL;
1288 *laddr = pktinfo->ipi_spec_dst;
1289 break;
1290 }
1291 return (0);
1292 }
1293
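/*
 * Send one datagram.  Handles the temporary source address/scope from
 * IP_PKTINFO, performs an implicit connect (and later disconnect) for
 * sendto() on an unconnected socket, prepends the udpiphdr, fills in
 * the checksum, and hands the packet to ip_output_list() using the
 * pcb's cached route; flow-advisory feedback from the interface is
 * reported to the caller as ENOBUFS.
 */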
1294 static int
1295 udp_output(inp, m, addr, control, p)
1296 register struct inpcb *inp;
1297 struct mbuf *m;
1298 struct sockaddr *addr;
1299 struct mbuf *control;
1300 struct proc *p;
1301 {
1302 register struct udpiphdr *ui;
1303 register int len = m->m_pkthdr.len;
1304 struct sockaddr_in *sin;
1305 struct in_addr origladdr, laddr, faddr, pi_laddr;
1306 u_short lport, fport;
1307 struct sockaddr_in ifaddr;
1308 int error = 0, udp_dodisconnect = 0, pktinfo = 0;
1309 struct socket *so = inp->inp_socket;
1310 int soopts = 0;
1311 struct mbuf *inpopts;
1312 struct ip_moptions *mopts;
1313 struct route ro;
1314 struct ip_out_args ipoa = { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF };
1315 struct ifnet *outif = NULL;
1316 struct flowadv *adv = &ipoa.ipoa_flowadv;
1317 mbuf_svc_class_t msc = MBUF_SC_UNSPEC;
1318 struct ifnet *origoutifp;
1319 int flowadv = 0;
1320
1321 /* Enable flow advisory only when connected */
1322 flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
1323
1324 pi_laddr.s_addr = INADDR_ANY;
1325
1326 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);
1327
1328 lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1329 if (control != NULL) {
1330 msc = mbuf_service_class_from_control(control);
1331
1332 error = udp_check_pktinfo(control, &outif, &pi_laddr);
1333
1334 m_freem(control);
1335 if (error)
1336 goto release;
1337 pktinfo++;
1338 if (outif != NULL)
1339 ipoa.ipoa_boundif = outif->if_index;
1340 }
1341
1342 KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
1343 inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
1344 (htons((u_short)len + sizeof (struct udphdr))));
1345
1346 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
1347 error = EMSGSIZE;
1348 goto release;
1349 }
1350
1351 if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
1352 /*
1353 * The socket is flow-controlled, drop the packets
1354 * until the inp is not flow controlled
1355 */
1356 error = ENOBUFS;
1357 goto release;
1358 }
1359 /*
1360 * If socket was bound to an ifindex, tell ip_output about it.
1361 * If the ancillary IP_PKTINFO option contains an interface index,
1362 * it takes precedence over the one specified by IP_BOUND_IF.
1363 */
1364 if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
1365 (inp->inp_flags & INP_BOUND_IF)) {
1366 outif = inp->inp_boundifp;
1367 ipoa.ipoa_boundif = outif->if_index;
1368 }
1369 if (inp->inp_flags & INP_NO_IFT_CELLULAR)
1370 ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1371 soopts |= IP_OUTARGS;
1372
1373 /* If there was a routing change, discard cached route and check
1374 * that we have a valid source address.
1375 * Reacquire a new source address if INADDR_ANY was specified
1376 */
1377 if (inp->inp_route.ro_rt != NULL &&
1378 inp->inp_route.ro_rt->generation_id != route_generation) {
1379 struct in_ifaddr *ia;
1380
1381 /* src address is gone? */
1382 if ((ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
1383 if (((inp->inp_flags & INP_INADDR_ANY) == 0) ||
1384 (so->so_state & SS_ISCONNECTED)) {
1385 /* Rdar://5448998
1386 * If the source address is gone, return an error if:
1387 * - the source was specified
1388 * - the socket was already connected
1389 */
1390 soevent(so,
1391 (SO_FILT_HINT_LOCKED |
1392 SO_FILT_HINT_NOSRCADDR));
1393 error = EADDRNOTAVAIL;
1394 goto release;
1395 } else {
1396 /* new src will be set later */
1397 inp->inp_laddr.s_addr = INADDR_ANY;
1398 inp->inp_last_outifp = NULL;
1399 }
1400 }
1401 if (ia != NULL)
1402 IFA_REMREF(&ia->ia_ifa);
1403 if (inp->inp_route.ro_rt != NULL)
1404 rtfree(inp->inp_route.ro_rt);
1405 inp->inp_route.ro_rt = NULL;
1406 }
1407
1408 origoutifp = inp->inp_last_outifp;
1409
1410 /* IP_PKTINFO option check.
1411 * If a temporary scope or src address is provided, use it for this packet only
1412 * and make sure we forget it after sending this datagram.
1413 */
1414
1415 if (pi_laddr.s_addr != INADDR_ANY ||
1416 (ipoa.ipoa_boundif != IFSCOPE_NONE && pktinfo)) {
1417 laddr = pi_laddr; /* temp src address for this datagram only */
1418 origladdr.s_addr = INADDR_ANY;
1419 udp_dodisconnect = 1; /* we don't want to keep the laddr or route */
1420 inp->inp_flags |= INP_INADDR_ANY; /* remember we don't care about src addr.*/
1421 } else {
1422 origladdr = laddr = inp->inp_laddr;
1423 }
1424
1425 origoutifp = inp->inp_last_outifp;
1426 faddr = inp->inp_faddr;
1427 lport = inp->inp_lport;
1428 fport = inp->inp_fport;
1429
1430 if (addr) {
1431 sin = (struct sockaddr_in *)(void *)addr;
1432 if (faddr.s_addr != INADDR_ANY) {
1433 error = EISCONN;
1434 goto release;
1435 }
1436 if (lport == 0) {
1437 /*
1438 * In case we don't have a local port set, go through the full connect.
1439 * We don't have a local port yet (i.e., we can't be looked up),
1440 * so it's not an issue if the input runs at the same time we do this.
1441 */
1442
1443 if (pi_laddr.s_addr != INADDR_ANY) /* if we have a source address specified, use that */
1444 inp->inp_laddr = pi_laddr;
1445 error = in_pcbconnect(inp, addr, p, &outif); /* if a scope is specified, use it */
1446 if (error) {
1447 goto release;
1448 }
1449 laddr = inp->inp_laddr;
1450 lport = inp->inp_lport;
1451 faddr = inp->inp_faddr;
1452 fport = inp->inp_fport;
1453 udp_dodisconnect = 1;
1454 ipoa.ipoa_boundif = (outif != NULL) ?
1455 outif->if_index : IFSCOPE_NONE;
1456 }
1457 else {
1458 /* Fast path case:
1459 * we have a full address and a local port, so
1460 * use that info to build the packet without changing the pcb
1461 * and interfering with the input path. See 3851370.
1462 * Note: we may have a scope from IP_PKTINFO, but
1463 * priority is always given to the scope provided by INP_BOUND_IF.
1464 */
1465 if (laddr.s_addr == INADDR_ANY) {
1466 if ((error = in_pcbladdr(inp, addr, &ifaddr, &outif)) != 0)
1467 goto release;
1468 laddr = ifaddr.sin_addr;
1469 inp->inp_flags |= INP_INADDR_ANY; /* from pcbconnect: remember we don't care about src addr.*/
1470 ipoa.ipoa_boundif = (outif != NULL) ?
1471 outif->if_index : IFSCOPE_NONE;
1472 }
1473
1474 faddr = sin->sin_addr;
1475 fport = sin->sin_port;
1476 }
1477 } else {
1478 if (faddr.s_addr == INADDR_ANY) {
1479 error = ENOTCONN;
1480 goto release;
1481 }
1482 }
1483
1484 #if CONFIG_MACF_NET
1485 mac_mbuf_label_associate_inpcb(inp, m);
1486 #endif
1487 if (inp->inp_flowhash == 0)
1488 inp->inp_flowhash = inp_calc_flowhash(inp);
1489
1490 /*
1491 * Calculate data length and get a mbuf
1492 * for UDP and IP headers.
1493 */
1494 M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT);
1495 if (m == 0) {
1496 error = ENOBUFS;
1497 goto abort;
1498 }
1499
1500 /*
1501 * Fill in mbuf with extended UDP header
1502 * and addresses and length put into network format.
1503 */
1504 ui = mtod(m, struct udpiphdr *);
1505 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1506 ui->ui_pr = IPPROTO_UDP;
1507 ui->ui_src = laddr;
1508 ui->ui_dst = faddr;
1509 ui->ui_sport = lport;
1510 ui->ui_dport = fport;
1511 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1512
1513 /*
1514 * Set up checksum and output datagram.
1515 */
1516 if (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM)) {
1517 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1518 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1519 m->m_pkthdr.csum_flags = CSUM_UDP;
1520 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1521 } else {
1522 ui->ui_sum = 0;
1523 }
1524 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1525 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1526 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1527 udpstat.udps_opackets++;
1528
1529 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1530 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1531
1532 #if IPSEC
1533 if (ipsec_bypass == 0 && ipsec_setsocket(m, inp->inp_socket) != 0) {
1534 error = ENOBUFS;
1535 goto abort;
1536 }
1537 #endif /*IPSEC*/
1538
1539 inpopts = inp->inp_options;
1540 soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1541 mopts = inp->inp_moptions;
1542 if (mopts != NULL) {
1543 IMO_LOCK(mopts);
1544 IMO_ADDREF_LOCKED(mopts);
1545 if (IN_MULTICAST(ntohl(ui->ui_dst.s_addr)) &&
1546 mopts->imo_multicast_ifp != NULL) {
1547 inp->inp_last_outifp = mopts->imo_multicast_ifp;
1548 }
1549 IMO_UNLOCK(mopts);
1550 }
1551
1552 /* Copy the cached route and take an extra reference */
1553 inp_route_copyout(inp, &ro);
1554
1555 set_packet_service_class(m, so, msc, 0);
1556 m->m_pkthdr.socket_id = get_socket_id(inp->inp_socket);
1557 m->m_pkthdr.m_flowhash = inp->inp_flowhash;
1558 m->m_pkthdr.m_fhflags |= PF_TAG_FLOWHASH;
1559 if (flowadv)
1560 m->m_pkthdr.m_fhflags |= PF_TAG_FLOWADV;
1561
1562 if (ipoa.ipoa_boundif != IFSCOPE_NONE)
1563 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1564
1565 if (laddr.s_addr != INADDR_ANY)
1566 ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
1567
1568 inp->inp_sndinprog_cnt++;
1569
1570 socket_unlock(so, 0);
1571 error = ip_output_list(m, 0, inpopts, &ro, soopts, mopts, &ipoa);
1572 m = NULL;
1573 socket_lock(so, 0);
1574 if (mopts != NULL)
1575 IMO_REMREF(mopts);
1576
1577 if (error == 0 && nstat_collect) {
1578 locked_add_64(&inp->inp_stat->txpackets, 1);
1579 locked_add_64(&inp->inp_stat->txbytes, len);
1580 }
1581
1582 if (flowadv && (adv->code == FADV_FLOW_CONTROLLED ||
1583 adv->code == FADV_SUSPENDED)) {
1584 /* return a hint to the application that
1585 * the packet has been dropped
1586 */
1587 error = ENOBUFS;
1588 inp_set_fc_state(inp, adv->code);
1589 }
1590
1591 VERIFY(inp->inp_sndinprog_cnt > 0);
1592 if ( --inp->inp_sndinprog_cnt == 0)
1593 inp->inp_flags &= ~(INP_FC_FEEDBACK);
1594
1595 /* Synchronize PCB cached route */
1596 inp_route_copyin(inp, &ro);
1597
1598 abort:
1599 if (udp_dodisconnect) {
1600 /* Always discard the cached route for unconnected socket */
1601 if (inp->inp_route.ro_rt != NULL) {
1602 rtfree(inp->inp_route.ro_rt);
1603 inp->inp_route.ro_rt = NULL;
1604 }
1605 in_pcbdisconnect(inp);
1606 inp->inp_laddr = origladdr; /* XXX rehash? */
1607 inp->inp_last_outifp = origoutifp;
1608 } else if (inp->inp_route.ro_rt != NULL) {
1609 struct rtentry *rt = inp->inp_route.ro_rt;
1610 struct ifnet *outifp;
1611
1612 if (rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST))
1613 rt = NULL; /* unusable */
1614 /*
1615 * Always discard if it is a multicast or broadcast route.
1616 */
1617 if (rt == NULL) {
1618 rtfree(inp->inp_route.ro_rt);
1619 inp->inp_route.ro_rt = NULL;
1620 }
1621 /*
1622 * If the destination route is unicast, update outifp with
1623 * that of the route interface used by IP.
1624 */
1625 if (rt != NULL && (outifp = rt->rt_ifp) != inp->inp_last_outifp)
1626 inp->inp_last_outifp = outifp;
1627 }
1628
1629 release:
1630 if (m != NULL)
1631 m_freem(m);
1632 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
1633 return (error);
1634 }
1635
1636 u_int32_t udp_sendspace = 9216; /* really max datagram size */
1637 /* 187 1K datagrams (approx 192 KB) */
1638 u_int32_t udp_recvspace = 187 * (1024 +
1639 #if INET6
1640 sizeof(struct sockaddr_in6)
1641 #else
1642 sizeof(struct sockaddr_in)
1643 #endif
1644 );
1645
1646 /* Check that the values of udp send and recv space do not exceed sb_max */
1647 static int
1648 sysctl_udp_sospace(struct sysctl_oid *oidp, __unused void *arg1,
1649 __unused int arg2, struct sysctl_req *req) {
1650 u_int32_t new_value = 0, *space_p = NULL;
1651 int changed = 0, error = 0;
1652 u_quad_t sb_effective_max = (sb_max/ (MSIZE+MCLBYTES)) * MCLBYTES;
1653
1654 switch (oidp->oid_number) {
1655 case UDPCTL_RECVSPACE:
1656 space_p = &udp_recvspace;
1657 break;
1658 case UDPCTL_MAXDGRAM:
1659 space_p = &udp_sendspace;
1660 break;
1661 default:
1662 return EINVAL;
1663 }
1664 error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
1665 &new_value, &changed);
1666 if (changed) {
1667 if (new_value > 0 && new_value <= sb_effective_max) {
1668 *space_p = new_value;
1669 } else {
1670 error = ERANGE;
1671 }
1672 }
1673 return error;
1674 }
1675
1676 SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1677 &udp_recvspace, 0, &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
1678
1679 SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1680 &udp_sendspace, 0, &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
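/*
 * Given the declarations above, these limits appear in userland as
 * net.inet.udp.recvspace and net.inet.udp.maxdgram; sysctl_udp_sospace()
 * rejects values outside (0, sb_effective_max] with ERANGE.
 */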
1681
1682 static int
1683 udp_abort(struct socket *so)
1684 {
1685 struct inpcb *inp;
1686
1687 inp = sotoinpcb(so);
1688 if (inp == 0)
1689 panic("udp_abort: so=%p null inp\n", so); /* ??? possible? panic instead? */
1690 soisdisconnected(so);
1691 in_pcbdetach(inp);
1692 return 0;
1693 }
1694
1695 static int
1696 udp_attach(struct socket *so, __unused int proto, struct proc *p)
1697 {
1698 struct inpcb *inp;
1699 int error;
1700
1701 inp = sotoinpcb(so);
1702 if (inp != 0)
1703 panic ("udp_attach so=%p inp=%p\n", so, inp);
1704
1705 error = in_pcballoc(so, &udbinfo, p);
1706 if (error)
1707 return error;
1708 error = soreserve(so, udp_sendspace, udp_recvspace);
1709 if (error)
1710 return error;
1711 inp = (struct inpcb *)so->so_pcb;
1712 inp->inp_vflag |= INP_IPV4;
1713 inp->inp_ip_ttl = ip_defttl;
1714 if (nstat_collect)
1715 nstat_udp_new_pcb(inp);
1716 return 0;
1717 }
1718
1719 static int
1720 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
1721 {
1722 struct inpcb *inp;
1723 int error;
1724
1725 if (nam->sa_family != 0 && nam->sa_family != AF_INET
1726 && nam->sa_family != AF_INET6) {
1727 return EAFNOSUPPORT;
1728 }
1729 inp = sotoinpcb(so);
1730 if (inp == 0)
1731 return EINVAL;
1732 error = in_pcbbind(inp, nam, p);
1733 return error;
1734 }
1735
1736 static int
1737 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
1738 {
1739 struct inpcb *inp;
1740 int error;
1741
1742 inp = sotoinpcb(so);
1743 if (inp == 0)
1744 return EINVAL;
1745 if (inp->inp_faddr.s_addr != INADDR_ANY)
1746 return EISCONN;
1747 error = in_pcbconnect(inp, nam, p, NULL);
1748 if (error == 0) {
1749 soisconnected(so);
1750 if (inp->inp_flowhash == 0)
1751 inp->inp_flowhash = inp_calc_flowhash(inp);
1752 }
1753 return error;
1754 }
1755
1756 static int
1757 udp_detach(struct socket *so)
1758 {
1759 struct inpcb *inp;
1760
1761 inp = sotoinpcb(so);
1762 if (inp == 0)
1763 panic("udp_detach: so=%p null inp\n", so); /* ??? possible? panic instead? */
1764 in_pcbdetach(inp);
1765 inp->inp_state = INPCB_STATE_DEAD;
1766 return 0;
1767 }
1768
1769 static int
1770 udp_disconnect(struct socket *so)
1771 {
1772 struct inpcb *inp;
1773
1774 inp = sotoinpcb(so);
1775 if (inp == 0)
1776 return EINVAL;
1777 if (inp->inp_faddr.s_addr == INADDR_ANY)
1778 return ENOTCONN;
1779
1780 in_pcbdisconnect(inp);
1781
1782 /* reset flow controlled state, just in case */
1783 inp_reset_fc_state(inp);
1784
1785 inp->inp_laddr.s_addr = INADDR_ANY;
1786 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1787 inp->inp_last_outifp = NULL;
1788 return 0;
1789 }
1790
1791 static int
1792 udp_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *addr,
1793 struct mbuf *control, struct proc *p)
1794 {
1795 struct inpcb *inp;
1796
1797 inp = sotoinpcb(so);
1798 if (inp == 0) {
1799 m_freem(m);
1800 return EINVAL;
1801 }
1802
1803 return udp_output(inp, m, addr, control, p);
1804 }
1805
1806 int
1807 udp_shutdown(struct socket *so)
1808 {
1809 struct inpcb *inp;
1810
1811 inp = sotoinpcb(so);
1812 if (inp == 0)
1813 return EINVAL;
1814 socantsendmore(so);
1815 return 0;
1816 }
1817
1818 struct pr_usrreqs udp_usrreqs = {
1819 udp_abort, pru_accept_notsupp, udp_attach, udp_bind, udp_connect,
1820 pru_connect2_notsupp, in_control, udp_detach, udp_disconnect,
1821 pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
1822 pru_rcvoob_notsupp, udp_send, pru_sense_null, udp_shutdown,
1823 in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
1824 };
1825
1826
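/*
 * Socket lock/unlock for UDP use the per-inpcb mutex.  Both routines
 * also record the caller's return address in the socket's lock history
 * (lock_lr/unlock_lr), which solockhistory_nr() reports from the panic
 * paths below.
 */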
1827 int
1828 udp_lock(struct socket *so, int refcount, void *debug)
1829 {
1830 void *lr_saved;
1831
1832 if (debug == NULL)
1833 lr_saved = __builtin_return_address(0);
1834 else
1835 lr_saved = debug;
1836
1837 if (so->so_pcb) {
1838 lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
1839 LCK_MTX_ASSERT_NOTOWNED);
1840 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
1841 } else {
1842 panic("udp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
1843 so, lr_saved, solockhistory_nr(so));
1844 /* NOTREACHED */
1845 }
1846 if (refcount)
1847 so->so_usecount++;
1848
1849 so->lock_lr[so->next_lock_lr] = lr_saved;
1850 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
1851 return (0);
1852 }
1853
1854 int
1855 udp_unlock(struct socket *so, int refcount, void *debug)
1856 {
1857 void *lr_saved;
1858
1859 if (debug == NULL)
1860 lr_saved = __builtin_return_address(0);
1861 else
1862 lr_saved = debug;
1863
1864 if (refcount)
1865 so->so_usecount--;
1866
1867 if (so->so_pcb == NULL) {
1868 panic("udp_unlock: so=%p NO PCB! lr=%p lrh= %s\n",
1869 so, lr_saved, solockhistory_nr(so));
1870 /* NOTREACHED */
1871 } else {
1872 lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
1873 LCK_MTX_ASSERT_OWNED);
1874 so->unlock_lr[so->next_unlock_lr] = lr_saved;
1875 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
1876 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
1877 }
1878
1879
1880 return (0);
1881 }
1882
1883 lck_mtx_t *
1884 udp_getlock(struct socket *so, __unused int locktype)
1885 {
1886 struct inpcb *inp = sotoinpcb(so);
1887
1888
1889 if (so->so_pcb)
1890 return(&inp->inpcb_mtx);
1891 else {
1892 panic("udp_getlock: so=%p NULL so_pcb lrh= %s\n",
1893 so, solockhistory_nr(so));
1894 return (so->so_proto->pr_domain->dom_mtx);
1895 }
1896 }
1897
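/*
 * Slow-timeout garbage collection: detach and dispose of PCBs whose
 * wantcnt has reached WNT_STOPUSING.  If the exclusive pcbinfo lock is
 * not immediately available the pass is skipped once (tracked by
 * udp_gc_done); a second consecutive miss blocks for the lock.
 */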
1898 void
1899 udp_slowtimo()
1900 {
1901 struct inpcb *inp, *inpnxt;
1902 struct socket *so;
1903 struct inpcbinfo *pcbinfo = &udbinfo;
1904
1905 if (lck_rw_try_lock_exclusive(pcbinfo->mtx) == FALSE) {
1906 if (udp_gc_done == TRUE) {
1907 udp_gc_done = FALSE;
1908 return; /* couldn't get the lock, better lock next time */
1909 }
1910 lck_rw_lock_exclusive(pcbinfo->mtx);
1911 }
1912
1913 udp_gc_done = TRUE;
1914
1915 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
1916 inpnxt = inp->inp_list.le_next;
1917
1918 if (inp->inp_wantcnt != WNT_STOPUSING)
1919 continue;
1920
1921 so = inp->inp_socket;
1922 if (!lck_mtx_try_lock(&inp->inpcb_mtx)) /* skip if busy, no hurry for cleanup... */
1923 continue;
1924
1925 if (so->so_usecount == 0) {
1926 if (inp->inp_state != INPCB_STATE_DEAD) {
1927 #if INET6
1928 if (INP_CHECK_SOCKAF(so, AF_INET6))
1929 in6_pcbdetach(inp);
1930 else
1931 #endif /* INET6 */
1932 in_pcbdetach(inp);
1933 }
1934 in_pcbdispose(inp);
1935 } else {
1936 lck_mtx_unlock(&inp->inpcb_mtx);
1937 }
1938 }
1939 lck_rw_done(pcbinfo->mtx);
1940 }
1941
1942 int
1943 ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr )
1944 {
1945 if ( dstaddr == srcaddr ){
1946 return 0;
1947 }
1948 return 1;
1949 }
1950
1951 void
1952 udp_in_cksum_stats(u_int32_t len)
1953 {
1954 udps_in_sw_cksum++;
1955 udps_in_sw_cksum_bytes += len;
1956 }
1957
1958 void
1959 udp_out_cksum_stats(u_int32_t len)
1960 {
1961 udps_out_sw_cksum++;
1962 udps_out_sw_cksum_bytes += len;
1963 }