1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
61 * $FreeBSD: src/sys/netinet/udp_usrreq.c,v 1.64.2.13 2001/08/08 18:59:54 ghelmer Exp $
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/domain.h>
70 #include <sys/protosw.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/sysctl.h>
74 #include <sys/syslog.h>
75
76 #include <kern/zalloc.h>
77
78 #include <net/if.h>
79 #include <net/if_types.h>
80 #include <net/route.h>
81
82 #include <netinet/in.h>
83 #include <netinet/in_systm.h>
84 #include <netinet/ip.h>
85 #if INET6
86 #include <netinet/ip6.h>
87 #endif
88 #include <netinet/in_pcb.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip_var.h>
91 #if INET6
92 #include <netinet6/in6_pcb.h>
93 #include <netinet6/ip6_var.h>
94 #endif
95 #include <netinet/ip_icmp.h>
96 #include <netinet/icmp_var.h>
97 #include <netinet/udp.h>
98 #include <netinet/udp_var.h>
99 #include <sys/kdebug.h>
100
101 #if IPSEC
102 #include <netinet6/ipsec.h>
103 #include <netinet6/esp.h>
104 extern int ipsec_bypass;
105 #endif /*IPSEC*/
106
107
108 #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0)
109 #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2)
110 #define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1)
111 #define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3)
112 #define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8))
113 #define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)
114
115 /*
116 * UDP protocol implementation.
117 * Per RFC 768, August, 1980.
118 */
119 #ifndef COMPAT_42
120 static int udpcksum = 1;
121 #else
122 static int udpcksum = 0; /* XXX */
123 #endif
124 SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
125 &udpcksum, 0, "");
126
127 static u_int32_t udps_in_sw_cksum;
128 SYSCTL_UINT(_net_inet_udp, OID_AUTO, in_sw_cksum, CTLFLAG_RD,
129 &udps_in_sw_cksum, 0,
130 "Number of received packets checksummed in software");
131
132 static u_int64_t udps_in_sw_cksum_bytes;
133 SYSCTL_QUAD(_net_inet_udp, OID_AUTO, in_sw_cksum_bytes, CTLFLAG_RD,
134 &udps_in_sw_cksum_bytes,
135 "Amount of received data checksummed in software");
136
137 static u_int32_t udps_out_sw_cksum;
138 SYSCTL_UINT(_net_inet_udp, OID_AUTO, out_sw_cksum, CTLFLAG_RD,
139 &udps_out_sw_cksum, 0,
140 "Number of transmitted packets checksummed in software");
141
142 static u_int64_t udps_out_sw_cksum_bytes;
143 SYSCTL_QUAD(_net_inet_udp, OID_AUTO, out_sw_cksum_bytes, CTLFLAG_RD,
144 &udps_out_sw_cksum_bytes,
145 "Amount of transmitted data checksummed in software");
146
147 int log_in_vain = 0;
148 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
149 &log_in_vain, 0, "Log all incoming UDP packets");
150
151 static int blackhole = 0;
152 SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
153 &blackhole, 0, "Do not send port unreachables for refused connects");
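
/*
 * Illustration (not from the original source): the counters and knobs above
 * surface to userland as "net.inet.udp.*" sysctls.  A minimal sketch of
 * reading one counter and flipping one knob with sysctlbyname(3); error
 * handling is trimmed and the write needs root.  Kept under #if 0 so it is
 * never built.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	u_int32_t pkts = 0;
	int on = 1;
	size_t len = sizeof (pkts);

	/* Counter exported by the SYSCTL_UINT(in_sw_cksum) entry above. */
	if (sysctlbyname("net.inet.udp.in_sw_cksum", &pkts, &len, NULL, 0) == 0)
		printf("UDP packets checksummed in software on input: %u\n", pkts);

	/* Knob exported by the SYSCTL_INT(blackhole) entry above. */
	(void) sysctlbyname("net.inet.udp.blackhole", NULL, NULL, &on, sizeof (on));
	return (0);
}
#endif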
154
155 struct inpcbhead udb; /* from udp_var.h */
156 #define udb6 udb /* for KAME src sync over BSD*'s */
157 struct inpcbinfo udbinfo;
158
159 #ifndef UDBHASHSIZE
160 #define UDBHASHSIZE 16
161 #endif
162
163 extern int esp_udp_encap_port;
164
165 extern void ipfwsyslog( int level, const char *format,...);
166
167 extern int fw_verbose;
168 static int udp_gc_done = FALSE; /* TRUE if garbage collection was performed during the last slowtimo */
169
170 #if IPFIREWALL
171 #define log_in_vain_log( a ) { \
172 if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
173 ipfwsyslog a ; \
174 } \
175 else log a ; \
176 }
177 #else
178 #define log_in_vain_log( a ) { log a; }
179 #endif
180
181 struct udpstat udpstat; /* from udp_var.h */
182 SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD,
183 &udpstat, udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
184 SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount, CTLFLAG_RD,
185 &udbinfo.ipi_count, 0, "Number of active PCBs");
186
187 __private_extern__ int udp_use_randomport = 1;
188 SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports, CTLFLAG_RW,
189 &udp_use_randomport, 0, "Randomize UDP port numbers");
190
191 #if INET6
192 struct udp_in6 {
193 struct sockaddr_in6 uin6_sin;
194 u_char uin6_init_done : 1;
195 };
196 struct udp_ip6 {
197 struct ip6_hdr uip6_ip6;
198 u_char uip6_init_done : 1;
199 };
200 static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
201 static void udp_append(struct inpcb *last, struct ip *ip,
202 struct mbuf *n, int off, struct sockaddr_in *pudp_in,
203 struct udp_in6 *pudp_in6, struct udp_ip6 *pudp_ip6);
204 #else
205 static void udp_append(struct inpcb *last, struct ip *ip,
206 struct mbuf *n, int off, struct sockaddr_in *pudp_in);
207 #endif
208
209 static int udp_detach(struct socket *so);
210 static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
211 struct mbuf *, struct proc *);
212 extern int ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr );
213
214 void
215 udp_init()
216 {
217 vm_size_t str_size;
218 struct inpcbinfo *pcbinfo;
219
220
221 LIST_INIT(&udb);
222 udbinfo.listhead = &udb;
223 udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
224 udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB,
225 &udbinfo.porthashmask);
226 #ifdef __APPLE__
227 str_size = (vm_size_t) sizeof(struct inpcb);
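	/*
	 * Note (added for clarity): the zone below is sized for roughly 80000
	 * inpcbs (a maximum of 80000 * str_size bytes), grown in 8 KB
	 * allocations; zinit() returns a zone_t, stored here via a void * cast.
	 */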
228 udbinfo.ipi_zone = (void *) zinit(str_size, 80000*str_size, 8192, "udpcb");
229
230 pcbinfo = &udbinfo;
231 /*
232 * allocate lock group attribute and group for udp pcb mutexes
233 */
234 pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
235
236 pcbinfo->mtx_grp = lck_grp_alloc_init("udppcb", pcbinfo->mtx_grp_attr);
237
238 pcbinfo->mtx_attr = lck_attr_alloc_init();
239
240 if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
241 return; /* pretty much dead if this fails... */
242 #else
243 udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets,
244 ZONE_INTERRUPT, 0);
245 #endif
246 }
247
248 void
249 udp_input(m, iphlen)
250 register struct mbuf *m;
251 int iphlen;
252 {
253 register struct ip *ip;
254 register struct udphdr *uh;
255 register struct inpcb *inp;
256 struct mbuf *opts = 0;
257 int len;
258 struct ip save_ip;
259 struct sockaddr *append_sa;
260 struct inpcbinfo *pcbinfo = &udbinfo;
261 struct sockaddr_in udp_in = {
262 sizeof (udp_in), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }
263 };
264 #if INET6
265 struct udp_in6 udp_in6 = {
266 { sizeof (udp_in6.uin6_sin), AF_INET6, 0, 0,
267 IN6ADDR_ANY_INIT, 0 },
268 0
269 };
270 struct udp_ip6 udp_ip6;
271 #endif /* INET6 */
272
273 udpstat.udps_ipackets++;
274
275 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0,0,0,0,0);
276 if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
277 m->m_pkthdr.csum_flags = 0; /* invalidate hwcksum for UDP */
278
279 /*
280 * Strip IP options, if any; should skip this,
281 * make available to user, and use on returned packets,
282 * but we don't yet have a way to check the checksum
283 * with options still present.
284 */
285 if (iphlen > sizeof (struct ip)) {
286 ip_stripoptions(m, (struct mbuf *)0);
287 iphlen = sizeof(struct ip);
288 }
289
290 /*
291 * Get IP and UDP header together in first mbuf.
292 */
293 ip = mtod(m, struct ip *);
294 if (m->m_len < iphlen + sizeof(struct udphdr)) {
295 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
296 udpstat.udps_hdrops++;
297 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
298 return;
299 }
300 ip = mtod(m, struct ip *);
301 }
302 uh = (struct udphdr *)((caddr_t)ip + iphlen);
303
304 /* destination port of 0 is illegal, based on RFC 768. */
305 if (uh->uh_dport == 0)
306 goto bad;
307
308 KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
309 ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);
310
311 /*
312 * Make mbuf data length reflect UDP length.
313 * If not enough data to reflect UDP length, drop.
314 */
315 len = ntohs((u_short)uh->uh_ulen);
316 if (ip->ip_len != len) {
317 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
318 udpstat.udps_badlen++;
319 goto bad;
320 }
321 m_adj(m, len - ip->ip_len);
322 /* ip->ip_len = len; */
323 }
324 /*
325 * Save a copy of the IP header in case we want to restore it
326 * for sending an ICMP error message in response.
327 */
328 save_ip = *ip;
329
330 /*
331 * Checksum extended UDP header and data.
332 */
333 if (uh->uh_sum) {
334 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
335 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
336 uh->uh_sum = m->m_pkthdr.csum_data;
337 else
338 goto doudpcksum;
339 uh->uh_sum ^= 0xffff;
340 } else {
341 char b[9];
342 doudpcksum:
343 *(uint32_t*)&b[0] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0];
344 *(uint32_t*)&b[4] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4];
345 *(uint8_t*)&b[8] = *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8];
346
347 bzero(((struct ipovly *)ip)->ih_x1, 9);
348 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
349 uh->uh_sum = in_cksum(m, len + sizeof (struct ip));
350
351 *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0] = *(uint32_t*)&b[0];
352 *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4] = *(uint32_t*)&b[4];
353 *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8] = *(uint8_t*)&b[8];
354 udp_in_cksum_stats(len);
355 }
356 if (uh->uh_sum) {
357 udpstat.udps_badsum++;
358 m_freem(m);
359 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
360 return;
361 }
362 }
363 #ifndef __APPLE__
364 else
365 udpstat.udps_nosum++;
366 #endif
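	/*
	 * Illustration (not from the original source): the block above builds the
	 * RFC 768 pseudo-header in place -- it saves the 9 bytes of ih_x1, zeroes
	 * them, sets ih_len to uh_ulen so the IP overlay lines up with the
	 * pseudo-header, lets in_cksum() run over pseudo-header + UDP header +
	 * data, then restores the saved bytes.  A correct checksum folds to zero.
	 * A stand-alone sketch of the same sum (assumes 16-bit aligned input and
	 * uh_sum already zeroed in the buffer); never compiled.
	 */
#if 0
	struct pseudo_hdr {
		struct in_addr	ph_src;		/* ip_src */
		struct in_addr	ph_dst;		/* ip_dst */
		u_char		ph_zero;	/* always 0 */
		u_char		ph_proto;	/* IPPROTO_UDP */
		u_short		ph_ulen;	/* same value as uh_ulen */
	};

	static u_short
	udp_cksum_sketch(const struct pseudo_hdr *ph, const u_char *udp, int ulen)
	{
		u_int32_t sum = 0;
		const u_short *w = (const u_short *)ph;
		u_short odd = 0;
		int i;

		for (i = 0; i < (int)(sizeof (*ph) / sizeof (u_short)); i++)
			sum += w[i];
		for (w = (const u_short *)udp; ulen > 1; ulen -= 2)
			sum += *w++;
		if (ulen == 1) {			/* pad an odd trailing byte */
			*(u_char *)&odd = *(const u_char *)w;
			sum += odd;
		}
		while (sum >> 16)			/* fold carries */
			sum = (sum & 0xffff) + (sum >> 16);
		return (~sum & 0xffff);
	}
#endif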
367
368 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
369 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
370
371 int reuse_sock = 0, mcast_delivered = 0;
372
373 lck_rw_lock_shared(pcbinfo->mtx);
374 /*
375 * Deliver a multicast or broadcast datagram to *all* sockets
376 * for which the local and remote addresses and ports match
377 * those of the incoming datagram. This allows more than
378 * one process to receive multi/broadcasts on the same port.
379 * (This really ought to be done for unicast datagrams as
380 * well, but that would cause problems with existing
381 * applications that open both address-specific sockets and
382 * a wildcard socket listening to the same port -- they would
383 * end up receiving duplicates of every unicast datagram.
384 * Those applications open the multiple sockets to overcome an
385 * inadequacy of the UDP socket interface, but for backwards
386 * compatibility we avoid the problem here rather than
387 * fixing the interface. Maybe 4.5BSD will remedy this?)
388 */
389
390
391 /*
392 * Construct sockaddr format source address.
393 */
394 udp_in.sin_port = uh->uh_sport;
395 udp_in.sin_addr = ip->ip_src;
396 /*
397 * Locate pcb(s) for datagram.
398 * (Algorithm copied from raw_intr().)
399 */
400 #if INET6
401 udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
402 #endif
403 LIST_FOREACH(inp, &udb, inp_list) {
404 if (inp->inp_socket == NULL)
405 continue;
406 if (inp != sotoinpcb(inp->inp_socket))
407 panic("udp_input: bad so back ptr inp=%p\n", inp);
408 #if INET6
409 if ((inp->inp_vflag & INP_IPV4) == 0)
410 continue;
411 #endif
412
413 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
414 continue;
415 }
416
417 udp_lock(inp->inp_socket, 1, 0);
418
419 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
420 udp_unlock(inp->inp_socket, 1, 0);
421 continue;
422 }
423
424 if (inp->inp_lport != uh->uh_dport) {
425 udp_unlock(inp->inp_socket, 1, 0);
426 continue;
427 }
428 if (inp->inp_laddr.s_addr != INADDR_ANY) {
429 if (inp->inp_laddr.s_addr !=
430 ip->ip_dst.s_addr) {
431 udp_unlock(inp->inp_socket, 1, 0);
432 continue;
433 }
434 }
435 if (inp->inp_faddr.s_addr != INADDR_ANY) {
436 if (inp->inp_faddr.s_addr !=
437 ip->ip_src.s_addr ||
438 inp->inp_fport != uh->uh_sport) {
439 udp_unlock(inp->inp_socket, 1, 0);
440 continue;
441 }
442 }
443
444 reuse_sock = inp->inp_socket->so_options & (SO_REUSEPORT|SO_REUSEADDR);
445 {
446 #if IPSEC
447 int skipit = 0;
448 /* check AH/ESP integrity. */
449 if (ipsec_bypass == 0) {
450 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
451 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
452 /* do not inject data to pcb */
453 skipit = 1;
454 }
455 }
456 if (skipit == 0)
457 #endif /*IPSEC*/
458 {
459 struct mbuf *n = NULL;
460
461 if (reuse_sock)
462 n = m_copy(m, 0, M_COPYALL);
463 #if INET6
464 udp_append(inp, ip, m,
465 iphlen + sizeof(struct udphdr),
466 &udp_in, &udp_in6, &udp_ip6);
467 #else
468 udp_append(inp, ip, m,
469 iphlen + sizeof(struct udphdr),
470 &udp_in);
471 #endif /* INET6 */
472 mcast_delivered++;
473
474 m = n;
475 }
476 udp_unlock(inp->inp_socket, 1, 0);
477 }
478 /*
479 * Don't look for additional matches if this one does
480 * not have either the SO_REUSEPORT or SO_REUSEADDR
481 * socket options set. This heuristic avoids searching
482 * through all pcbs in the common case of a non-shared
483 * port. It assumes that an application will never
484 * clear these options after setting them.
485 */
486 if (reuse_sock == 0 || m == NULL)
487 break;
488 /*
489 * Recompute IP and UDP header pointers for new mbuf
490 */
491 ip = mtod(m, struct ip *);
492 uh = (struct udphdr *)((caddr_t)ip + iphlen);
493 }
494 lck_rw_done(pcbinfo->mtx);
495
496 if (mcast_delivered == 0) {
497 /*
498 * No matching pcb found; discard datagram.
499 * (No need to send an ICMP Port Unreachable
500 * for a broadcast or multicast datagram.)
501 */
502 udpstat.udps_noportbcast++;
503 goto bad;
504 }
505
506 if (m != NULL) /* free the extra copy of the mbuf, or the mbuf skipped by IPSec */
507 m_freem(m);
508 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
509 return;
510 }
511
512 #if IPSEC
513 /*
514 * UDP to port 4500 with a payload where the first four bytes are
515 * not zero is a UDP encapsulated IPSec packet. Packets where
516 * the payload is one byte and that byte is 0xFF are NAT keepalive
517 * packets. Decapsulate the ESP packet and carry on with IPSec input
518 * or discard the NAT keep-alive.
519 */
520 if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
521 uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) {
522 int payload_len = len - sizeof(struct udphdr) > 4 ? 4 : len - sizeof(struct udphdr);
523 if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
524 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) + payload_len)) == 0) {
525 udpstat.udps_hdrops++;
526 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
527 return;
528 }
529 ip = mtod(m, struct ip *);
530 uh = (struct udphdr *)((caddr_t)ip + iphlen);
531 }
532 /* Check for NAT keepalive packet */
533 if (payload_len == 1 && *(u_int8_t*)((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
534 m_freem(m);
535 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
536 return;
537 }
538 else if (payload_len == 4 && *(u_int32_t*)((caddr_t)uh + sizeof(struct udphdr)) != 0) {
539 /* UDP encapsulated IPSec packet to pass through NAT */
540 size_t stripsiz;
541
542 stripsiz = sizeof(struct udphdr);
543
544 ip = mtod(m, struct ip *);
545 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
546 m->m_data += stripsiz;
547 m->m_len -= stripsiz;
548 m->m_pkthdr.len -= stripsiz;
549 ip = mtod(m, struct ip *);
550 ip->ip_len = ip->ip_len - stripsiz;
551 ip->ip_p = IPPROTO_ESP;
552
553 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
554 esp4_input(m, iphlen);
555 return;
556 }
557 }
558 #endif
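	/*
	 * Illustration (not from the original source): what the block above is
	 * distinguishing, per RFC 3948 -- a one-byte 0xFF payload is a NAT
	 * keepalive, a non-zero first word is the SPI of a UDP-encapsulated ESP
	 * packet, and a zero first word is the non-ESP marker of an IKE message
	 * that falls through to normal socket delivery below.  Sketch only;
	 * never compiled.
	 */
#if 0
	enum natt_kind { NATT_KEEPALIVE, NATT_ESP, NATT_IKE };

	static enum natt_kind
	natt_classify(const u_char *payload, size_t plen)
	{
		if (plen == 1 && payload[0] == 0xFF)
			return (NATT_KEEPALIVE);
		if (plen >= 4 &&
		    (payload[0] | payload[1] | payload[2] | payload[3]) != 0)
			return (NATT_ESP);		/* first word is the ESP SPI */
		return (NATT_IKE);			/* four-zero-byte non-ESP marker */
	}
#endif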
559
560 /*
561 * Locate pcb for datagram.
562 */
563 inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
564 ip->ip_dst, uh->uh_dport, 1, m->m_pkthdr.rcvif);
565 if (inp == NULL) {
566 if (log_in_vain) {
567 char buf[MAX_IPv4_STR_LEN];
568 char buf2[MAX_IPv4_STR_LEN];
569
570 /* check src and dst address */
571 if (log_in_vain != 3)
572 log(LOG_INFO,
573 "Connection attempt to UDP %s:%d from %s:%d\n",
574 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
575 ntohs(uh->uh_dport),
576 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
577 ntohs(uh->uh_sport));
578 else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
579 ip->ip_dst.s_addr != ip->ip_src.s_addr)
580 log_in_vain_log((LOG_INFO,
581 "Stealth Mode connection attempt to UDP %s:%d from %s:%d\n",
582 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
583 ntohs(uh->uh_dport),
584 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
585 ntohs(uh->uh_sport)))
586 }
587 udpstat.udps_noport++;
588 if (m->m_flags & (M_BCAST | M_MCAST)) {
589 udpstat.udps_noportbcast++;
590 goto bad;
591 }
592 #if ICMP_BANDLIM
593 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
594 goto bad;
595 #endif
596 if (blackhole)
597 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP)
598 goto bad;
599 *ip = save_ip;
600 ip->ip_len += iphlen;
601 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
602 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
603 return;
604 }
605 udp_lock(inp->inp_socket, 1, 0);
606
607 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
608 udp_unlock(inp->inp_socket, 1, 0);
609 goto bad;
610 }
611 #if IPSEC
612 if (ipsec_bypass == 0 && inp != NULL) {
613 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
614 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
615 udp_unlock(inp->inp_socket, 1, 0);
616 goto bad;
617 }
618 }
619 #endif /*IPSEC*/
620
621 /*
622 * Construct sockaddr format source address.
623 * Stuff source address and datagram in user buffer.
624 */
625 udp_in.sin_port = uh->uh_sport;
626 udp_in.sin_addr = ip->ip_src;
627 if (inp->inp_flags & INP_CONTROLOPTS
628 || inp->inp_socket->so_options & SO_TIMESTAMP) {
629 #if INET6
630 if (inp->inp_vflag & INP_IPV6) {
631 int savedflags;
632
633 ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
634 savedflags = inp->inp_flags;
635 inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
636 ip6_savecontrol(inp, &opts, &udp_ip6.uip6_ip6, m);
637 inp->inp_flags = savedflags;
638 } else
639 #endif
640 ip_savecontrol(inp, &opts, ip, m);
641 }
642 m_adj(m, iphlen + sizeof(struct udphdr));
643
644 KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
645 save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);
646
647 #if INET6
648 if (inp->inp_vflag & INP_IPV6) {
649 in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
650 append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
651 } else
652 #endif
653 append_sa = (struct sockaddr *)&udp_in;
654 if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa, m, opts, NULL) == 0) {
655 udpstat.udps_fullsock++;
656 }
657 else {
658 sorwakeup(inp->inp_socket);
659 }
660 udp_unlock(inp->inp_socket, 1, 0);
661 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
662 return;
663 bad:
664 m_freem(m);
665 if (opts)
666 m_freem(opts);
667 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
668 return;
669 }
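
/*
 * Illustration (not from the original source): the multicast/broadcast loop in
 * udp_input() above hands a copy of the datagram to every matching PCB, but it
 * stops after the first match unless that socket has SO_REUSEPORT or
 * SO_REUSEADDR set.  A minimal userland receiver that opts in, so several
 * processes can share one multicast port; error handling trimmed, never
 * compiled.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static int
open_shared_mcast_rx(const char *group, unsigned short port)
{
	struct sockaddr_in sin;
	struct ip_mreq mreq;
	int s, on = 1;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	/* Without this, the delivery loop stops at the first bound socket. */
	setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &on, sizeof (on));

	memset(&sin, 0, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof (sin);
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(s, (struct sockaddr *)&sin, sizeof (sin));

	mreq.imr_multiaddr.s_addr = inet_addr(group);
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof (mreq));
	return (s);	/* every process that does this receives its own copy */
}
#endif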
670
671 #if INET6
672 static void
673 ip_2_ip6_hdr(ip6, ip)
674 struct ip6_hdr *ip6;
675 struct ip *ip;
676 {
677 bzero(ip6, sizeof(*ip6));
678
679 ip6->ip6_vfc = IPV6_VERSION;
680 ip6->ip6_plen = ip->ip_len;
681 ip6->ip6_nxt = ip->ip_p;
682 ip6->ip6_hlim = ip->ip_ttl;
683 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
684 IPV6_ADDR_INT32_SMP;
685 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
686 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
687 }
688 #endif
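
/*
 * Note (added for clarity): ip_2_ip6_hdr() synthesizes an IPv4-mapped view of
 * the datagram for AF_INET6 sockets -- IPV6_ADDR_INT32_SMP is the ::ffff:0:0/96
 * mapped prefix, so an IPv4 source such as 10.0.0.1 is presented to
 * ancillary-data consumers as ::ffff:10.0.0.1.
 */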
689
690 /*
691 * subroutine of udp_input(), mainly for source code readability.
692 */
693 static void
694 #if INET6
695 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
696 struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
697 struct udp_ip6 *pudp_ip6)
698 #else
699 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
700 struct sockaddr_in *pudp_in)
701 #endif
702 {
703 struct sockaddr *append_sa;
704 struct mbuf *opts = 0;
705
706 #if CONFIG_MACF_NET
707 if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
708 m_freem(n);
709 return;
710 }
711 #endif
712 if (last->inp_flags & INP_CONTROLOPTS ||
713 last->inp_socket->so_options & SO_TIMESTAMP) {
714 #if INET6
715 if (last->inp_vflag & INP_IPV6) {
716 int savedflags;
717
718 if (pudp_ip6->uip6_init_done == 0) {
719 ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
720 pudp_ip6->uip6_init_done = 1;
721 }
722 savedflags = last->inp_flags;
723 last->inp_flags &= ~INP_UNMAPPABLEOPTS;
724 ip6_savecontrol(last, &opts, &pudp_ip6->uip6_ip6, n);
725 last->inp_flags = savedflags;
726 } else
727 #endif
728 ip_savecontrol(last, &opts, ip, n);
729 }
730 #if INET6
731 if (last->inp_vflag & INP_IPV6) {
732 if (pudp_in6->uin6_init_done == 0) {
733 in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
734 pudp_in6->uin6_init_done = 1;
735 }
736 append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
737 } else
738 #endif
739 append_sa = (struct sockaddr *)pudp_in;
740 m_adj(n, off);
741 if (sbappendaddr(&last->inp_socket->so_rcv, append_sa, n, opts, NULL) == 0) {
742 udpstat.udps_fullsock++;
743 } else
744 sorwakeup(last->inp_socket);
745 }
746
747 /*
748 * Notify a udp user of an asynchronous error;
749 * just wake up so that he can collect error status.
750 */
751 void
752 udp_notify(inp, errno)
753 register struct inpcb *inp;
754 int errno;
755 {
756 inp->inp_socket->so_error = errno;
757 sorwakeup(inp->inp_socket);
758 sowwakeup(inp->inp_socket);
759 }
760
761 void
762 udp_ctlinput(cmd, sa, vip)
763 int cmd;
764 struct sockaddr *sa;
765 void *vip;
766 {
767 struct ip *ip = vip;
768 struct udphdr *uh;
769 void (*notify)(struct inpcb *, int) = udp_notify;
770 struct in_addr faddr;
771 struct inpcb *inp;
772
773 faddr = ((struct sockaddr_in *)sa)->sin_addr;
774 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
775 return;
776
777 if (PRC_IS_REDIRECT(cmd)) {
778 ip = 0;
779 notify = in_rtchange;
780 } else if (cmd == PRC_HOSTDEAD)
781 ip = 0;
782 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
783 return;
784 if (ip) {
785 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
786 inp = in_pcblookup_hash(&udbinfo, faddr, uh->uh_dport,
787 ip->ip_src, uh->uh_sport, 0, NULL);
788 if (inp != NULL && inp->inp_socket != NULL) {
789 udp_lock(inp->inp_socket, 1, 0);
790 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
791 udp_unlock(inp->inp_socket, 1, 0);
792 return;
793 }
794 (*notify)(inp, inetctlerrmap[cmd]);
795 udp_unlock(inp->inp_socket, 1, 0);
796 }
797 } else
798 in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
799 }
800
801 int
802 udp_ctloutput(struct socket *so, struct sockopt *sopt)
803 {
804 int error, optval;
805 struct inpcb *inp;
806
807 if (sopt->sopt_level != IPPROTO_UDP)
808 return (ip_ctloutput(so, sopt));
809
810 error = 0;
811 inp = sotoinpcb(so);
812
813 switch (sopt->sopt_dir) {
814 case SOPT_SET:
815 switch (sopt->sopt_name) {
816 case UDP_NOCKSUM:
817 /* This option is settable only for UDP over IPv4 */
818 if (!(inp->inp_vflag & INP_IPV4)) {
819 error = EINVAL;
820 break;
821 }
822
823 if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
824 sizeof (optval))) != 0)
825 break;
826
827 if (optval != 0)
828 inp->inp_flags |= INP_UDP_NOCKSUM;
829 else
830 inp->inp_flags &= ~INP_UDP_NOCKSUM;
831 break;
832
833 default:
834 error = ENOPROTOOPT;
835 break;
836 }
837 break;
838
839 case SOPT_GET:
840 switch (sopt->sopt_name) {
841 case UDP_NOCKSUM:
842 optval = inp->inp_flags & INP_UDP_NOCKSUM;
843 break;
844
845 default:
846 error = ENOPROTOOPT;
847 break;
848 }
849 if (error == 0)
850 error = sooptcopyout(sopt, &optval, sizeof (optval));
851 break;
852 }
853 return (error);
854 }
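
/*
 * Illustration (not from the original source): how a userland program reaches
 * the SOPT_SET/UDP_NOCKSUM case above -- UDP_NOCKSUM from <netinet/udp.h> is
 * set at level IPPROTO_UDP and simply flips INP_UDP_NOCKSUM so udp_output()
 * skips the outbound checksum.  IPv4 sockets only; sketch, never compiled.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>
#include <stdio.h>
#include <unistd.h>

static void
udp_nocksum_example(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;

	if (setsockopt(s, IPPROTO_UDP, UDP_NOCKSUM, &on, sizeof (on)) == -1)
		perror("setsockopt(UDP_NOCKSUM)");
	close(s);
}
#endif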
855
856 static int
857 udp_pcblist SYSCTL_HANDLER_ARGS
858 {
859 #pragma unused(oidp, arg1, arg2)
860 int error, i, n;
861 struct inpcb *inp, **inp_list;
862 inp_gen_t gencnt;
863 struct xinpgen xig;
864
865 /*
866 * The process of preparing the PCB list is too time-consuming and
867 * resource-intensive to repeat twice on every request.
868 */
869 lck_rw_lock_exclusive(udbinfo.mtx);
870 if (req->oldptr == USER_ADDR_NULL) {
871 n = udbinfo.ipi_count;
872 req->oldidx = 2 * (sizeof xig)
873 + (n + n/8) * sizeof(struct xinpcb);
874 lck_rw_done(udbinfo.mtx);
875 return 0;
876 }
877
878 if (req->newptr != USER_ADDR_NULL) {
879 lck_rw_done(udbinfo.mtx);
880 return EPERM;
881 }
882
883 /*
884 * OK, now we're committed to doing something.
885 */
886 gencnt = udbinfo.ipi_gencnt;
887 n = udbinfo.ipi_count;
888
889 bzero(&xig, sizeof(xig));
890 xig.xig_len = sizeof xig;
891 xig.xig_count = n;
892 xig.xig_gen = gencnt;
893 xig.xig_sogen = so_gencnt;
894 error = SYSCTL_OUT(req, &xig, sizeof xig);
895 if (error) {
896 lck_rw_done(udbinfo.mtx);
897 return error;
898 }
899 /*
900 * We are done if there is no pcb
901 */
902 if (n == 0) {
903 lck_rw_done(udbinfo.mtx);
904 return 0;
905 }
906
907 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
908 if (inp_list == 0) {
909 lck_rw_done(udbinfo.mtx);
910 return ENOMEM;
911 }
912
913 for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
914 inp = LIST_NEXT(inp, inp_list)) {
915 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
916 inp_list[i++] = inp;
917 }
918 n = i;
919
920 error = 0;
921 for (i = 0; i < n; i++) {
922 inp = inp_list[i];
923 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
924 struct xinpcb xi;
925
926 bzero(&xi, sizeof(xi));
927 xi.xi_len = sizeof xi;
928 /* XXX should avoid extra copy */
929 inpcb_to_compat(inp, &xi.xi_inp);
930 if (inp->inp_socket)
931 sotoxsocket(inp->inp_socket, &xi.xi_socket);
932 error = SYSCTL_OUT(req, &xi, sizeof xi);
933 }
934 }
935 if (!error) {
936 /*
937 * Give the user an updated idea of our state.
938 * If the generation differs from what we told
939 * her before, she knows that something happened
940 * while we were processing this request, and it
941 * might be necessary to retry.
942 */
943 bzero(&xig, sizeof(xig));
944 xig.xig_len = sizeof xig;
945 xig.xig_gen = udbinfo.ipi_gencnt;
946 xig.xig_sogen = so_gencnt;
947 xig.xig_count = udbinfo.ipi_count;
948 error = SYSCTL_OUT(req, &xig, sizeof xig);
949 }
950 FREE(inp_list, M_TEMP);
951 lck_rw_done(udbinfo.mtx);
952 return error;
953 }
954
955 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
956 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
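
/*
 * Illustration (not from the original source): walking "net.inet.udp.pcblist"
 * from userland, the way netstat does.  The stream is a leading struct
 * xinpgen, one struct xinpcb per PCB, and a trailing xinpgen whose xig_len is
 * its own size; the first sysctlbyname(3) call with a NULL buffer returns the
 * size estimate computed from oldidx above.  Struct layout per
 * <netinet/in_pcb.h>; sketch, never compiled.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct xinpgen *xig;
	size_t len = 0;
	char *buf, *p;

	if (sysctlbyname("net.inet.udp.pcblist", NULL, &len, NULL, 0) == -1)
		return (1);
	if ((buf = malloc(len)) == NULL ||
	    sysctlbyname("net.inet.udp.pcblist", buf, &len, NULL, 0) == -1)
		return (1);

	xig = (struct xinpgen *)buf;
	for (p = buf + xig->xig_len; p < buf + len; p += xig->xig_len) {
		xig = (struct xinpgen *)p;
		if (xig->xig_len <= sizeof (struct xinpgen))
			break;			/* trailing xinpgen ends the list */
		printf("udp4 local port %u\n",
		    ntohs(((struct xinpcb *)p)->xi_inp.inp_lport));
	}
	free(buf);
	return (0);
}
#endif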
957
958 #if !CONFIG_EMBEDDED
959
960 static int
961 udp_pcblist64 SYSCTL_HANDLER_ARGS
962 {
963 #pragma unused(oidp, arg1, arg2)
964 int error, i, n;
965 struct inpcb *inp, **inp_list;
966 inp_gen_t gencnt;
967 struct xinpgen xig;
968
969 /*
970 * The process of preparing the PCB list is too time-consuming and
971 * resource-intensive to repeat twice on every request.
972 */
973 lck_rw_lock_shared(udbinfo.mtx);
974 if (req->oldptr == USER_ADDR_NULL) {
975 n = udbinfo.ipi_count;
976 req->oldidx = 2 * (sizeof xig)
977 + (n + n/8) * sizeof(struct xinpcb64);
978 lck_rw_done(udbinfo.mtx);
979 return 0;
980 }
981
982 if (req->newptr != USER_ADDR_NULL) {
983 lck_rw_done(udbinfo.mtx);
984 return EPERM;
985 }
986
987 /*
988 * OK, now we're committed to doing something.
989 */
990 gencnt = udbinfo.ipi_gencnt;
991 n = udbinfo.ipi_count;
992
993 bzero(&xig, sizeof(xig));
994 xig.xig_len = sizeof xig;
995 xig.xig_count = n;
996 xig.xig_gen = gencnt;
997 xig.xig_sogen = so_gencnt;
998 error = SYSCTL_OUT(req, &xig, sizeof xig);
999 if (error) {
1000 lck_rw_done(udbinfo.mtx);
1001 return error;
1002 }
1003 /*
1004 * We are done if there is no pcb
1005 */
1006 if (n == 0) {
1007 lck_rw_done(udbinfo.mtx);
1008 return 0;
1009 }
1010
1011 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1012 if (inp_list == 0) {
1013 lck_rw_done(udbinfo.mtx);
1014 return ENOMEM;
1015 }
1016
1017 for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
1018 inp = LIST_NEXT(inp, inp_list)) {
1019 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
1020 inp_list[i++] = inp;
1021 }
1022 n = i;
1023
1024 error = 0;
1025 for (i = 0; i < n; i++) {
1026 inp = inp_list[i];
1027 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
1028 struct xinpcb64 xi;
1029
1030 bzero(&xi, sizeof(xi));
1031 xi.xi_len = sizeof xi;
1032 inpcb_to_xinpcb64(inp, &xi);
1033 if (inp->inp_socket)
1034 sotoxsocket64(inp->inp_socket, &xi.xi_socket);
1035 error = SYSCTL_OUT(req, &xi, sizeof xi);
1036 }
1037 }
1038 if (!error) {
1039 /*
1040 * Give the user an updated idea of our state.
1041 * If the generation differs from what we told
1042 * her before, she knows that something happened
1043 * while we were processing this request, and it
1044 * might be necessary to retry.
1045 */
1046 bzero(&xig, sizeof(xig));
1047 xig.xig_len = sizeof xig;
1048 xig.xig_gen = udbinfo.ipi_gencnt;
1049 xig.xig_sogen = so_gencnt;
1050 xig.xig_count = udbinfo.ipi_count;
1051 error = SYSCTL_OUT(req, &xig, sizeof xig);
1052 }
1053 FREE(inp_list, M_TEMP);
1054 lck_rw_done(udbinfo.mtx);
1055 return error;
1056 }
1057
1058 SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64, CTLFLAG_RD, 0, 0,
1059 udp_pcblist64, "S,xinpcb64", "List of active UDP sockets");
1060
1061 #endif /* !CONFIG_EMBEDDED */
1062
1063 static __inline__ u_int16_t
1064 get_socket_id(struct socket * s)
1065 {
1066 u_int16_t val;
1067
1068 if (s == NULL) {
1069 return (0);
1070 }
1071 val = (u_int16_t)(((uintptr_t)s) / sizeof(struct socket));
1072 if (val == 0) {
1073 val = 0xffff;
1074 }
1075 return (val);
1076 }
1077
1078 static int
1079 udp_output(inp, m, addr, control, p)
1080 register struct inpcb *inp;
1081 struct mbuf *m;
1082 struct sockaddr *addr;
1083 struct mbuf *control;
1084 struct proc *p;
1085 {
1086 register struct udpiphdr *ui;
1087 register int len = m->m_pkthdr.len;
1088 struct sockaddr_in *sin;
1089 struct in_addr origladdr, laddr, faddr;
1090 u_short lport, fport;
1091 struct sockaddr_in *ifaddr;
1092 int error = 0, udp_dodisconnect = 0;
1093 struct socket *so = inp->inp_socket;
1094 int soopts = 0;
1095 struct mbuf *inpopts;
1096 struct ip_moptions *mopts;
1097 struct route ro;
1098 struct ip_out_args ipoa;
1099 #if PKT_PRIORITY
1100 mbuf_traffic_class_t mtc = MBUF_TC_NONE;
1101 #endif /* PKT_PRIORITY */
1102
1103 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);
1104
1105 if (control != NULL) {
1106 #if PKT_PRIORITY
1107 mtc = mbuf_traffic_class_from_control(control);
1108 #endif /* PKT_PRIORITY */
1109 m_freem(control);
1110 }
1111 KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
1112 inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
1113 (htons((u_short)len + sizeof (struct udphdr))));
1114
1115 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
1116 error = EMSGSIZE;
1117 goto release;
1118 }
1119
1120 lck_mtx_assert(inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1121
1122 /* If socket was bound to an ifindex, tell ip_output about it */
1123 ipoa.ipoa_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
1124 inp->inp_boundif : IFSCOPE_NONE;
1125 soopts |= IP_OUTARGS;
1126
1127 /* If there was a routing change, discard cached route and check
1128 * that we have a valid source address.
1129 * Reacquire a new source address if INADDR_ANY was specified
1130 */
1131 if (inp->inp_route.ro_rt != NULL &&
1132 inp->inp_route.ro_rt->generation_id != route_generation) {
1133 struct in_ifaddr *ia;
1134
1135 /* src address is gone? */
1136 if ((ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
1137 if (inp->inp_flags & INP_INADDR_ANY) {
1138 /* new src will be set later */
1139 inp->inp_laddr.s_addr = INADDR_ANY;
1140 } else {
1141 error = EADDRNOTAVAIL;
1142 goto release;
1143 }
1144 }
1145 if (ia != NULL)
1146 ifafree(&ia->ia_ifa);
1147 if (inp->inp_route.ro_rt != NULL)
1148 rtfree(inp->inp_route.ro_rt);
1149 inp->inp_route.ro_rt = NULL;
1150 }
1151
1152 origladdr= laddr = inp->inp_laddr;
1153 faddr = inp->inp_faddr;
1154 lport = inp->inp_lport;
1155 fport = inp->inp_fport;
1156
1157 if (addr) {
1158 sin = (struct sockaddr_in *)addr;
1159 if (faddr.s_addr != INADDR_ANY) {
1160 error = EISCONN;
1161 goto release;
1162 }
1163 if (lport == 0) {
1164 /*
1165 * In case we don't have a local port set, go through the full connect.
1166 * We don't have a local port yet (i.e., we can't be looked up),
1167 * so it's not an issue if the input runs at the same time we do this.
1168 */
1169 error = in_pcbconnect(inp, addr, p);
1170 if (error) {
1171 goto release;
1172 }
1173 laddr = inp->inp_laddr;
1174 lport = inp->inp_lport;
1175 faddr = inp->inp_faddr;
1176 fport = inp->inp_fport;
1177 udp_dodisconnect = 1;
1178 }
1179 else {
1180 /* Fast path case:
1181 * we have a full address and a local port;
1182 * use that info to build the packet without changing the pcb
1183 * or interfering with the input path. See 3851370.
1184 */
1185 if (laddr.s_addr == INADDR_ANY) {
1186 if ((error = in_pcbladdr(inp, addr, &ifaddr)) != 0)
1187 goto release;
1188 laddr = ifaddr->sin_addr;
1189 inp->inp_flags |= INP_INADDR_ANY; /* from pcbconnect: remember we don't care about src addr. */
1190 }
1191
1192 faddr = sin->sin_addr;
1193 fport = sin->sin_port;
1194 }
1195 } else {
1196 if (faddr.s_addr == INADDR_ANY) {
1197 error = ENOTCONN;
1198 goto release;
1199 }
1200 }
1201
1202 #if CONFIG_MACF_NET
1203 mac_mbuf_label_associate_inpcb(inp, m);
1204 #endif
1205
1206 /*
1207 * Calculate data length and get a mbuf
1208 * for UDP and IP headers.
1209 */
1210 M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT);
1211 if (m == 0) {
1212 error = ENOBUFS;
1213 goto abort;
1214 }
1215
1216 /*
1217 * Fill in mbuf with extended UDP header
1218 * and addresses and length put into network format.
1219 */
1220 ui = mtod(m, struct udpiphdr *);
1221 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1222 ui->ui_pr = IPPROTO_UDP;
1223 ui->ui_src = laddr;
1224 ui->ui_dst = faddr;
1225 ui->ui_sport = lport;
1226 ui->ui_dport = fport;
1227 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1228
1229 /*
1230 * Set up checksum and output datagram.
1231 */
1232 if (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM)) {
1233 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1234 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1235 m->m_pkthdr.csum_flags = CSUM_UDP;
1236 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1237 } else {
1238 ui->ui_sum = 0;
1239 }
1240 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1241 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1242 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1243 udpstat.udps_opackets++;
1244
1245 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1246 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1247
1248 #if IPSEC
1249 if (ipsec_bypass == 0 && ipsec_setsocket(m, inp->inp_socket) != 0) {
1250 error = ENOBUFS;
1251 goto abort;
1252 }
1253 #endif /*IPSEC*/
1254 m->m_pkthdr.socket_id = get_socket_id(inp->inp_socket);
1255
1256 inpopts = inp->inp_options;
1257 soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1258 mopts = inp->inp_moptions;
1259
1260 /* Copy the cached route and take an extra reference */
1261 inp_route_copyout(inp, &ro);
1262
1263 #if PKT_PRIORITY
1264 set_traffic_class(m, so, mtc);
1265 #endif /* PKT_PRIORITY */
1266
1267 socket_unlock(so, 0);
1268 /* XXX jgraessley please look at XXX */
1269 error = ip_output_list(m, 0, inpopts, &ro, soopts, mopts, &ipoa);
1270 socket_lock(so, 0);
1271
1272 /* Synchronize PCB cached route */
1273 inp_route_copyin(inp, &ro);
1274
1275 if (udp_dodisconnect) {
1276 #if IFNET_ROUTE_REFCNT
1277 /* Always discard the cached route for unconnected socket */
1278 if (inp->inp_route.ro_rt != NULL) {
1279 rtfree(inp->inp_route.ro_rt);
1280 inp->inp_route.ro_rt = NULL;
1281 }
1282 #endif /* IFNET_ROUTE_REFCNT */
1283 in_pcbdisconnect(inp);
1284 inp->inp_laddr = origladdr; /* XXX rehash? */
1285 }
1286 #if IFNET_ROUTE_REFCNT
1287 else if (inp->inp_route.ro_rt != NULL &&
1288 (inp->inp_route.ro_rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST))) {
1289 /* Always discard non-unicast cached route */
1290 rtfree(inp->inp_route.ro_rt);
1291 inp->inp_route.ro_rt = NULL;
1292 }
1293 #endif /* IFNET_ROUTE_REFCNT */
1294
1295 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
1296 return (error);
1297
1298 abort:
1299 if (udp_dodisconnect) {
1300 #if IFNET_ROUTE_REFCNT
1301 /* Always discard the cached route for unconnected socket */
1302 if (inp->inp_route.ro_rt != NULL) {
1303 rtfree(inp->inp_route.ro_rt);
1304 inp->inp_route.ro_rt = NULL;
1305 }
1306 #endif /* IFNET_ROUTE_REFCNT */
1307 in_pcbdisconnect(inp);
1308 inp->inp_laddr = origladdr; /* XXX rehash? */
1309 }
1310 #if IFNET_ROUTE_REFCNT
1311 else if (inp->inp_route.ro_rt != NULL &&
1312 (inp->inp_route.ro_rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST))) {
1313 /* Always discard non-unicast cached route */
1314 rtfree(inp->inp_route.ro_rt);
1315 inp->inp_route.ro_rt = NULL;
1316 }
1317 #endif /* IFNET_ROUTE_REFCNT */
1318
1319 release:
1320 m_freem(m);
1321 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
1322 return (error);
1323 }
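
/*
 * Illustration (not from the original source): the two udp_output() paths as
 * seen from userland.  sendto() on a socket with no local port takes the
 * temporary in_pcbconnect()/udp_dodisconnect path above; once a local port
 * exists, the fast path builds the packet without touching the PCB.  sendto()
 * with a second destination on a connected socket fails with EISCONN, and
 * send() on an unconnected one with ENOTCONN.  Destination 192.0.2.1 is a
 * documentation address; sketch, never compiled.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static void
udp_send_paths(void)
{
	struct sockaddr_in dst;
	int s1 = socket(AF_INET, SOCK_DGRAM, 0);
	int s2 = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&dst, 0, sizeof (dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof (dst);
	dst.sin_port = htons(9);			/* discard service */
	dst.sin_addr.s_addr = inet_addr("192.0.2.1");

	/* No local port yet: udp_output() does a temporary connect/disconnect. */
	sendto(s1, "x", 1, 0, (struct sockaddr *)&dst, sizeof (dst));

	/* Connected socket: later sends skip the temporary connect entirely. */
	connect(s2, (struct sockaddr *)&dst, sizeof (dst));
	send(s2, "x", 1, 0);

	close(s1);
	close(s2);
}
#endif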
1324
1325 u_int32_t udp_sendspace = 9216; /* really max datagram size */
1326 /* 40 1K datagrams */
1327 u_int32_t udp_recvspace = 40 * (1024 +
1328 #if INET6
1329 sizeof(struct sockaddr_in6)
1330 #else
1331 sizeof(struct sockaddr_in)
1332 #endif
1333 );
1334
1335 /* Check that the values of udp send and recv space do not exceed sb_max */
1336 static int
1337 sysctl_udp_sospace(struct sysctl_oid *oidp, __unused void *arg1,
1338 __unused int arg2, struct sysctl_req *req) {
1339 u_int32_t new_value = 0, *space_p = NULL;
1340 int changed = 0, error = 0;
1341 u_quad_t sb_effective_max = (sb_max/ (MSIZE+MCLBYTES)) * MCLBYTES;
1342
1343 switch (oidp->oid_number) {
1344 case UDPCTL_RECVSPACE:
1345 space_p = &udp_recvspace;
1346 break;
1347 case UDPCTL_MAXDGRAM:
1348 space_p = &udp_sendspace;
1349 break;
1350 default:
1351 return EINVAL;
1352 }
1353 error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
1354 &new_value, &changed);
1355 if (changed) {
1356 if (new_value > 0 && new_value <= sb_effective_max) {
1357 *space_p = new_value;
1358 } else {
1359 error = ERANGE;
1360 }
1361 }
1362 return error;
1363 }
1364
1365 SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLTYPE_INT | CTLFLAG_RW,
1366 &udp_recvspace, 0, &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
1367
1368 SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLTYPE_INT | CTLFLAG_RW,
1369 &udp_sendspace, 0, &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
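
/*
 * Illustration (not from the original source): reading and raising
 * "net.inet.udp.maxdgram", which is bounds-checked by sysctl_udp_sospace()
 * above (values above the effective socket-buffer maximum get ERANGE).  The
 * write needs root; sketch, never compiled.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	u_int32_t cur = 0, want = 65535;
	size_t len = sizeof (cur);

	if (sysctlbyname("net.inet.udp.maxdgram", &cur, &len, NULL, 0) == 0)
		printf("udp maxdgram: %u\n", cur);

	if (sysctlbyname("net.inet.udp.maxdgram", NULL, NULL,
	    &want, sizeof (want)) == -1)
		perror("net.inet.udp.maxdgram");
	return (0);
}
#endif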
1370
1371 static int
1372 udp_abort(struct socket *so)
1373 {
1374 struct inpcb *inp;
1375
1376 inp = sotoinpcb(so);
1377 if (inp == 0)
1378 panic("udp_abort: so=%p null inp\n", so); /* ??? possible? panic instead? */
1379 soisdisconnected(so);
1380 in_pcbdetach(inp);
1381 return 0;
1382 }
1383
1384 static int
1385 udp_attach(struct socket *so, __unused int proto, struct proc *p)
1386 {
1387 struct inpcb *inp;
1388 int error;
1389
1390 inp = sotoinpcb(so);
1391 if (inp != 0)
1392 panic ("udp_attach so=%p inp=%p\n", so, inp);
1393
1394 error = in_pcballoc(so, &udbinfo, p);
1395 if (error)
1396 return error;
1397 error = soreserve(so, udp_sendspace, udp_recvspace);
1398 if (error)
1399 return error;
1400 inp = (struct inpcb *)so->so_pcb;
1401 inp->inp_vflag |= INP_IPV4;
1402 inp->inp_ip_ttl = ip_defttl;
1403 return 0;
1404 }
1405
1406 static int
1407 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
1408 {
1409 struct inpcb *inp;
1410 int error;
1411
1412 if (nam->sa_family != 0 && nam->sa_family != AF_INET
1413 && nam->sa_family != AF_INET6) {
1414 return EAFNOSUPPORT;
1415 }
1416 inp = sotoinpcb(so);
1417 if (inp == 0)
1418 return EINVAL;
1419 error = in_pcbbind(inp, nam, p);
1420 return error;
1421 }
1422
1423 static int
1424 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
1425 {
1426 struct inpcb *inp;
1427 int error;
1428
1429 inp = sotoinpcb(so);
1430 if (inp == 0)
1431 return EINVAL;
1432 if (inp->inp_faddr.s_addr != INADDR_ANY)
1433 return EISCONN;
1434 error = in_pcbconnect(inp, nam, p);
1435 if (error == 0)
1436 soisconnected(so);
1437 return error;
1438 }
1439
1440 static int
1441 udp_detach(struct socket *so)
1442 {
1443 struct inpcb *inp;
1444
1445 inp = sotoinpcb(so);
1446 if (inp == 0)
1447 panic("udp_detach: so=%p null inp\n", so); /* ??? possible? panic instead? */
1448 in_pcbdetach(inp);
1449 inp->inp_state = INPCB_STATE_DEAD;
1450 return 0;
1451 }
1452
1453 static int
1454 udp_disconnect(struct socket *so)
1455 {
1456 struct inpcb *inp;
1457
1458 inp = sotoinpcb(so);
1459 if (inp == 0)
1460 return EINVAL;
1461 if (inp->inp_faddr.s_addr == INADDR_ANY)
1462 return ENOTCONN;
1463
1464 in_pcbdisconnect(inp);
1465 inp->inp_laddr.s_addr = INADDR_ANY;
1466 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1467 return 0;
1468 }
1469
1470 static int
1471 udp_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *addr,
1472 struct mbuf *control, struct proc *p)
1473 {
1474 struct inpcb *inp;
1475
1476 inp = sotoinpcb(so);
1477 if (inp == 0) {
1478 m_freem(m);
1479 return EINVAL;
1480 }
1481
1482 return udp_output(inp, m, addr, control, p);
1483 }
1484
1485 int
1486 udp_shutdown(struct socket *so)
1487 {
1488 struct inpcb *inp;
1489
1490 inp = sotoinpcb(so);
1491 if (inp == 0)
1492 return EINVAL;
1493 socantsendmore(so);
1494 return 0;
1495 }
1496
1497 struct pr_usrreqs udp_usrreqs = {
1498 udp_abort, pru_accept_notsupp, udp_attach, udp_bind, udp_connect,
1499 pru_connect2_notsupp, in_control, udp_detach, udp_disconnect,
1500 pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
1501 pru_rcvoob_notsupp, udp_send, pru_sense_null, udp_shutdown,
1502 in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
1503 };
1504
1505
1506 int
1507 udp_lock(struct socket *so, int refcount, void *debug)
1508 {
1509 void *lr_saved;
1510
1511 if (debug == NULL)
1512 lr_saved = __builtin_return_address(0);
1513 else
1514 lr_saved = debug;
1515
1516 if (so->so_pcb) {
1517 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx,
1518 LCK_MTX_ASSERT_NOTOWNED);
1519 lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1520 } else {
1521 panic("udp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
1522 so, lr_saved, solockhistory_nr(so));
1523 /* NOTREACHED */
1524 }
1525 if (refcount)
1526 so->so_usecount++;
1527
1528 so->lock_lr[so->next_lock_lr] = lr_saved;
1529 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
1530 return (0);
1531 }
1532
1533 int
1534 udp_unlock(struct socket *so, int refcount, void *debug)
1535 {
1536 void *lr_saved;
1537
1538 if (debug == NULL)
1539 lr_saved = __builtin_return_address(0);
1540 else
1541 lr_saved = debug;
1542
1543 if (refcount)
1544 so->so_usecount--;
1545
1546 if (so->so_pcb == NULL) {
1547 panic("udp_unlock: so=%p NO PCB! lr=%p lrh= %s\n",
1548 so, lr_saved, solockhistory_nr(so));
1549 /* NOTREACHED */
1550 } else {
1551 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx,
1552 LCK_MTX_ASSERT_OWNED);
1553 so->unlock_lr[so->next_unlock_lr] = lr_saved;
1554 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
1555 lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1556 }
1557
1558
1559 return (0);
1560 }
1561
1562 lck_mtx_t *
1563 udp_getlock(struct socket *so, __unused int locktype)
1564 {
1565 struct inpcb *inp = sotoinpcb(so);
1566
1567
1568 if (so->so_pcb)
1569 return(inp->inpcb_mtx);
1570 else {
1571 panic("udp_getlock: so=%p NULL so_pcb lrh= %s\n",
1572 so, solockhistory_nr(so));
1573 return (so->so_proto->pr_domain->dom_mtx);
1574 }
1575 }
1576
1577 void
1578 udp_slowtimo()
1579 {
1580 struct inpcb *inp, *inpnxt;
1581 struct socket *so;
1582 struct inpcbinfo *pcbinfo = &udbinfo;
1583
1584 if (lck_rw_try_lock_exclusive(pcbinfo->mtx) == FALSE) {
1585 if (udp_gc_done == TRUE) {
1586 udp_gc_done = FALSE;
1587 return; /* couldn't get the lock, better lock next time */
1588 }
1589 lck_rw_lock_exclusive(pcbinfo->mtx);
1590 }
1591
1592 udp_gc_done = TRUE;
1593
1594 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
1595 inpnxt = inp->inp_list.le_next;
1596
1597 if (inp->inp_wantcnt != WNT_STOPUSING)
1598 continue;
1599
1600 so = inp->inp_socket;
1601 if (!lck_mtx_try_lock(inp->inpcb_mtx)) /* skip if busy, no hurry for cleanup... */
1602 continue;
1603
1604 if (so->so_usecount == 0) {
1605 if (inp->inp_state != INPCB_STATE_DEAD) {
1606 #if INET6
1607 if (INP_CHECK_SOCKAF(so, AF_INET6))
1608 in6_pcbdetach(inp);
1609 else
1610 #endif /* INET6 */
1611 in_pcbdetach(inp);
1612 }
1613 in_pcbdispose(inp);
1614 } else {
1615 lck_mtx_unlock(inp->inpcb_mtx);
1616 }
1617 }
1618 lck_rw_done(pcbinfo->mtx);
1619 }
1620
1621 int
1622 ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr )
1623 {
1624 if ( dstaddr == srcaddr ){
1625 return 0;
1626 }
1627 return 1;
1628 }
1629
1630 void
1631 udp_in_cksum_stats(u_int32_t len)
1632 {
1633 udps_in_sw_cksum++;
1634 udps_in_sw_cksum_bytes += len;
1635 }
1636
1637 void
1638 udp_out_cksum_stats(u_int32_t len)
1639 {
1640 udps_out_sw_cksum++;
1641 udps_out_sw_cksum_bytes += len;
1642 }