1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
61 * $FreeBSD: src/sys/netinet/udp_usrreq.c,v 1.64.2.13 2001/08/08 18:59:54 ghelmer Exp $
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/domain.h>
70 #include <sys/protosw.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/sysctl.h>
74 #include <sys/syslog.h>
75
76 #include <kern/zalloc.h>
77
78 #include <net/if.h>
79 #include <net/if_types.h>
80 #include <net/route.h>
81
82 #include <netinet/in.h>
83 #include <netinet/in_systm.h>
84 #include <netinet/ip.h>
85 #if INET6
86 #include <netinet/ip6.h>
87 #endif
88 #include <netinet/in_pcb.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip_var.h>
91 #if INET6
92 #include <netinet6/ip6_var.h>
93 #endif
94 #include <netinet/ip_icmp.h>
95 #include <netinet/icmp_var.h>
96 #include <netinet/udp.h>
97 #include <netinet/udp_var.h>
98 #include <sys/kdebug.h>
99
100 #if IPSEC
101 #include <netinet6/ipsec.h>
102 #include <netinet6/esp.h>
103 extern int ipsec_bypass;
104 #endif /*IPSEC*/
105
106
107 #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0)
108 #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2)
109 #define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1)
110 #define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3)
111 #define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8))
112 #define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)
113
114 /*
115 * UDP protocol implementation.
116 * Per RFC 768, August, 1980.
117 */
118 #ifndef COMPAT_42
119 static int udpcksum = 1;
120 #else
121 static int udpcksum = 0; /* XXX */
122 #endif
123 SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
124 &udpcksum, 0, "");
125
126 static u_int32_t udps_in_sw_cksum;
127 SYSCTL_UINT(_net_inet_udp, OID_AUTO, in_sw_cksum, CTLFLAG_RD,
128 &udps_in_sw_cksum, 0,
129 "Number of received packets checksummed in software");
130
131 static u_int64_t udps_in_sw_cksum_bytes;
132 SYSCTL_QUAD(_net_inet_udp, OID_AUTO, in_sw_cksum_bytes, CTLFLAG_RD,
133 &udps_in_sw_cksum_bytes,
134 "Amount of received data checksummed in software");
135
136 static u_int32_t udps_out_sw_cksum;
137 SYSCTL_UINT(_net_inet_udp, OID_AUTO, out_sw_cksum, CTLFLAG_RD,
138 &udps_out_sw_cksum, 0,
139 "Number of transmitted packets checksummed in software");
140
141 static u_int64_t udps_out_sw_cksum_bytes;
142 SYSCTL_QUAD(_net_inet_udp, OID_AUTO, out_sw_cksum_bytes, CTLFLAG_RD,
143 &udps_out_sw_cksum_bytes,
144 "Amount of transmitted data checksummed in software");
145
146 int log_in_vain = 0;
147 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
148 &log_in_vain, 0, "Log all incoming UDP packets");
149
150 static int blackhole = 0;
151 SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
152 &blackhole, 0, "Do not send port unreachables for refused connects");
153
154 struct inpcbhead udb; /* from udp_var.h */
155 #define udb6 udb /* for KAME src sync over BSD*'s */
156 struct inpcbinfo udbinfo;
157
158 #ifndef UDBHASHSIZE
159 #define UDBHASHSIZE 16
160 #endif
161
162 extern int esp_udp_encap_port;
163 extern u_long route_generation;
164
165 extern void ipfwsyslog( int level, const char *format,...);
166
167 extern int fw_verbose;
 168 static int udp_gc_done = FALSE; /* garbage collection was performed during the last slowtimo */
169
170 #if IPFIREWALL
171 #define log_in_vain_log( a ) { \
172 if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
173 ipfwsyslog a ; \
174 } \
175 else log a ; \
176 }
177 #else
178 #define log_in_vain_log( a ) { log a; }
179 #endif
180
181 struct udpstat udpstat; /* from udp_var.h */
182 SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD,
183 &udpstat, udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
184 SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount, CTLFLAG_RD,
185 &udbinfo.ipi_count, 0, "Number of active PCBs");
186
187 #if INET6
188 struct udp_in6 {
189 struct sockaddr_in6 uin6_sin;
190 u_char uin6_init_done : 1;
191 };
192 struct udp_ip6 {
193 struct ip6_hdr uip6_ip6;
194 u_char uip6_init_done : 1;
195 };
196 static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
197 static void udp_append(struct inpcb *last, struct ip *ip,
198 struct mbuf *n, int off, struct sockaddr_in *pudp_in,
199 struct udp_in6 *pudp_in6, struct udp_ip6 *pudp_ip6);
200 #else
201 static void udp_append(struct inpcb *last, struct ip *ip,
202 struct mbuf *n, int off, struct sockaddr_in *pudp_in);
203 #endif
204
205 static int udp_detach(struct socket *so);
206 static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
207 struct mbuf *, struct proc *);
208 extern int ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr );
209
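/*
 * Initialize the UDP protocol: set up the global PCB list and hash
 * tables, the inpcb zone, and the pcbinfo lock group and attributes.
 */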
210 void
211 udp_init()
212 {
213 vm_size_t str_size;
214 struct inpcbinfo *pcbinfo;
215
216
217 LIST_INIT(&udb);
218 udbinfo.listhead = &udb;
219 udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
220 udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB,
221 &udbinfo.porthashmask);
222 #ifdef __APPLE__
223 str_size = (vm_size_t) sizeof(struct inpcb);
224 udbinfo.ipi_zone = (void *) zinit(str_size, 80000*str_size, 8192, "udpcb");
225
226 pcbinfo = &udbinfo;
227 /*
228 * allocate lock group attribute and group for udp pcb mutexes
229 */
230 pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
231
232 pcbinfo->mtx_grp = lck_grp_alloc_init("udppcb", pcbinfo->mtx_grp_attr);
233
234 pcbinfo->mtx_attr = lck_attr_alloc_init();
235
236 if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
237 return; /* pretty much dead if this fails... */
238
239 in_pcb_nat_init(&udbinfo, AF_INET, IPPROTO_UDP, SOCK_DGRAM);
240 #else
241 udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets,
242 ZONE_INTERRUPT, 0);
243 #endif
244
245 #if 0
246 /* for pcb sharing testing only */
247 stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
248 kprintf("udp_init in_pcb_new_share_client - stat = %d\n", stat);
249
250 laddr.s_addr = 0x11646464;
251 faddr.s_addr = 0x11646465;
252
253 lport = 1500;
254 in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
255 kprintf("udp_init in_pcb_grab_port - stat = %d\n", stat);
256
257 stat = in_pcb_rem_share_client(&udbinfo, fake_owner);
258 kprintf("udp_init in_pcb_rem_share_client - stat = %d\n", stat);
259
260 stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
261 kprintf("udp_init in_pcb_new_share_client(2) - stat = %d\n", stat);
262
263 laddr.s_addr = 0x11646464;
264 faddr.s_addr = 0x11646465;
265
266 lport = 1500;
267 stat = in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
268 kprintf("udp_init in_pcb_grab_port(2) - stat = %d\n", stat);
269 #endif
270 }
271
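/*
 * UDP input routine: validate the header and checksum, deliver broadcast
 * and multicast datagrams to every matching PCB, hand UDP-encapsulated
 * ESP to esp4_input(), and append unicast datagrams to the matching
 * socket's receive buffer (or send an ICMP port unreachable).
 */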
272 void
273 udp_input(m, iphlen)
274 register struct mbuf *m;
275 int iphlen;
276 {
277 register struct ip *ip;
278 register struct udphdr *uh;
279 register struct inpcb *inp;
280 struct mbuf *opts = 0;
281 int len;
282 struct ip save_ip;
283 struct sockaddr *append_sa;
284 struct inpcbinfo *pcbinfo = &udbinfo;
285 struct sockaddr_in udp_in = {
286 sizeof (udp_in), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }
287 };
288 #if INET6
289 struct udp_in6 udp_in6 = {
290 { sizeof (udp_in6.uin6_sin), AF_INET6, 0, 0,
291 IN6ADDR_ANY_INIT, 0 },
292 0
293 };
294 struct udp_ip6 udp_ip6;
295 #endif /* INET6 */
296
297 udpstat.udps_ipackets++;
298
299 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0,0,0,0,0);
300 if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
301 m->m_pkthdr.csum_flags = 0; /* invalidate hwcksum for UDP */
302
303 /*
 304 * Strip IP options, if any. Ideally we would skip this, make the
 305 * options available to the user, and use them on returned packets,
306 * but we don't yet have a way to check the checksum
307 * with options still present.
308 */
309 if (iphlen > sizeof (struct ip)) {
310 ip_stripoptions(m, (struct mbuf *)0);
311 iphlen = sizeof(struct ip);
312 }
313
314 /*
315 * Get IP and UDP header together in first mbuf.
316 */
317 ip = mtod(m, struct ip *);
318 if (m->m_len < iphlen + sizeof(struct udphdr)) {
319 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
320 udpstat.udps_hdrops++;
321 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
322 return;
323 }
324 ip = mtod(m, struct ip *);
325 }
326 uh = (struct udphdr *)((caddr_t)ip + iphlen);
327
328 /* destination port of 0 is illegal, based on RFC768. */
329 if (uh->uh_dport == 0)
330 goto bad;
331
332 KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
333 ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);
334
335 /*
336 * Make mbuf data length reflect UDP length.
337 * If not enough data to reflect UDP length, drop.
338 */
339 len = ntohs((u_short)uh->uh_ulen);
340 if (ip->ip_len != len) {
341 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
342 udpstat.udps_badlen++;
343 goto bad;
344 }
345 m_adj(m, len - ip->ip_len);
346 /* ip->ip_len = len; */
347 }
348 /*
 349 * Save a copy of the IP header in case we want to restore it
350 * for sending an ICMP error message in response.
351 */
352 save_ip = *ip;
353
354 /*
355 * Checksum extended UDP header and data.
356 */
357 if (uh->uh_sum) {
358 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
359 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
360 uh->uh_sum = m->m_pkthdr.csum_data;
361 else
362 goto doudpcksum;
363 uh->uh_sum ^= 0xffff;
364 } else {
365 char b[9];
366 doudpcksum:
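		/*
		 * Software checksum: save the 9 bytes of ih_x1 in the IP
		 * overlay, zero them and set ih_len so the header doubles as
		 * the UDP pseudo-header, checksum the entire datagram, then
		 * restore the saved bytes.
		 */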
367 *(uint32_t*)&b[0] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0];
368 *(uint32_t*)&b[4] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4];
369 *(uint8_t*)&b[8] = *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8];
370
371 bzero(((struct ipovly *)ip)->ih_x1, 9);
372 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
373 uh->uh_sum = in_cksum(m, len + sizeof (struct ip));
374
375 *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0] = *(uint32_t*)&b[0];
376 *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4] = *(uint32_t*)&b[4];
377 *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8] = *(uint8_t*)&b[8];
378 udp_in_cksum_stats(len);
379 }
380 if (uh->uh_sum) {
381 udpstat.udps_badsum++;
382 m_freem(m);
383 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
384 return;
385 }
386 }
387 #ifndef __APPLE__
388 else
389 udpstat.udps_nosum++;
390 #endif
391
392 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
393 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
394
395 int reuse_sock = 0, mcast_delivered = 0;
396 struct mbuf *n = NULL;
397
398 lck_rw_lock_shared(pcbinfo->mtx);
399 /*
400 * Deliver a multicast or broadcast datagram to *all* sockets
401 * for which the local and remote addresses and ports match
402 * those of the incoming datagram. This allows more than
403 * one process to receive multi/broadcasts on the same port.
404 * (This really ought to be done for unicast datagrams as
405 * well, but that would cause problems with existing
406 * applications that open both address-specific sockets and
407 * a wildcard socket listening to the same port -- they would
408 * end up receiving duplicates of every unicast datagram.
409 * Those applications open the multiple sockets to overcome an
410 * inadequacy of the UDP socket interface, but for backwards
411 * compatibility we avoid the problem here rather than
412 * fixing the interface. Maybe 4.5BSD will remedy this?)
413 */
414
415
416 /*
417 * Construct sockaddr format source address.
418 */
419 udp_in.sin_port = uh->uh_sport;
420 udp_in.sin_addr = ip->ip_src;
421 /*
422 * Locate pcb(s) for datagram.
423 * (Algorithm copied from raw_intr().)
424 */
425 #if INET6
426 udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
427 #endif
428 LIST_FOREACH(inp, &udb, inp_list) {
429 #ifdef __APPLE__
430 /* Ignore nat/SharedIP dummy pcbs */
431 if (inp->inp_socket == &udbinfo.nat_dummy_socket)
432 continue;
433 #endif
434 if (inp->inp_socket == NULL)
435 continue;
436 if (inp != sotoinpcb(inp->inp_socket))
437 panic("udp_input: bad so back ptr inp=%p\n", inp);
438 #if INET6
439 if ((inp->inp_vflag & INP_IPV4) == 0)
440 continue;
441 #endif
442
443 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
444 continue;
445 }
446
447 udp_lock(inp->inp_socket, 1, 0);
448
449 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
450 udp_unlock(inp->inp_socket, 1, 0);
451 continue;
452 }
453
454 if (inp->inp_lport != uh->uh_dport) {
455 udp_unlock(inp->inp_socket, 1, 0);
456 continue;
457 }
458 if (inp->inp_laddr.s_addr != INADDR_ANY) {
459 if (inp->inp_laddr.s_addr !=
460 ip->ip_dst.s_addr) {
461 udp_unlock(inp->inp_socket, 1, 0);
462 continue;
463 }
464 }
465 if (inp->inp_faddr.s_addr != INADDR_ANY) {
466 if (inp->inp_faddr.s_addr !=
467 ip->ip_src.s_addr ||
468 inp->inp_fport != uh->uh_sport) {
469 udp_unlock(inp->inp_socket, 1, 0);
470 continue;
471 }
472 }
473
474 reuse_sock = inp->inp_socket->so_options& (SO_REUSEPORT|SO_REUSEADDR);
475 {
476 #if IPSEC
477 int skipit = 0;
478 /* check AH/ESP integrity. */
479 if (ipsec_bypass == 0) {
480 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
481 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
482 /* do not inject data to pcb */
483 skipit = 1;
484 }
485 }
486 if (skipit == 0)
487 #endif /*IPSEC*/
488 {
489 if (reuse_sock)
490 n = m_copy(m, 0, M_COPYALL);
491 #if INET6
492 udp_append(inp, ip, m,
493 iphlen + sizeof(struct udphdr),
494 &udp_in, &udp_in6, &udp_ip6);
495 #else
496 udp_append(inp, ip, m,
497 iphlen + sizeof(struct udphdr),
498 &udp_in);
499 #endif /* INET6 */
500 mcast_delivered++;
501 }
502 udp_unlock(inp->inp_socket, 1, 0);
503 }
504 /*
505 * Don't look for additional matches if this one does
506 * not have either the SO_REUSEPORT or SO_REUSEADDR
507 * socket options set. This heuristic avoids searching
508 * through all pcbs in the common case of a non-shared
509 * port. It assumes that an application will never
510 * clear these options after setting them.
511 */
512 if (reuse_sock == 0 || ((m = n) == NULL))
513 break;
514 }
515 lck_rw_done(pcbinfo->mtx);
516
517 if (mcast_delivered == 0) {
518 /*
519 * No matching pcb found; discard datagram.
520 * (No need to send an ICMP Port Unreachable
 521 * for a broadcast or multicast datagram.)
522 */
523 udpstat.udps_noportbcast++;
524 goto bad;
525 }
526
527 if (reuse_sock != 0) /* free the extra copy of mbuf */
528 m_freem(m);
529 return;
530 }
531
532 #if IPSEC
533 /*
 534 * UDP to the ESP UDP-encapsulation port (esp_udp_encap_port, typically
 535 * 4500) with a payload whose first four bytes are non-zero is a
 536 * UDP-encapsulated IPSec packet. A payload of exactly one byte with the
 537 * value 0xFF is a NAT keepalive packet. Decapsulate the ESP packet and
 538 * carry on with IPSec input, or discard the NAT keepalive.
539 */
540 if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
541 uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) {
542 int payload_len = len - sizeof(struct udphdr) > 4 ? 4 : len - sizeof(struct udphdr);
543 if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
544 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) + payload_len)) == 0) {
545 udpstat.udps_hdrops++;
546 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
547 return;
548 }
549 ip = mtod(m, struct ip *);
550 uh = (struct udphdr *)((caddr_t)ip + iphlen);
551 }
552 /* Check for NAT keepalive packet */
553 if (payload_len == 1 && *(u_int8_t*)((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
554 m_freem(m);
555 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
556 return;
557 }
558 else if (payload_len == 4 && *(u_int32_t*)((caddr_t)uh + sizeof(struct udphdr)) != 0) {
559 /* UDP encapsulated IPSec packet to pass through NAT */
560 size_t stripsiz;
561
562 stripsiz = sizeof(struct udphdr);
563
564 ip = mtod(m, struct ip *);
565 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
566 m->m_data += stripsiz;
567 m->m_len -= stripsiz;
568 m->m_pkthdr.len -= stripsiz;
569 ip = mtod(m, struct ip *);
570 ip->ip_len = ip->ip_len - stripsiz;
571 ip->ip_p = IPPROTO_ESP;
572
573 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
574 esp4_input(m, iphlen);
575 return;
576 }
577 }
578 #endif
579
580 /*
581 * Locate pcb for datagram.
582 */
583 inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
584 ip->ip_dst, uh->uh_dport, 1, m->m_pkthdr.rcvif);
585 if (inp == NULL) {
586 if (log_in_vain) {
587 char buf[MAX_IPv4_STR_LEN];
588 char buf2[MAX_IPv4_STR_LEN];
589
590 /* check src and dst address */
591 if (log_in_vain != 3)
592 log(LOG_INFO,
593 "Connection attempt to UDP %s:%d from %s:%d\n",
594 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
595 ntohs(uh->uh_dport),
596 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
597 ntohs(uh->uh_sport));
598 else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
599 ip->ip_dst.s_addr != ip->ip_src.s_addr)
600 log_in_vain_log((LOG_INFO,
601 "Stealth Mode connection attempt to UDP %s:%d from %s:%d\n",
602 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
603 ntohs(uh->uh_dport),
604 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
605 ntohs(uh->uh_sport)))
606 }
607 udpstat.udps_noport++;
608 if (m->m_flags & (M_BCAST | M_MCAST)) {
609 udpstat.udps_noportbcast++;
610 goto bad;
611 }
612 #if ICMP_BANDLIM
613 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
614 goto bad;
615 #endif
616 if (blackhole)
617 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP)
618 goto bad;
619 *ip = save_ip;
620 ip->ip_len += iphlen;
621 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
622 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
623 return;
624 }
625 udp_lock(inp->inp_socket, 1, 0);
626
627 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
628 udp_unlock(inp->inp_socket, 1, 0);
629 goto bad;
630 }
631 #if IPSEC
632 if (ipsec_bypass == 0 && inp != NULL) {
633 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
634 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
635 udp_unlock(inp->inp_socket, 1, 0);
636 goto bad;
637 }
638 }
639 #endif /*IPSEC*/
640
641 /*
642 * Construct sockaddr format source address.
643 * Stuff source address and datagram in user buffer.
644 */
645 udp_in.sin_port = uh->uh_sport;
646 udp_in.sin_addr = ip->ip_src;
647 if (inp->inp_flags & INP_CONTROLOPTS
648 || inp->inp_socket->so_options & SO_TIMESTAMP) {
649 #if INET6
650 if (inp->inp_vflag & INP_IPV6) {
651 int savedflags;
652
653 ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
654 savedflags = inp->inp_flags;
655 inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
656 ip6_savecontrol(inp, &opts, &udp_ip6.uip6_ip6, m);
657 inp->inp_flags = savedflags;
658 } else
659 #endif
660 ip_savecontrol(inp, &opts, ip, m);
661 }
662 m_adj(m, iphlen + sizeof(struct udphdr));
663
664 KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
665 save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);
666
667 #if INET6
668 if (inp->inp_vflag & INP_IPV6) {
669 in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
670 append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
671 } else
672 #endif
673 append_sa = (struct sockaddr *)&udp_in;
674 if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa, m, opts, NULL) == 0) {
675 udpstat.udps_fullsock++;
676 }
677 else {
678 sorwakeup(inp->inp_socket);
679 }
680 udp_unlock(inp->inp_socket, 1, 0);
681 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
682 return;
683 bad:
684 m_freem(m);
685 if (opts)
686 m_freem(opts);
687 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
688 return;
689 }
690
691 #if INET6
692 static void
693 ip_2_ip6_hdr(ip6, ip)
694 struct ip6_hdr *ip6;
695 struct ip *ip;
696 {
697 bzero(ip6, sizeof(*ip6));
698
699 ip6->ip6_vfc = IPV6_VERSION;
700 ip6->ip6_plen = ip->ip_len;
701 ip6->ip6_nxt = ip->ip_p;
702 ip6->ip6_hlim = ip->ip_ttl;
703 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
704 IPV6_ADDR_INT32_SMP;
705 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
706 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
707 }
708 #endif
709
710 /*
711 * subroutine of udp_input(), mainly for source code readability.
712 */
713 static void
714 #if INET6
715 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
716 struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
717 struct udp_ip6 *pudp_ip6)
718 #else
719 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
720 struct sockaddr_in *pudp_in)
721 #endif
722 {
723 struct sockaddr *append_sa;
724 struct mbuf *opts = 0;
725
726 #if CONFIG_MACF_NET
727 if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
728 m_freem(n);
729 return;
730 }
731 #endif
732 if (last->inp_flags & INP_CONTROLOPTS ||
733 last->inp_socket->so_options & SO_TIMESTAMP) {
734 #if INET6
735 if (last->inp_vflag & INP_IPV6) {
736 int savedflags;
737
738 if (pudp_ip6->uip6_init_done == 0) {
739 ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
740 pudp_ip6->uip6_init_done = 1;
741 }
742 savedflags = last->inp_flags;
743 last->inp_flags &= ~INP_UNMAPPABLEOPTS;
744 ip6_savecontrol(last, &opts, &pudp_ip6->uip6_ip6, n);
745 last->inp_flags = savedflags;
746 } else
747 #endif
748 ip_savecontrol(last, &opts, ip, n);
749 }
750 #if INET6
751 if (last->inp_vflag & INP_IPV6) {
752 if (pudp_in6->uin6_init_done == 0) {
753 in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
754 pudp_in6->uin6_init_done = 1;
755 }
756 append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
757 } else
758 #endif
759 append_sa = (struct sockaddr *)pudp_in;
760 m_adj(n, off);
761 if (sbappendaddr(&last->inp_socket->so_rcv, append_sa, n, opts, NULL) == 0) {
762 udpstat.udps_fullsock++;
763 } else
764 sorwakeup(last->inp_socket);
765 }
766
767 /*
 768 * Notify a UDP user of an asynchronous error;
 769 * just wake up so that the user can collect the error status.
770 */
771 void
772 udp_notify(inp, errno)
773 register struct inpcb *inp;
774 int errno;
775 {
776 inp->inp_socket->so_error = errno;
777 sorwakeup(inp->inp_socket);
778 sowwakeup(inp->inp_socket);
779 }
780
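/*
 * Handle ICMP control input (port unreachable, host dead, redirects) for
 * UDP: look up the affected PCB and notify it, or notify every PCB bound
 * to the faulting foreign address.
 */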
781 void
782 udp_ctlinput(cmd, sa, vip)
783 int cmd;
784 struct sockaddr *sa;
785 void *vip;
786 {
787 struct ip *ip = vip;
788 struct udphdr *uh;
789 void (*notify)(struct inpcb *, int) = udp_notify;
790 struct in_addr faddr;
791 struct inpcb *inp;
792
793 faddr = ((struct sockaddr_in *)sa)->sin_addr;
794 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
795 return;
796
797 if (PRC_IS_REDIRECT(cmd)) {
798 ip = 0;
799 notify = in_rtchange;
800 } else if (cmd == PRC_HOSTDEAD)
801 ip = 0;
802 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
803 return;
804 if (ip) {
805 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
806 inp = in_pcblookup_hash(&udbinfo, faddr, uh->uh_dport,
807 ip->ip_src, uh->uh_sport, 0, NULL);
808 if (inp != NULL && inp->inp_socket != NULL) {
809 udp_lock(inp->inp_socket, 1, 0);
810 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
811 udp_unlock(inp->inp_socket, 1, 0);
812 return;
813 }
814 (*notify)(inp, inetctlerrmap[cmd]);
815 udp_unlock(inp->inp_socket, 1, 0);
816 }
817 } else
818 in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
819 }
820
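/*
 * Socket-option handler for level IPPROTO_UDP. The only UDP-level option
 * is UDP_NOCKSUM, which disables transmit checksums on an IPv4 UDP socket;
 * a userland sketch of its use:
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_UDP, UDP_NOCKSUM, &on, sizeof (on));
 *
 * Options at any other level fall through to ip_ctloutput().
 */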
821 int
822 udp_ctloutput(struct socket *so, struct sockopt *sopt)
823 {
824 int error, optval;
825 struct inpcb *inp;
826
827 if (sopt->sopt_level != IPPROTO_UDP)
828 return (ip_ctloutput(so, sopt));
829
830 error = 0;
831 inp = sotoinpcb(so);
832
833 switch (sopt->sopt_dir) {
834 case SOPT_SET:
835 switch (sopt->sopt_name) {
836 case UDP_NOCKSUM:
837 /* This option is settable only for UDP over IPv4 */
838 if (!(inp->inp_vflag & INP_IPV4)) {
839 error = EINVAL;
840 break;
841 }
842
843 if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
844 sizeof (optval))) != 0)
845 break;
846
847 if (optval != 0)
848 inp->inp_flags |= INP_UDP_NOCKSUM;
849 else
850 inp->inp_flags &= ~INP_UDP_NOCKSUM;
851 break;
852
853 default:
854 error = ENOPROTOOPT;
855 break;
856 }
857 break;
858
859 case SOPT_GET:
860 switch (sopt->sopt_name) {
861 case UDP_NOCKSUM:
862 optval = inp->inp_flags & INP_UDP_NOCKSUM;
863 break;
864
865 default:
866 error = ENOPROTOOPT;
867 break;
868 }
869 if (error == 0)
870 error = sooptcopyout(sopt, &optval, sizeof (optval));
871 break;
872 }
873 return (error);
874 }
875
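/*
 * Sysctl handler backing net.inet.udp.pcblist: exports the active UDP
 * PCBs as a sequence of xinpcb structures bracketed by xinpgen records
 * so userland can detect concurrent changes.
 */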
876 static int
877 udp_pcblist SYSCTL_HANDLER_ARGS
878 {
879 #pragma unused(oidp, arg1, arg2)
880 int error, i, n;
881 struct inpcb *inp, **inp_list;
882 inp_gen_t gencnt;
883 struct xinpgen xig;
884
885 /*
 886 * The process of preparing the PCB list is too time-consuming and
887 * resource-intensive to repeat twice on every request.
888 */
889 lck_rw_lock_exclusive(udbinfo.mtx);
890 if (req->oldptr == USER_ADDR_NULL) {
891 n = udbinfo.ipi_count;
892 req->oldidx = 2 * (sizeof xig)
893 + (n + n/8) * sizeof(struct xinpcb);
894 lck_rw_done(udbinfo.mtx);
895 return 0;
896 }
897
898 if (req->newptr != USER_ADDR_NULL) {
899 lck_rw_done(udbinfo.mtx);
900 return EPERM;
901 }
902
903 /*
904 * OK, now we're committed to doing something.
905 */
906 gencnt = udbinfo.ipi_gencnt;
907 n = udbinfo.ipi_count;
908
909 bzero(&xig, sizeof(xig));
910 xig.xig_len = sizeof xig;
911 xig.xig_count = n;
912 xig.xig_gen = gencnt;
913 xig.xig_sogen = so_gencnt;
914 error = SYSCTL_OUT(req, &xig, sizeof xig);
915 if (error) {
916 lck_rw_done(udbinfo.mtx);
917 return error;
918 }
919 /*
920 * We are done if there is no pcb
921 */
922 if (n == 0) {
923 lck_rw_done(udbinfo.mtx);
924 return 0;
925 }
926
927 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
928 if (inp_list == 0) {
929 lck_rw_done(udbinfo.mtx);
930 return ENOMEM;
931 }
932
933 for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
934 inp = LIST_NEXT(inp, inp_list)) {
935 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
936 inp_list[i++] = inp;
937 }
938 n = i;
939
940 error = 0;
941 for (i = 0; i < n; i++) {
942 inp = inp_list[i];
943 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
944 struct xinpcb xi;
945
946 bzero(&xi, sizeof(xi));
947 xi.xi_len = sizeof xi;
948 /* XXX should avoid extra copy */
949 inpcb_to_compat(inp, &xi.xi_inp);
950 if (inp->inp_socket)
951 sotoxsocket(inp->inp_socket, &xi.xi_socket);
952 error = SYSCTL_OUT(req, &xi, sizeof xi);
953 }
954 }
955 if (!error) {
956 /*
957 * Give the user an updated idea of our state.
958 * If the generation differs from what we told
959 * her before, she knows that something happened
960 * while we were processing this request, and it
961 * might be necessary to retry.
962 */
963 bzero(&xig, sizeof(xig));
964 xig.xig_len = sizeof xig;
965 xig.xig_gen = udbinfo.ipi_gencnt;
966 xig.xig_sogen = so_gencnt;
967 xig.xig_count = udbinfo.ipi_count;
968 error = SYSCTL_OUT(req, &xig, sizeof xig);
969 }
970 FREE(inp_list, M_TEMP);
971 lck_rw_done(udbinfo.mtx);
972 return error;
973 }
974
975 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
976 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
977
978
979
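/*
 * Derive a small tag (non-zero for a valid socket) from the socket
 * pointer; udp_output() stamps it into m_pkthdr.socket_id so outgoing
 * mbufs can be traced back to a socket.
 */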
980 static __inline__ u_int16_t
981 get_socket_id(struct socket * s)
982 {
983 u_int16_t val;
984
985 if (s == NULL) {
986 return (0);
987 }
988 val = (u_int16_t)(((u_int32_t)s) / sizeof(struct socket));
989 if (val == 0) {
990 val = 0xffff;
991 }
992 return (val);
993 }
994
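/*
 * Build and transmit a single UDP datagram on 'inp'. For an unconnected
 * socket with a destination supplied in 'addr', either a temporary
 * connect/disconnect or a fast path that builds the packet without
 * rewriting the PCB's addresses is used to pick the source and
 * destination addresses.
 */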
995 static int
996 udp_output(inp, m, addr, control, p)
997 register struct inpcb *inp;
998 struct mbuf *m;
999 struct sockaddr *addr;
1000 struct mbuf *control;
1001 struct proc *p;
1002 {
1003 register struct udpiphdr *ui;
1004 register int len = m->m_pkthdr.len;
1005 struct sockaddr_in *sin;
1006 struct in_addr origladdr, laddr, faddr;
1007 u_short lport, fport;
1008 struct sockaddr_in *ifaddr;
1009 int error = 0, udp_dodisconnect = 0;
1010 struct socket *so = inp->inp_socket;
1011 int soopts = 0;
1012 struct mbuf *inpopts;
1013 struct ip_moptions *mopts;
1014 struct route ro;
1015 struct ip_out_args ipoa;
1016
1017 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);
1018
1019 if (control)
1020 m_freem(control); /* XXX */
1021
1022 KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
1023 inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
1024 (htons((u_short)len + sizeof (struct udphdr))));
1025
1026 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
1027 error = EMSGSIZE;
1028 goto release;
1029 }
1030
1031 lck_mtx_assert(inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1032
1033 /* If socket was bound to an ifindex, tell ip_output about it */
1034 ipoa.ipoa_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
1035 inp->inp_boundif : IFSCOPE_NONE;
1036 soopts |= IP_OUTARGS;
1037
 1038 /* If there was a routing change, discard the cached route and check
 1039 * that we still have a valid source address.
 1040 * Acquire a new source address if INADDR_ANY was specified.
 1041 */
1042 if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
1043 if (ifa_foraddr(inp->inp_laddr.s_addr) == 0) { /* src address is gone */
1044 if (inp->inp_flags & INP_INADDR_ANY)
1045 inp->inp_laddr.s_addr = INADDR_ANY; /* new src will be set later */
1046 else {
1047 error = EADDRNOTAVAIL;
1048 goto release;
1049 }
1050 }
1051 rtfree(inp->inp_route.ro_rt);
1052 inp->inp_route.ro_rt = (struct rtentry *)0;
1053 }
1054
1055 origladdr= laddr = inp->inp_laddr;
1056 faddr = inp->inp_faddr;
1057 lport = inp->inp_lport;
1058 fport = inp->inp_fport;
1059
1060 if (addr) {
1061 sin = (struct sockaddr_in *)addr;
1062 if (faddr.s_addr != INADDR_ANY) {
1063 error = EISCONN;
1064 goto release;
1065 }
1066 if (lport == 0) {
1067 /*
 1068 * If we don't have a local port set, go through the full connect.
 1069 * We don't have a local port yet (i.e., we can't be looked up),
 1070 * so it's not an issue if input runs at the same time we do this.
1071 */
1072 error = in_pcbconnect(inp, addr, p);
1073 if (error) {
1074 goto release;
1075 }
1076 laddr = inp->inp_laddr;
1077 lport = inp->inp_lport;
1078 faddr = inp->inp_faddr;
1079 fport = inp->inp_fport;
1080 udp_dodisconnect = 1;
1081 }
1082 else {
 1083 /* Fast path case:
 1084 * we have a full destination address and a local port;
 1085 * use that information to build the packet without changing the pcb
 1086 * and without interfering with the input path. See 3851370.
 1087 */
1088 if (laddr.s_addr == INADDR_ANY) {
1089 if ((error = in_pcbladdr(inp, addr, &ifaddr)) != 0)
1090 goto release;
1091 laddr = ifaddr->sin_addr;
1092 inp->inp_flags |= INP_INADDR_ANY; /* from pcbconnect: remember we don't care about src addr.*/
1093 }
1094
1095 faddr = sin->sin_addr;
1096 fport = sin->sin_port;
1097 }
1098 } else {
1099 if (faddr.s_addr == INADDR_ANY) {
1100 error = ENOTCONN;
1101 goto release;
1102 }
1103 }
1104
1105 #if CONFIG_MACF_NET
1106 mac_mbuf_label_associate_inpcb(inp, m);
1107 #endif
1108
1109 #if CONFIG_IP_EDGEHOLE
1110 ip_edgehole_mbuf_tag(inp, m);
1111 #endif
1112
1113 /*
1114 * Calculate data length and get a mbuf
1115 * for UDP and IP headers.
1116 */
1117 M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT);
1118 if (m == 0) {
1119 error = ENOBUFS;
1120 goto abort;
1121 }
1122
1123 /*
1124 * Fill in mbuf with extended UDP header
1125 * and addresses and length put into network format.
1126 */
1127 ui = mtod(m, struct udpiphdr *);
1128 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1129 ui->ui_pr = IPPROTO_UDP;
1130 ui->ui_src = laddr;
1131 ui->ui_dst = faddr;
1132 ui->ui_sport = lport;
1133 ui->ui_dport = fport;
1134 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1135
1136 /*
1137 * Set up checksum and output datagram.
1138 */
1139 if (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM)) {
1140 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1141 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1142 m->m_pkthdr.csum_flags = CSUM_UDP;
1143 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1144 } else {
1145 ui->ui_sum = 0;
1146 }
1147 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1148 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1149 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1150 udpstat.udps_opackets++;
1151
1152 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1153 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1154
1155 #if IPSEC
1156 if (ipsec_bypass == 0 && ipsec_setsocket(m, inp->inp_socket) != 0) {
1157 error = ENOBUFS;
1158 goto abort;
1159 }
1160 #endif /*IPSEC*/
1161 m->m_pkthdr.socket_id = get_socket_id(inp->inp_socket);
1162
1163 inpopts = inp->inp_options;
1164 soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1165 mopts = inp->inp_moptions;
1166
1167 /* We don't want to cache the route for non-connected UDP */
1168 if (udp_dodisconnect) {
1169 bcopy(&inp->inp_route, &ro, sizeof (ro));
1170 ro.ro_rt = NULL;
1171 }
1172
1173 socket_unlock(so, 0);
1174 /* XXX jgraessley please look at XXX */
1175 error = ip_output_list(m, 0, inpopts,
1176 udp_dodisconnect ? &ro : &inp->inp_route, soopts, mopts, &ipoa);
1177 socket_lock(so, 0);
1178
1179 if (udp_dodisconnect) {
1180 /* Discard the cached route, if there is one */
1181 if (ro.ro_rt != NULL) {
1182 rtfree(ro.ro_rt);
1183 ro.ro_rt = NULL;
1184 }
1185 in_pcbdisconnect(inp);
1186 inp->inp_laddr = origladdr; /* XXX rehash? */
1187 }
1188 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
1189 return (error);
1190
1191 abort:
1192 if (udp_dodisconnect) {
1193 in_pcbdisconnect(inp);
1194 inp->inp_laddr = origladdr; /* XXX rehash? */
1195 }
1196
1197 release:
1198 m_freem(m);
1199 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
1200 return (error);
1201 }
1202
1203 u_long udp_sendspace = 9216; /* really max datagram size */
1204 /* 40 1K datagrams */
1205 SYSCTL_INT(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
1206 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
1207
1208 u_long udp_recvspace = 40 * (1024 +
1209 #if INET6
1210 sizeof(struct sockaddr_in6)
1211 #else
1212 sizeof(struct sockaddr_in)
1213 #endif
1214 );
1215 SYSCTL_INT(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
1216 &udp_recvspace, 0, "Maximum incoming UDP datagram size");
1217
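/*
 * Socket-layer entry points; these are wired into the udp_usrreqs table
 * below.
 */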
1218 static int
1219 udp_abort(struct socket *so)
1220 {
1221 struct inpcb *inp;
1222
1223 inp = sotoinpcb(so);
1224 if (inp == 0)
1225 panic("udp_abort: so=%p null inp\n", so); /* ??? possible? panic instead? */
1226 soisdisconnected(so);
1227 in_pcbdetach(inp);
1228 return 0;
1229 }
1230
1231 static int
1232 udp_attach(struct socket *so, __unused int proto, struct proc *p)
1233 {
1234 struct inpcb *inp;
1235 int error;
1236
1237 inp = sotoinpcb(so);
1238 if (inp != 0)
1239 panic ("udp_attach so=%p inp=%p\n", so, inp);
1240
1241 error = in_pcballoc(so, &udbinfo, p);
1242 if (error)
1243 return error;
1244 error = soreserve(so, udp_sendspace, udp_recvspace);
1245 if (error)
1246 return error;
1247 inp = (struct inpcb *)so->so_pcb;
1248 inp->inp_vflag |= INP_IPV4;
1249 inp->inp_ip_ttl = ip_defttl;
1250 return 0;
1251 }
1252
1253 static int
1254 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
1255 {
1256 struct inpcb *inp;
1257 int error;
1258
1259 if (nam->sa_family != 0 && nam->sa_family != AF_INET
1260 && nam->sa_family != AF_INET6) {
1261 return EAFNOSUPPORT;
1262 }
1263 inp = sotoinpcb(so);
1264 if (inp == 0)
1265 return EINVAL;
1266 error = in_pcbbind(inp, nam, p);
1267 return error;
1268 }
1269
1270 static int
1271 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
1272 {
1273 struct inpcb *inp;
1274 int error;
1275
1276 inp = sotoinpcb(so);
1277 if (inp == 0)
1278 return EINVAL;
1279 if (inp->inp_faddr.s_addr != INADDR_ANY)
1280 return EISCONN;
1281 error = in_pcbconnect(inp, nam, p);
1282 if (error == 0)
1283 soisconnected(so);
1284 return error;
1285 }
1286
1287 static int
1288 udp_detach(struct socket *so)
1289 {
1290 struct inpcb *inp;
1291
1292 inp = sotoinpcb(so);
1293 if (inp == 0)
1294 panic("udp_detach: so=%p null inp\n", so); /* ??? possible? panic instead? */
1295 in_pcbdetach(inp);
1296 inp->inp_state = INPCB_STATE_DEAD;
1297 return 0;
1298 }
1299
1300 static int
1301 udp_disconnect(struct socket *so)
1302 {
1303 struct inpcb *inp;
1304
1305 inp = sotoinpcb(so);
1306 if (inp == 0)
1307 return EINVAL;
1308 if (inp->inp_faddr.s_addr == INADDR_ANY)
1309 return ENOTCONN;
1310
1311 in_pcbdisconnect(inp);
1312 inp->inp_laddr.s_addr = INADDR_ANY;
1313 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1314 return 0;
1315 }
1316
1317 static int
1318 udp_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *addr,
1319 struct mbuf *control, struct proc *p)
1320 {
1321 struct inpcb *inp;
1322
1323 inp = sotoinpcb(so);
1324 if (inp == 0) {
1325 m_freem(m);
1326 return EINVAL;
1327 }
1328
1329 return udp_output(inp, m, addr, control, p);
1330 }
1331
1332 int
1333 udp_shutdown(struct socket *so)
1334 {
1335 struct inpcb *inp;
1336
1337 inp = sotoinpcb(so);
1338 if (inp == 0)
1339 return EINVAL;
1340 socantsendmore(so);
1341 return 0;
1342 }
1343
1344 struct pr_usrreqs udp_usrreqs = {
1345 udp_abort, pru_accept_notsupp, udp_attach, udp_bind, udp_connect,
1346 pru_connect2_notsupp, in_control, udp_detach, udp_disconnect,
1347 pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
1348 pru_rcvoob_notsupp, udp_send, pru_sense_null, udp_shutdown,
1349 in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
1350 };
1351
1352
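/*
 * Lock the per-PCB mutex of a UDP socket. A non-zero 'refcount' bumps the
 * socket use count; a non-zero 'debug' value supplies the caller address
 * recorded for lock debugging.
 */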
1353 int
1354 udp_lock(struct socket *so, int refcount, int debug)
1355 {
1356 int lr_saved;
1357 if (debug == 0)
1358 lr_saved = (unsigned int) __builtin_return_address(0);
1359 else lr_saved = debug;
1360
1361 if (so->so_pcb) {
1362 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_NOTOWNED);
1363 lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1364 }
1365 else
1366 panic("udp_lock: so=%p NO PCB! lr=%x\n", so, lr_saved);
1367
1368 if (refcount)
1369 so->so_usecount++;
1370
1371 so->lock_lr[so->next_lock_lr] = (u_int32_t)lr_saved;
1372 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
1373 return (0);
1374 }
1375
1376 int
1377 udp_unlock(struct socket *so, int refcount, int debug)
1378 {
1379 int lr_saved;
1380
1381 if (debug == 0)
1382 lr_saved = (unsigned int) __builtin_return_address(0);
1383 else lr_saved = debug;
1384
1385 if (refcount) {
1386 so->so_usecount--;
1387 #if 0
1388 {
1389 struct inpcb *inp = sotoinpcb(so);
1390 struct inpcbinfo *pcbinfo = &udbinfo;
1391
1392 if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
1393
1394 if (lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
1395 in_pcbdispose(inp);
1396 lck_rw_done(pcbinfo->mtx);
1397 return(0);
1398 }
1399 }
1400 }
1401 #endif
1402 }
1403 if (so->so_pcb == NULL)
1404 panic("udp_unlock: so=%p NO PCB! lr=%x\n", so, lr_saved);
1405 else {
1406 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1407 so->unlock_lr[so->next_unlock_lr] = (u_int32_t)lr_saved;
1408 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
1409 lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1410 }
1411
1412
1413 return (0);
1414 }
1415
1416 lck_mtx_t *
1417 udp_getlock(struct socket *so, __unused int locktype)
1418 {
1419 struct inpcb *inp = sotoinpcb(so);
1420
1421
1422 if (so->so_pcb)
1423 return(inp->inpcb_mtx);
1424 else {
1425 panic("udp_getlock: so=%p NULL so_pcb\n", so);
1426 return (so->so_proto->pr_domain->dom_mtx);
1427 }
1428 }
1429
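/*
 * Slow-timer garbage collection: dispose of PCBs that have been marked
 * WNT_STOPUSING once their socket use count has dropped to zero.
 */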
1430 void
1431 udp_slowtimo()
1432 {
1433 struct inpcb *inp, *inpnxt;
1434 struct socket *so;
1435 struct inpcbinfo *pcbinfo = &udbinfo;
1436
1437 if (lck_rw_try_lock_exclusive(pcbinfo->mtx) == FALSE) {
1438 if (udp_gc_done == TRUE) {
1439 udp_gc_done = FALSE;
 1440 return; /* couldn't get the lock; try again next time */
1441 }
1442 lck_rw_lock_exclusive(pcbinfo->mtx);
1443 }
1444
1445 udp_gc_done = TRUE;
1446
1447 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
1448 inpnxt = inp->inp_list.le_next;
1449
1450 /* Ignore nat/SharedIP dummy pcbs */
1451 if (inp->inp_socket == &udbinfo.nat_dummy_socket)
1452 continue;
1453
1454 if (inp->inp_wantcnt != WNT_STOPUSING)
1455 continue;
1456
1457 so = inp->inp_socket;
1458 if (!lck_mtx_try_lock(inp->inpcb_mtx)) /* skip if busy, no hurry for cleanup... */
1459 continue;
1460
1461 if (so->so_usecount == 0)
1462 in_pcbdispose(inp);
1463 else
1464 lck_mtx_unlock(inp->inpcb_mtx);
1465 }
1466 lck_rw_done(pcbinfo->mtx);
1467 }
1468
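/*
 * Simple sanity check on a datagram's addresses: returns 0 when the
 * destination address equals the source address, 1 otherwise.
 */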
1469 int
1470 ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr )
1471 {
1472 if ( dstaddr == srcaddr ){
1473 return 0;
1474 }
1475 return 1;
1476 }
1477
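/*
 * Accounting helpers for the in_sw_cksum/out_sw_cksum sysctls: count
 * packets and bytes checksummed in software.
 */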
1478 void
1479 udp_in_cksum_stats(u_int32_t len)
1480 {
1481 udps_in_sw_cksum++;
1482 udps_in_sw_cksum_bytes += len;
1483 }
1484
1485 void
1486 udp_out_cksum_stats(u_int32_t len)
1487 {
1488 udps_out_sw_cksum++;
1489 udps_out_sw_cksum_bytes += len;
1490 }