1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
61 * $FreeBSD: src/sys/netinet/udp_usrreq.c,v 1.64.2.13 2001/08/08 18:59:54 ghelmer Exp $
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/domain.h>
70 #include <sys/protosw.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/sysctl.h>
74 #include <sys/syslog.h>
75
76 #include <net/if.h>
77 #include <net/if_types.h>
78 #include <net/route.h>
79
80 #include <netinet/in.h>
81 #include <netinet/in_systm.h>
82 #include <netinet/ip.h>
83 #if INET6
84 #include <netinet/ip6.h>
85 #endif
86 #include <netinet/in_pcb.h>
87 #include <netinet/in_var.h>
88 #include <netinet/ip_var.h>
89 #if INET6
90 #include <netinet6/ip6_var.h>
91 #endif
92 #include <netinet/ip_icmp.h>
93 #include <netinet/icmp_var.h>
94 #include <netinet/udp.h>
95 #include <netinet/udp_var.h>
96 #include <sys/kdebug.h>
97
98 #if IPSEC
99 #include <netinet6/ipsec.h>
100 extern int ipsec_bypass;
101 extern lck_mtx_t *sadb_mutex;
102 #endif /*IPSEC*/
103
104
105 #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0)
106 #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2)
107 #define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1)
108 #define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3)
109 #define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8))
110 #define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)
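/*
 * Note: the *_BEG/*_END pairs bracket the protocol-layer input/output
 * paths and DBG_FNC_UDP_INPUT/DBG_FNC_UDP_OUTPUT bracket the functions
 * themselves; KERNEL_DEBUG() emits these as kdebug trace points, so the
 * time spent in udp_input()/udp_output() can be measured with the kernel
 * tracing tools.
 */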
111
112 /*
113 * UDP protocol implementation.
114 * Per RFC 768, August, 1980.
115 */
116 #ifndef COMPAT_42
117 static int udpcksum = 1;
118 #else
119 static int udpcksum = 0; /* XXX */
120 #endif
121 SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
122 &udpcksum, 0, "");
123
124 int log_in_vain = 0;
125 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
126 &log_in_vain, 0, "Log all incoming UDP packets");
127
128 static int blackhole = 0;
129 SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
130 &blackhole, 0, "Do not send port unreachables for refused connects");
131
132 struct inpcbhead udb; /* from udp_var.h */
133 #define udb6 udb /* for KAME src sync over BSD*'s */
134 struct inpcbinfo udbinfo;
135
136 #ifndef UDBHASHSIZE
137 #define UDBHASHSIZE 16
138 #endif
139
140 extern int apple_hwcksum_rx;
141 extern int esp_udp_encap_port;
142 extern u_long route_generation;
143
144 extern void ipfwsyslog( int level, char *format,...);
145
146 extern int fw_verbose;
147
148 #define log_in_vain_log( a ) { \
149 if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
150 ipfwsyslog a ; \
151 } \
152 else log a ; \
153 }
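/*
 * Note on log_in_vain values: any non-zero setting logs datagrams that
 * arrive for a port with no listener; the special value 3 selects the
 * Apple "stealth mode" message used below in udp_input(), and the macro
 * above routes that message into ipfw.log via ipfwsyslog() when
 * fw_verbose is 2, falling back to the regular system log otherwise.
 */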
154
155 struct udpstat udpstat; /* from udp_var.h */
156 SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD,
157 &udpstat, udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
158 SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount, CTLFLAG_RD,
159 &udbinfo.ipi_count, 0, "Number of active PCBs");
160
161 static struct sockaddr_in udp_in = { sizeof(udp_in), AF_INET };
162 #if INET6
163 struct udp_in6 {
164 struct sockaddr_in6 uin6_sin;
165 u_char uin6_init_done : 1;
166 } udp_in6 = {
167 { sizeof(udp_in6.uin6_sin), AF_INET6 },
168 0
169 };
170 struct udp_ip6 {
171 struct ip6_hdr uip6_ip6;
172 u_char uip6_init_done : 1;
173 } udp_ip6;
174 #endif /* INET6 */
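/*
 * Note: udp_in, udp_in6 and udp_ip6 above are global scratch areas that
 * are refilled for every inbound datagram.  udp_input() stores the source
 * address in udp_in; for sockets using the v4-mapped IPv6 form the data
 * is converted lazily into udp_in6/udp_ip6, with uin6_init_done and
 * uip6_init_done recording whether that conversion has already been done
 * for the current datagram (see udp_append() below).
 */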
175
176 static void udp_append(struct inpcb *last, struct ip *ip,
177 struct mbuf *n, int off);
178 #if INET6
179 static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
180 #endif
181
182 static int udp_detach(struct socket *so);
183 static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
184 struct mbuf *, struct proc *);
185 extern int ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr );
186
187 void
188 udp_init()
189 {
190 vm_size_t str_size;
191 struct inpcbinfo *pcbinfo;
192
193
194 LIST_INIT(&udb);
195 udbinfo.listhead = &udb;
196 udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
197 udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB,
198 &udbinfo.porthashmask);
199 #ifdef __APPLE__
200 str_size = (vm_size_t) sizeof(struct inpcb);
201 udbinfo.ipi_zone = (void *) zinit(str_size, 80000*str_size, 8192, "udpcb");
202
203 pcbinfo = &udbinfo;
204 /*
205 * allocate lock group attribute and group for udp pcb mutexes
206 */
207 pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
208 lck_grp_attr_setdefault(pcbinfo->mtx_grp_attr);
209
210 pcbinfo->mtx_grp = lck_grp_alloc_init("udppcb", pcbinfo->mtx_grp_attr);
211
212 pcbinfo->mtx_attr = lck_attr_alloc_init();
213 lck_attr_setdefault(pcbinfo->mtx_attr);
214
215 if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
216 return; /* pretty much dead if this fails... */
217
218 in_pcb_nat_init(&udbinfo, AF_INET, IPPROTO_UDP, SOCK_DGRAM);
219 #else
220 udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets,
221 ZONE_INTERRUPT, 0);
222 #endif
223
224 #if 0
225 /* for pcb sharing testing only */
226 stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
227 kprintf("udp_init in_pcb_new_share_client - stat = %d\n", stat);
228
229 laddr.s_addr = 0x11646464;
230 faddr.s_addr = 0x11646465;
231
232 lport = 1500;
233 in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
234 kprintf("udp_init in_pcb_grab_port - stat = %d\n", stat);
235
236 stat = in_pcb_rem_share_client(&udbinfo, fake_owner);
237 kprintf("udp_init in_pcb_rem_share_client - stat = %d\n", stat);
238
239 stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
240 kprintf("udp_init in_pcb_new_share_client(2) - stat = %d\n", stat);
241
242 laddr.s_addr = 0x11646464;
243 faddr.s_addr = 0x11646465;
244
245 lport = 1500;
246 stat = in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
247 kprintf("udp_init in_pcb_grab_port(2) - stat = %d\n", stat);
248 #endif
249 }
250
251 void
252 udp_input(m, iphlen)
253 register struct mbuf *m;
254 int iphlen;
255 {
256 register struct ip *ip;
257 register struct udphdr *uh;
258 register struct inpcb *inp;
259 struct mbuf *opts = 0;
260 int len;
261 struct ip save_ip;
262 struct sockaddr *append_sa;
263 struct inpcbinfo *pcbinfo = &udbinfo;
264
265 udpstat.udps_ipackets++;
266
267 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0,0,0,0,0);
268 if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
269 m->m_pkthdr.csum_flags = 0; /* invalidate hwcksum for UDP */
270
271 /*
272 * Strip IP options, if any; should skip this,
273 * make available to user, and use on returned packets,
274 * but we don't yet have a way to check the checksum
275 * with options still present.
276 */
277 if (iphlen > sizeof (struct ip)) {
278 ip_stripoptions(m, (struct mbuf *)0);
279 iphlen = sizeof(struct ip);
280 }
281
282 /*
283 * Get IP and UDP header together in first mbuf.
284 */
285 ip = mtod(m, struct ip *);
286 if (m->m_len < iphlen + sizeof(struct udphdr)) {
287 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
288 udpstat.udps_hdrops++;
289 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
290 return;
291 }
292 ip = mtod(m, struct ip *);
293 }
294 uh = (struct udphdr *)((caddr_t)ip + iphlen);
295
296 /* destination port of 0 is illegal, based on RFC768. */
297 if (uh->uh_dport == 0)
298 goto bad;
299
300 KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
301 ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);
302
303 /*
304 * Make mbuf data length reflect UDP length.
305 * If not enough data to reflect UDP length, drop.
306 */
307 len = ntohs((u_short)uh->uh_ulen);
308 if (ip->ip_len != len) {
309 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
310 udpstat.udps_badlen++;
311 goto bad;
312 }
313 m_adj(m, len - ip->ip_len);
314 /* ip->ip_len = len; */
315 }
316 /*
317 * Save a copy of the IP header in case we want restore it
318 * for sending an ICMP error message in response.
319 */
320 save_ip = *ip;
321
322 /*
323 * Checksum extended UDP header and data.
324 */
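/*
 * Two cases below: if the interface verified the checksum in hardware
 * (CSUM_DATA_VALID) and the sum covers the pseudo-header
 * (CSUM_PSEUDO_HDR), csum_data holds the final sum, where 0xffff means
 * "good", so XORing with 0xffff leaves uh_sum == 0 on success.
 * Otherwise the sum is computed in software: the struct ipovly overlay
 * temporarily turns the IP header into a UDP pseudo-header (ih_x1 zeroed,
 * ih_len set to the UDP length) so that in_cksum() over pseudo-header
 * plus UDP header and data yields 0 for a valid packet; the overwritten
 * header bytes are saved in b[] and restored afterwards.
 */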
325 if (uh->uh_sum) {
326 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
327 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
328 uh->uh_sum = m->m_pkthdr.csum_data;
329 else
330 goto doudpcksum;
331 uh->uh_sum ^= 0xffff;
332 } else {
333 char b[9];
334 doudpcksum:
335 *(uint32_t*)&b[0] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0];
336 *(uint32_t*)&b[4] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4];
337 *(uint8_t*)&b[8] = *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8];
338
339 bzero(((struct ipovly *)ip)->ih_x1, 9);
340 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
341 uh->uh_sum = in_cksum(m, len + sizeof (struct ip));
342
343 *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0] = *(uint32_t*)&b[0];
344 *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4] = *(uint32_t*)&b[4];
345 *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8] = *(uint8_t*)&b[8];
346 }
347 if (uh->uh_sum) {
348 udpstat.udps_badsum++;
349 m_freem(m);
350 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
351 return;
352 }
353 }
354 #ifndef __APPLE__
355 else
356 udpstat.udps_nosum++;
357 #endif
358
359 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
360 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
361 struct inpcb *last;
362 lck_rw_lock_shared(pcbinfo->mtx);
363 /*
364 * Deliver a multicast or broadcast datagram to *all* sockets
365 * for which the local and remote addresses and ports match
366 * those of the incoming datagram. This allows more than
367 * one process to receive multi/broadcasts on the same port.
368 * (This really ought to be done for unicast datagrams as
369 * well, but that would cause problems with existing
370 * applications that open both address-specific sockets and
371 * a wildcard socket listening to the same port -- they would
372 * end up receiving duplicates of every unicast datagram.
373 * Those applications open the multiple sockets to overcome an
374 * inadequacy of the UDP socket interface, but for backwards
375 * compatibility we avoid the problem here rather than
376 * fixing the interface. Maybe 4.5BSD will remedy this?)
377 */
378
379
380 /*
381 * Construct sockaddr format source address.
382 */
383 udp_in.sin_port = uh->uh_sport;
384 udp_in.sin_addr = ip->ip_src;
385 /*
386 * Locate pcb(s) for datagram.
387 * (Algorithm copied from raw_intr().)
388 */
389 last = NULL;
390 #if INET6
391 udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
392 #endif
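/*
 * Locking in this loop: the pcbinfo lock is held shared for the list
 * walk; each candidate PCB is pinned with WNT_ACQUIRE before its
 * per-socket lock is taken and then re-checked with WNT_RELEASE, so a
 * PCB that is being torn down concurrently is skipped instead of used.
 */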
393 LIST_FOREACH(inp, &udb, inp_list) {
394 #ifdef __APPLE__
395 /* Ignore nat/SharedIP dummy pcbs */
396 if (inp->inp_socket == &udbinfo.nat_dummy_socket)
397 continue;
398 #endif
399 if (inp->inp_socket == NULL)
400 continue;
401 if (inp != sotoinpcb(inp->inp_socket))
402 panic("udp_input: bad so back ptr inp=%x\n", inp);
403 #if INET6
404 if ((inp->inp_vflag & INP_IPV4) == 0)
405 continue;
406 #endif
407 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
408 continue;
409 }
410
411 udp_lock(inp->inp_socket, 1, 0);
412
413 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
414 udp_unlock(inp->inp_socket, 1, 0);
415 continue;
416 }
417
418 if (inp->inp_lport != uh->uh_dport) {
419 udp_unlock(inp->inp_socket, 1, 0);
420 continue;
421 }
422 if (inp->inp_laddr.s_addr != INADDR_ANY) {
423 if (inp->inp_laddr.s_addr !=
424 ip->ip_dst.s_addr) {
425 udp_unlock(inp->inp_socket, 1, 0);
426 continue;
427 }
428 }
429 if (inp->inp_faddr.s_addr != INADDR_ANY) {
430 if (inp->inp_faddr.s_addr !=
431 ip->ip_src.s_addr ||
432 inp->inp_fport != uh->uh_sport) {
433 udp_unlock(inp->inp_socket, 1, 0);
434 continue;
435 }
436 }
437
438 if (last != NULL) {
439 struct mbuf *n;
440 #if IPSEC
441 int skipit = 0;
442 /* check AH/ESP integrity. */
443 if (ipsec_bypass == 0) {
444 lck_mtx_lock(sadb_mutex);
445 if (ipsec4_in_reject_so(m, last->inp_socket)) {
446 ipsecstat.in_polvio++;
447 /* do not inject data to pcb */
448 skipit = 1;
449 }
450 lck_mtx_unlock(sadb_mutex);
451 }
452 if (skipit == 0)
453 #endif /*IPSEC*/
454 if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
455 udp_append(last, ip, n,
456 iphlen +
457 sizeof(struct udphdr));
458 }
459 udp_unlock(last->inp_socket, 1, 0);
460 }
461 last = inp;
462 /*
463 * Don't look for additional matches if this one does
464 * not have either the SO_REUSEPORT or SO_REUSEADDR
465 * socket options set. This heuristic avoids searching
466 * through all pcbs in the common case of a non-shared
467 * port. It assumes that an application will never
468 * clear these options after setting them.
469 */
470 if ((last->inp_socket->so_options&(SO_REUSEPORT|SO_REUSEADDR)) == 0)
471 break;
472 }
473 lck_rw_done(pcbinfo->mtx);
474
475 if (last == NULL) {
476 /*
477 * No matching pcb found; discard datagram.
478 * (No need to send an ICMP Port Unreachable
479 * for a broadcast or multicast datagram.)
480 */
481 udpstat.udps_noportbcast++;
482 goto bad;
483 }
484 #if IPSEC
485 /* check AH/ESP integrity. */
486 if (ipsec_bypass == 0 && m) {
487 lck_mtx_lock(sadb_mutex);
488 if (ipsec4_in_reject_so(m, last->inp_socket)) {
489 ipsecstat.in_polvio++;
490 lck_mtx_unlock(sadb_mutex);
491 udp_unlock(last->inp_socket, 1, 0);
492 goto bad;
493 }
494 lck_mtx_unlock(sadb_mutex);
495 }
496 #endif /*IPSEC*/
497 udp_append(last, ip, m, iphlen + sizeof(struct udphdr));
498 udp_unlock(last->inp_socket, 1, 0);
499 return;
500 }
501
502 #if IPSEC
503 /*
504 * UDP to port 4500 with a payload where the first four bytes are
505 * not zero is a UDP encapsulated IPSec packet. Packets where
506 * the payload is one byte and that byte is 0xFF are NAT keepalive
507 * packets. Decapsulate the ESP packet and carry on with IPSec input
508 * or discard the NAT keep-alive.
509 */
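/*
 * (These conventions match what was later standardized for UDP
 * encapsulation of ESP in RFC 3948: a one-byte 0xFF payload is a NAT
 * keepalive, four zero bytes mark an IKE packet, which simply falls
 * through to normal socket delivery below, and anything else is ESP.
 * Decapsulation slides the IP header forward over the UDP header,
 * fixes up the lengths, rewrites ip_p to IPPROTO_ESP and hands the
 * packet to esp4_input().)
 */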
510 if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
511 uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) {
512 int payload_len = len - sizeof(struct udphdr) > 4 ? 4 : len - sizeof(struct udphdr);
513 if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
514 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) + payload_len)) == 0) {
515 udpstat.udps_hdrops++;
516 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
517 return;
518 }
519 ip = mtod(m, struct ip *);
520 uh = (struct udphdr *)((caddr_t)ip + iphlen);
521 }
522 /* Check for NAT keepalive packet */
523 if (payload_len == 1 && *(u_int8_t*)((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
524 m_freem(m);
525 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
526 return;
527 }
528 else if (payload_len == 4 && *(u_int32_t*)((caddr_t)uh + sizeof(struct udphdr)) != 0) {
529 /* UDP encapsulated IPSec packet to pass through NAT */
530 size_t stripsiz;
531
532 stripsiz = sizeof(struct udphdr);
533
534 ip = mtod(m, struct ip *);
535 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
536 m->m_data += stripsiz;
537 m->m_len -= stripsiz;
538 m->m_pkthdr.len -= stripsiz;
539 ip = mtod(m, struct ip *);
540 ip->ip_len = ip->ip_len - stripsiz;
541 ip->ip_p = IPPROTO_ESP;
542
543 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
544 esp4_input(m, iphlen);
545 return;
546 }
547 }
548 #endif
549
550 /*
551 * Locate pcb for datagram.
552 */
553 inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
554 ip->ip_dst, uh->uh_dport, 1, m->m_pkthdr.rcvif);
555 if (inp == NULL) {
556 if (log_in_vain) {
557 char buf[MAX_IPv4_STR_LEN];
558 char buf2[MAX_IPv4_STR_LEN];
559
560 /* check src and dst address */
561 if (log_in_vain != 3)
562 log(LOG_INFO,
563 "Connection attempt to UDP %s:%d from %s:%d\n",
564 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
565 ntohs(uh->uh_dport),
566 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
567 ntohs(uh->uh_sport));
568 else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
569 ip->ip_dst.s_addr != ip->ip_src.s_addr)
570 log_in_vain_log((LOG_INFO,
571 "Stealth Mode connection attempt to UDP %s:%d from %s:%d\n",
572 inet_ntop(AF_INET, &ip->ip_dst, buf, sizeof(buf)),
573 ntohs(uh->uh_dport),
574 inet_ntop(AF_INET, &ip->ip_src, buf2, sizeof(buf2)),
575 ntohs(uh->uh_sport)))
576 }
577 udpstat.udps_noport++;
578 if (m->m_flags & (M_BCAST | M_MCAST)) {
579 udpstat.udps_noportbcast++;
580 goto bad;
581 }
582 #if ICMP_BANDLIM
583 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
584 goto bad;
585 #endif
586 if (blackhole)
587 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP)
588 goto bad;
589 *ip = save_ip;
590 ip->ip_len += iphlen;
591 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
592 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
593 return;
594 }
595 udp_lock(inp->inp_socket, 1, 0);
596
597 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
598 udp_unlock(inp->inp_socket, 1, 0);
599 goto bad;
600 }
601 #if IPSEC
602 if (ipsec_bypass == 0 && inp != NULL) {
603 lck_mtx_lock(sadb_mutex);
604 if (ipsec4_in_reject_so(m, inp->inp_socket)) {
605 ipsecstat.in_polvio++;
606 lck_mtx_unlock(sadb_mutex);
607 udp_unlock(inp->inp_socket, 1, 0);
608 goto bad;
609 }
610 lck_mtx_unlock(sadb_mutex);
611 }
612 #endif /*IPSEC*/
613
614 /*
615 * Construct sockaddr format source address.
616 * Stuff source address and datagram in user buffer.
617 */
618 udp_in.sin_port = uh->uh_sport;
619 udp_in.sin_addr = ip->ip_src;
620 if (inp->inp_flags & INP_CONTROLOPTS
621 || inp->inp_socket->so_options & SO_TIMESTAMP) {
622 #if INET6
623 if (inp->inp_vflag & INP_IPV6) {
624 int savedflags;
625
626 ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
627 savedflags = inp->inp_flags;
628 inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
629 ip6_savecontrol(inp, &opts, &udp_ip6.uip6_ip6, m);
630 inp->inp_flags = savedflags;
631 } else
632 #endif
633 ip_savecontrol(inp, &opts, ip, m);
634 }
635 m_adj(m, iphlen + sizeof(struct udphdr));
636
637 KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
638 save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);
639
640 #if INET6
641 if (inp->inp_vflag & INP_IPV6) {
642 in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
643 append_sa = (struct sockaddr *)&udp_in6;
644 } else
645 #endif
646 append_sa = (struct sockaddr *)&udp_in;
647 if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa, m, opts, NULL) == 0) {
648 udpstat.udps_fullsock++;
649 }
650 else {
651 sorwakeup(inp->inp_socket);
652 }
653 udp_unlock(inp->inp_socket, 1, 0);
654 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
655 return;
656 bad:
657 m_freem(m);
658 if (opts)
659 m_freem(opts);
660 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
661 return;
662 }
663
664 #if INET6
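/*
 * Build a minimal IPv6 header from an IPv4 one so that ip6_savecontrol()
 * can produce IPv6-style control messages for v4-mapped sockets: the
 * addresses become IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) and the
 * length, protocol and hop limit are carried over from the IPv4 header.
 */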
665 static void
666 ip_2_ip6_hdr(ip6, ip)
667 struct ip6_hdr *ip6;
668 struct ip *ip;
669 {
670 bzero(ip6, sizeof(*ip6));
671
672 ip6->ip6_vfc = IPV6_VERSION;
673 ip6->ip6_plen = ip->ip_len;
674 ip6->ip6_nxt = ip->ip_p;
675 ip6->ip6_hlim = ip->ip_ttl;
676 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
677 IPV6_ADDR_INT32_SMP;
678 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
679 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
680 }
681 #endif
682
683 /*
684 * subroutine of udp_input(), mainly for source code readability.
685 * caller must properly init udp_ip6 and udp_in6 beforehand.
686 */
687 static void
688 udp_append(last, ip, n, off)
689 struct inpcb *last;
690 struct ip *ip;
691 struct mbuf *n;
692 int off;
693 {
694 struct sockaddr *append_sa;
695 struct mbuf *opts = 0;
696
697 if (last->inp_flags & INP_CONTROLOPTS ||
698 last->inp_socket->so_options & SO_TIMESTAMP) {
699 #if INET6
700 if (last->inp_vflag & INP_IPV6) {
701 int savedflags;
702
703 if (udp_ip6.uip6_init_done == 0) {
704 ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
705 udp_ip6.uip6_init_done = 1;
706 }
707 savedflags = last->inp_flags;
708 last->inp_flags &= ~INP_UNMAPPABLEOPTS;
709 ip6_savecontrol(last, &opts, &udp_ip6.uip6_ip6, n);
710 last->inp_flags = savedflags;
711 } else
712 #endif
713 ip_savecontrol(last, &opts, ip, n);
714 }
715 #if INET6
716 if (last->inp_vflag & INP_IPV6) {
717 if (udp_in6.uin6_init_done == 0) {
718 in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
719 udp_in6.uin6_init_done = 1;
720 }
721 append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
722 } else
723 #endif
724 append_sa = (struct sockaddr *)&udp_in;
725 m_adj(n, off);
726 if (sbappendaddr(&last->inp_socket->so_rcv, append_sa, n, opts, NULL) == 0) {
727 udpstat.udps_fullsock++;
728 } else
729 sorwakeup(last->inp_socket);
730 }
731
732 /*
733 * Notify a udp user of an asynchronous error;
734 * just wake up so that he can collect error status.
735 */
736 void
737 udp_notify(inp, errno)
738 register struct inpcb *inp;
739 int errno;
740 {
741 inp->inp_socket->so_error = errno;
742 sorwakeup(inp->inp_socket);
743 sowwakeup(inp->inp_socket);
744 }
745
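/*
 * Control input: called for ICMP errors and routing events affecting UDP.
 * Redirects and PRC_HOSTDEAD are delivered to every PCB connected to the
 * foreign address (for redirects via in_rtchange(), which drops the cached
 * route); other errors are translated through inetctlerrmap and, when the
 * offending UDP header is available, delivered only to the matching PCB.
 */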
746 void
747 udp_ctlinput(cmd, sa, vip)
748 int cmd;
749 struct sockaddr *sa;
750 void *vip;
751 {
752 struct ip *ip = vip;
753 struct udphdr *uh;
754 void (*notify)(struct inpcb *, int) = udp_notify;
755 struct in_addr faddr;
756 struct inpcb *inp;
757
758 faddr = ((struct sockaddr_in *)sa)->sin_addr;
759 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
760 return;
761
762 if (PRC_IS_REDIRECT(cmd)) {
763 ip = 0;
764 notify = in_rtchange;
765 } else if (cmd == PRC_HOSTDEAD)
766 ip = 0;
767 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
768 return;
769 if (ip) {
770 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
771 inp = in_pcblookup_hash(&udbinfo, faddr, uh->uh_dport,
772 ip->ip_src, uh->uh_sport, 0, NULL);
773 if (inp != NULL && inp->inp_socket != NULL) {
774 udp_lock(inp->inp_socket, 1, 0);
775 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
776 udp_unlock(inp->inp_socket, 1, 0);
777 return;
778 }
779 (*notify)(inp, inetctlerrmap[cmd]);
780 udp_unlock(inp->inp_socket, 1, 0);
781 }
782 } else
783 in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
784 }
785
786 static int
787 udp_pcblist SYSCTL_HANDLER_ARGS
788 {
789 int error, i, n;
790 struct inpcb *inp, **inp_list;
791 inp_gen_t gencnt;
792 struct xinpgen xig;
793
794 /*
795 * The process of preparing the PCB list is too time-consuming and
796 * resource-intensive to repeat twice on every request.
797 */
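/*
 * The reply is framed by two struct xinpgen records: one up front with
 * the current PCB count and generation number and one at the end with
 * the (possibly updated) values, so userland can detect that the list
 * changed while it was being copied out and retry.
 */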
798 lck_rw_lock_exclusive(udbinfo.mtx);
799 if (req->oldptr == USER_ADDR_NULL) {
800 n = udbinfo.ipi_count;
801 req->oldidx = 2 * (sizeof xig)
802 + (n + n/8) * sizeof(struct xinpcb);
803 lck_rw_done(udbinfo.mtx);
804 return 0;
805 }
806
807 if (req->newptr != USER_ADDR_NULL) {
808 lck_rw_done(udbinfo.mtx);
809 return EPERM;
810 }
811
812 /*
813 * OK, now we're committed to doing something.
814 */
815 gencnt = udbinfo.ipi_gencnt;
816 n = udbinfo.ipi_count;
817
818 bzero(&xig, sizeof(xig));
819 xig.xig_len = sizeof xig;
820 xig.xig_count = n;
821 xig.xig_gen = gencnt;
822 xig.xig_sogen = so_gencnt;
823 error = SYSCTL_OUT(req, &xig, sizeof xig);
824 if (error) {
825 lck_rw_done(udbinfo.mtx);
826 return error;
827 }
828 /*
829 * We are done if there is no pcb
830 */
831 if (n == 0) {
832 lck_rw_done(udbinfo.mtx);
833 return 0;
834 }
835
836 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
837 if (inp_list == 0) {
838 lck_rw_done(udbinfo.mtx);
839 return ENOMEM;
840 }
841
842 for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
843 inp = LIST_NEXT(inp, inp_list)) {
844 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
845 inp_list[i++] = inp;
846 }
847 n = i;
848
849 error = 0;
850 for (i = 0; i < n; i++) {
851 inp = inp_list[i];
852 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
853 struct xinpcb xi;
854
855 bzero(&xi, sizeof(xi));
856 xi.xi_len = sizeof xi;
857 /* XXX should avoid extra copy */
858 inpcb_to_compat(inp, &xi.xi_inp);
859 if (inp->inp_socket)
860 sotoxsocket(inp->inp_socket, &xi.xi_socket);
861 error = SYSCTL_OUT(req, &xi, sizeof xi);
862 }
863 }
864 if (!error) {
865 /*
866 * Give the user an updated idea of our state.
867 * If the generation differs from what we told
868 * her before, she knows that something happened
869 * while we were processing this request, and it
870 * might be necessary to retry.
871 */
872 bzero(&xig, sizeof(xig));
873 xig.xig_len = sizeof xig;
874 xig.xig_gen = udbinfo.ipi_gencnt;
875 xig.xig_sogen = so_gencnt;
876 xig.xig_count = udbinfo.ipi_count;
877 error = SYSCTL_OUT(req, &xig, sizeof xig);
878 }
879 FREE(inp_list, M_TEMP);
880 lck_rw_done(udbinfo.mtx);
881 return error;
882 }
883
884 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
885 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
886
887
888
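/*
 * Derive a small non-zero tag from the socket's address: udp_output()
 * stores it in m_pkthdr.socket_id, presumably so that tracing and
 * debugging tools can attribute outgoing packets to a socket.  NULL maps
 * to 0 ("no socket") and a computed value of 0 is remapped to 0xffff.
 */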
889 static __inline__ u_int16_t
890 get_socket_id(struct socket * s)
891 {
892 u_int16_t val;
893
894 if (s == NULL) {
895 return (0);
896 }
897 val = (u_int16_t)(((u_int32_t)s) / sizeof(struct socket));
898 if (val == 0) {
899 val = 0xffff;
900 }
901 return (val);
902 }
903
904 static int
905 udp_output(inp, m, addr, control, p)
906 register struct inpcb *inp;
907 struct mbuf *m;
908 struct sockaddr *addr;
909 struct mbuf *control;
910 struct proc *p;
911 {
912 register struct udpiphdr *ui;
913 register int len = m->m_pkthdr.len;
914 struct sockaddr_in *sin, src;
915 struct in_addr origladdr, laddr, faddr;
916 u_short lport, fport;
917 struct sockaddr_in *ifaddr;
918 int error = 0, udp_dodisconnect = 0;
919
920
921 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);
922
923 if (control)
924 m_freem(control); /* XXX */
925
926 KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
927 inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
928 (htons((u_short)len + sizeof (struct udphdr))));
929
930 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
931 error = EMSGSIZE;
932 goto release;
933 }
934
935 /* If there was a routing change, discard cached route and check
936 * that we have a valid source address.
937 * Reacquire a new source address if INADDR_ANY was specified
938 */
939
940 #if 1
941 lck_mtx_assert(inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
942 #endif
943
944 if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
945 if (ifa_foraddr(inp->inp_laddr.s_addr) == 0) { /* src address is gone */
946 if (inp->inp_flags & INP_INADDR_ANY)
947 inp->inp_faddr.s_addr = INADDR_ANY; /* new src will be set later */
948 else {
949 error = EADDRNOTAVAIL;
950 goto release;
951 }
952 }
953 rtfree(inp->inp_route.ro_rt);
954 inp->inp_route.ro_rt = (struct rtentry *)0;
955 }
956
957 origladdr = laddr = inp->inp_laddr;
958 faddr = inp->inp_faddr;
959 lport = inp->inp_lport;
960 fport = inp->inp_fport;
961
962 if (addr) {
963 sin = (struct sockaddr_in *)addr;
964 if (faddr.s_addr != INADDR_ANY) {
965 error = EISCONN;
966 goto release;
967 }
968 if (lport == 0) {
969 /*
970 * In case we don't have a local port set, go through the full connect.
971 * We don't have a local port yet (ie, we can't be looked up),
972 * so it's not an issue if the input runs at the same time we do this.
973 */
974 error = in_pcbconnect(inp, addr, p);
975 if (error) {
976 goto release;
977 }
978 laddr = inp->inp_laddr;
979 lport = inp->inp_lport;
980 faddr = inp->inp_faddr;
981 fport = inp->inp_fport;
982 udp_dodisconnect = 1;
983 }
984 else {
985 /* Fast path case
986 * we have a full address and a local port.
987 * use this info to build the packet without changing the pcb
988 * and interfering with the input path. See 3851370
989 */
990 if (laddr.s_addr == INADDR_ANY) {
991 if ((error = in_pcbladdr(inp, addr, &ifaddr)) != 0)
992 goto release;
993 laddr = ifaddr->sin_addr;
994 inp->inp_flags |= INP_INADDR_ANY; /* from pcbconnect: remember we don't care about src addr.*/
995 }
996
997 faddr = sin->sin_addr;
998 fport = sin->sin_port;
999 }
1000 } else {
1001 if (faddr.s_addr == INADDR_ANY) {
1002 error = ENOTCONN;
1003 goto release;
1004 }
1005 }
1006
1007
1008 /*
1009 * Calculate data length and get a mbuf
1010 * for UDP and IP headers.
1011 */
1012 M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT);
1013 if (m == 0) {
1014 error = ENOBUFS;
1015 goto abort;
1016 }
1017
1018 /*
1019 * Fill in mbuf with extended UDP header
1020 * and addresses and length put into network format.
1021 */
1022 ui = mtod(m, struct udpiphdr *);
1023 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1024 ui->ui_pr = IPPROTO_UDP;
1025 ui->ui_src = laddr;
1026 ui->ui_dst = faddr;
1027 ui->ui_sport = lport;
1028 ui->ui_dport = fport;
1029 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1030
1031 /*
1032 * Set up checksum and output datagram.
1033 */
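/*
 * Only the pseudo-header portion of the checksum is folded in here by
 * in_pseudo(); CSUM_UDP plus the offset of uh_sum in csum_data tell
 * whoever finishes the job (the network driver, or ip_output's delayed
 * checksum path when the interface lacks hardware support) where the
 * final sum over the UDP header and payload must be stored.
 */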
1034 if (udpcksum) {
1035 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1036 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1037 m->m_pkthdr.csum_flags = CSUM_UDP;
1038 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1039 } else {
1040 ui->ui_sum = 0;
1041 }
1042 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1043 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1044 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1045 udpstat.udps_opackets++;
1046
1047 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1048 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1049
1050 #if IPSEC
1051 if (ipsec_bypass == 0 && ipsec_setsocket(m, inp->inp_socket) != 0) {
1052 error = ENOBUFS;
1053 goto abort;
1054 }
1055 #endif /*IPSEC*/
1056 m->m_pkthdr.socket_id = get_socket_id(inp->inp_socket);
1057 error = ip_output_list(m, 0, inp->inp_options, &inp->inp_route,
1058 (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)),
1059 inp->inp_moptions);
1060
1061 if (udp_dodisconnect) {
1062 in_pcbdisconnect(inp);
1063 inp->inp_laddr = origladdr; /* XXX rehash? */
1064 }
1065 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
1066 return (error);
1067
1068 abort:
1069 if (udp_dodisconnect) {
1070 in_pcbdisconnect(inp);
1071 inp->inp_laddr = origladdr; /* XXX rehash? */
1072 }
1073
1074 release:
1075 m_freem(m);
1076 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
1077 return (error);
1078 }
1079
1080 u_long udp_sendspace = 9216; /* really max datagram size */
1081 /* 40 1K datagrams */
1082 SYSCTL_INT(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
1083 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
1084
1085 u_long udp_recvspace = 40 * (1024 +
1086 #if INET6
1087 sizeof(struct sockaddr_in6)
1088 #else
1089 sizeof(struct sockaddr_in)
1090 #endif
1091 );
1092 SYSCTL_INT(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
1093 &udp_recvspace, 0, "Maximum incoming UDP datagram size");
1094
1095 static int
1096 udp_abort(struct socket *so)
1097 {
1098 struct inpcb *inp;
1099
1100 inp = sotoinpcb(so);
1101 if (inp == 0)
1102 panic("udp_abort: so=%x null inp\n", so); /* ??? possible? panic instead? */
1103 soisdisconnected(so);
1104 in_pcbdetach(inp);
1105 return 0;
1106 }
1107
1108 static int
1109 udp_attach(struct socket *so, int proto, struct proc *p)
1110 {
1111 struct inpcb *inp;
1112 int error;
1113
1114 inp = sotoinpcb(so);
1115 if (inp != 0)
1116 panic ("udp_attach so=%x inp=%x\n", so, inp);
1117
1118 error = in_pcballoc(so, &udbinfo, p);
1119 if (error)
1120 return error;
1121 error = soreserve(so, udp_sendspace, udp_recvspace);
1122 if (error)
1123 return error;
1124 inp = (struct inpcb *)so->so_pcb;
1125 inp->inp_vflag |= INP_IPV4;
1126 inp->inp_ip_ttl = ip_defttl;
1127 return 0;
1128 }
1129
1130 static int
1131 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
1132 {
1133 struct inpcb *inp;
1134 int error;
1135
1136 inp = sotoinpcb(so);
1137 if (inp == 0)
1138 return EINVAL;
1139 error = in_pcbbind(inp, nam, p);
1140 return error;
1141 }
1142
1143 static int
1144 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
1145 {
1146 struct inpcb *inp;
1147 int error;
1148
1149 inp = sotoinpcb(so);
1150 if (inp == 0)
1151 return EINVAL;
1152 if (inp->inp_faddr.s_addr != INADDR_ANY)
1153 return EISCONN;
1154 error = in_pcbconnect(inp, nam, p);
1155 if (error == 0)
1156 soisconnected(so);
1157 return error;
1158 }
1159
1160 static int
1161 udp_detach(struct socket *so)
1162 {
1163 struct inpcb *inp;
1164
1165 inp = sotoinpcb(so);
1166 if (inp == 0)
1167 panic("udp_detach: so=%x null inp\n", so); /* ??? possible? panic instead? */
1168 in_pcbdetach(inp);
1169 inp->inp_state = INPCB_STATE_DEAD;
1170 return 0;
1171 }
1172
1173 static int
1174 udp_disconnect(struct socket *so)
1175 {
1176 struct inpcb *inp;
1177
1178 inp = sotoinpcb(so);
1179 if (inp == 0)
1180 return EINVAL;
1181 if (inp->inp_faddr.s_addr == INADDR_ANY)
1182 return ENOTCONN;
1183
1184 in_pcbdisconnect(inp);
1185 inp->inp_laddr.s_addr = INADDR_ANY;
1186 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1187 return 0;
1188 }
1189
1190 static int
1191 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1192 struct mbuf *control, struct proc *p)
1193 {
1194 struct inpcb *inp;
1195
1196 inp = sotoinpcb(so);
1197 if (inp == 0) {
1198 m_freem(m);
1199 return EINVAL;
1200 }
1201 return udp_output(inp, m, addr, control, p);
1202 }
1203
1204 int
1205 udp_shutdown(struct socket *so)
1206 {
1207 struct inpcb *inp;
1208
1209 inp = sotoinpcb(so);
1210 if (inp == 0)
1211 return EINVAL;
1212 socantsendmore(so);
1213 return 0;
1214 }
1215
1216 struct pr_usrreqs udp_usrreqs = {
1217 udp_abort, pru_accept_notsupp, udp_attach, udp_bind, udp_connect,
1218 pru_connect2_notsupp, in_control, udp_detach, udp_disconnect,
1219 pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
1220 pru_rcvoob_notsupp, udp_send, pru_sense_null, udp_shutdown,
1221 in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
1222 };
1223
1224
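/*
 * Per-socket locking: each UDP PCB has its own mutex (inpcb_mtx), taken
 * and released by udp_lock()/udp_unlock(); when 'refcount' is set the
 * socket use count is adjusted as well.  On PowerPC the caller's return
 * address is read from the link register and stashed in so->reserved3 /
 * so->reserved4 as a debugging aid for the lock-related panics below.
 */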
1225 int
1226 udp_lock(so, refcount, debug)
1227 struct socket *so;
1228 int refcount, debug;
1229 {
1230 int lr_saved;
1231 #ifdef __ppc__
1232 if (debug == 0) {
1233 __asm__ volatile("mflr %0" : "=r" (lr_saved));
1234 }
1235 else lr_saved = debug;
1236 #endif
1237
1238 if (so->so_pcb) {
1239 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_NOTOWNED);
1240 lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1241 }
1242 else {
1243 panic("udp_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
1244 lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_NOTOWNED);
1245 lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
1246 }
1247
1248 if (refcount)
1249 so->so_usecount++;
1250
1251 so->reserved3 = lr_saved;
1252 return (0);
1253 }
1254
1255 int
1256 udp_unlock(so, refcount, debug)
1257 struct socket *so;
1258 int refcount;
1259 int debug;
1260 {
1261 int lr_saved;
1262 struct inpcb *inp = sotoinpcb(so);
1263 struct inpcbinfo *pcbinfo = &udbinfo;
1264 #ifdef __ppc__
1265 if (debug == 0) {
1266 __asm__ volatile("mflr %0" : "=r" (lr_saved));
1267 }
1268 else lr_saved = debug;
1269 #endif
1270 if (refcount) {
1271 so->so_usecount--;
1272 #if 0
1273 if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
1274 if (lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
1275 in_pcbdispose(inp);
1276 lck_rw_done(pcbinfo->mtx);
1277 return(0);
1278 }
1279 }
1280 #endif
1281 }
1282 if (so->so_pcb == NULL) {
1283 panic("udp_unlock: so=%x NO PCB! lr=%x\n", so, lr_saved);
1284 lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_OWNED);
1285 lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
1286 }
1287 else {
1288 lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1289 lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
1290 }
1291
1292
1293 so->reserved4 = lr_saved;
1294 return (0);
1295 }
1296
1297 lck_mtx_t *
1298 udp_getlock(so, locktype)
1299 struct socket *so;
1300 int locktype;
1301 {
1302 struct inpcb *inp = sotoinpcb(so);
1303
1304
1305 if (so->so_pcb)
1306 return(inp->inpcb_mtx);
1307 else {
1308 panic("udp_getlock: so=%x NULL so_pcb\n", so);
1309 return (so->so_proto->pr_domain->dom_mtx);
1310 }
1311 }
1312
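/*
 * Slow-timeout garbage collection: dispose of any PCB that has been
 * marked WNT_STOPUSING once its socket's use count has dropped to zero.
 * PCBs whose mutex is currently held are skipped and picked up on a
 * later pass; there is no hurry for this cleanup.
 */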
1313 void
1314 udp_slowtimo()
1315 {
1316 struct inpcb *inp, *inpnxt;
1317 struct socket *so;
1318 struct inpcbinfo *pcbinfo = &udbinfo;
1319
1320 lck_rw_lock_exclusive(pcbinfo->mtx);
1321
1322 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
1323 inpnxt = inp->inp_list.le_next;
1324
1325 /* Ignore nat/SharedIP dummy pcbs */
1326 if (inp->inp_socket == &udbinfo.nat_dummy_socket)
1327 continue;
1328
1329 if (inp->inp_wantcnt != WNT_STOPUSING)
1330 continue;
1331
1332 so = inp->inp_socket;
1333 if (!lck_mtx_try_lock(inp->inpcb_mtx)) /* skip if busy, no hurry for cleanup... */
1334 continue;
1335
1336 if (so->so_usecount == 0)
1337 in_pcbdispose(inp);
1338 else
1339 lck_mtx_unlock(inp->inpcb_mtx);
1340 }
1341 lck_rw_done(pcbinfo->mtx);
1342 }
1343
1344 int
1345 ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr )
1346 {
1347 if ( dstaddr == srcaddr ){
1348 return 0;
1349 }
1350 return 1;
1351 }
1352