bsd/netinet/udp_usrreq.c (apple/xnu, tag xnu-4903.270.47)
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/domain.h>
69 #include <sys/protosw.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/sysctl.h>
73 #include <sys/syslog.h>
74 #include <sys/mcache.h>
75 #include <net/ntstat.h>
76
77 #include <kern/zalloc.h>
78 #include <mach/boolean.h>
79
80 #include <net/if.h>
81 #include <net/if_types.h>
82 #include <net/route.h>
83 #include <net/dlil.h>
84 #include <net/net_api_stats.h>
85
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/in_tclass.h>
89 #include <netinet/ip.h>
90 #if INET6
91 #include <netinet/ip6.h>
92 #endif /* INET6 */
93 #include <netinet/in_pcb.h>
94 #include <netinet/in_var.h>
95 #include <netinet/ip_var.h>
96 #if INET6
97 #include <netinet6/in6_pcb.h>
98 #include <netinet6/ip6_var.h>
99 #include <netinet6/udp6_var.h>
100 #endif /* INET6 */
101 #include <netinet/ip_icmp.h>
102 #include <netinet/icmp_var.h>
103 #include <netinet/udp.h>
104 #include <netinet/udp_var.h>
105 #include <sys/kdebug.h>
106
107 #if IPSEC
108 #include <netinet6/ipsec.h>
109 #include <netinet6/esp.h>
110 extern int ipsec_bypass;
111 extern int esp_udp_encap_port;
112 #endif /* IPSEC */
113
114 #if NECP
115 #include <net/necp.h>
116 #endif /* NECP */
117
118 #if FLOW_DIVERT
119 #include <netinet/flow_divert.h>
120 #endif /* FLOW_DIVERT */
121
122 #if CONTENT_FILTER
123 #include <net/content_filter.h>
124 #endif /* CONTENT_FILTER */
125
126 #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0)
127 #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2)
128 #define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1)
129 #define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3)
130 #define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8))
131 #define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)
132
133 /*
134 * UDP protocol implementation.
135 * Per RFC 768, August, 1980.
136 */
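/*
 * For reference, the RFC 768 header that this file parses and builds is
 * struct udphdr from <netinet/udp.h>: four 16-bit fields, all carried in
 * network byte order.
 *
 *	struct udphdr {
 *		u_short uh_sport;	(source port)
 *		u_short uh_dport;	(destination port)
 *		u_short uh_ulen;	(length of header plus payload)
 *		u_short uh_sum;		(checksum; 0 means none, IPv4 only)
 *	};
 */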
137 #ifndef COMPAT_42
138 static int udpcksum = 1;
139 #else
140 static int udpcksum = 0; /* XXX */
141 #endif
142 SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum,
143 CTLFLAG_RW | CTLFLAG_LOCKED, &udpcksum, 0, "");
144
145 int udp_log_in_vain = 0;
146 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
147 &udp_log_in_vain, 0, "Log all incoming UDP packets");
148
149 static int blackhole = 0;
150 SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
151 &blackhole, 0, "Do not send port unreachables for refused connects");
152
153 struct inpcbhead udb; /* from udp_var.h */
154 #define udb6 udb /* for KAME src sync over BSD*'s */
155 struct inpcbinfo udbinfo;
156
157 #ifndef UDBHASHSIZE
158 #define UDBHASHSIZE 16
159 #endif
160
161 /* Garbage collection performed during most recent udp_gc() run */
162 static boolean_t udp_gc_done = FALSE;
163
164 #if IPFIREWALL
165 extern int fw_verbose;
166 extern void ipfwsyslog(int level, const char *format, ...);
167 extern void ipfw_stealth_stats_incr_udp(void);
168
169 /* Apple logging, log to ipfw.log */
170 #define log_in_vain_log(a) { \
171 if ((udp_log_in_vain == 3) && (fw_verbose == 2)) { \
172 ipfwsyslog a; \
173 } else if ((udp_log_in_vain == 4) && (fw_verbose == 2)) { \
174 ipfw_stealth_stats_incr_udp(); \
175 } else { \
176 log a; \
177 } \
178 }
179 #else /* !IPFIREWALL */
180 #define log_in_vain_log(a) { log a; }
181 #endif /* !IPFIREWALL */
182
183 static int udp_getstat SYSCTL_HANDLER_ARGS;
184 struct udpstat udpstat; /* from udp_var.h */
185 SYSCTL_PROC(_net_inet_udp, UDPCTL_STATS, stats,
186 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
187 0, 0, udp_getstat, "S,udpstat",
188 "UDP statistics (struct udpstat, netinet/udp_var.h)");
189
190 SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount,
191 CTLFLAG_RD | CTLFLAG_LOCKED, &udbinfo.ipi_count, 0,
192 "Number of active PCBs");
193
194 __private_extern__ int udp_use_randomport = 1;
195 SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports,
196 CTLFLAG_RW | CTLFLAG_LOCKED, &udp_use_randomport, 0,
197 "Randomize UDP port numbers");
198
199 #if INET6
200 struct udp_in6 {
201 struct sockaddr_in6 uin6_sin;
202 u_char uin6_init_done : 1;
203 };
204 struct udp_ip6 {
205 struct ip6_hdr uip6_ip6;
206 u_char uip6_init_done : 1;
207 };
208
209 int udp_abort(struct socket *);
210 int udp_attach(struct socket *, int, struct proc *);
211 int udp_bind(struct socket *, struct sockaddr *, struct proc *);
212 int udp_connect(struct socket *, struct sockaddr *, struct proc *);
213 int udp_connectx(struct socket *, struct sockaddr *,
214 struct sockaddr *, struct proc *, uint32_t, sae_associd_t,
215 sae_connid_t *, uint32_t, void *, uint32_t, struct uio *, user_ssize_t *);
216 int udp_detach(struct socket *);
217 int udp_disconnect(struct socket *);
218 int udp_disconnectx(struct socket *, sae_associd_t, sae_connid_t);
219 int udp_send(struct socket *, int, struct mbuf *, struct sockaddr *,
220 struct mbuf *, struct proc *);
221 static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
222 struct sockaddr_in *, struct udp_in6 *, struct udp_ip6 *, struct ifnet *);
223 #else /* !INET6 */
224 static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
225 struct sockaddr_in *, struct ifnet *);
226 #endif /* !INET6 */
227 static int udp_input_checksum(struct mbuf *, struct udphdr *, int, int);
228 int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
229 struct mbuf *, struct proc *);
230 static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
231 static void udp_gc(struct inpcbinfo *);
232
233 struct pr_usrreqs udp_usrreqs = {
234 .pru_abort = udp_abort,
235 .pru_attach = udp_attach,
236 .pru_bind = udp_bind,
237 .pru_connect = udp_connect,
238 .pru_connectx = udp_connectx,
239 .pru_control = in_control,
240 .pru_detach = udp_detach,
241 .pru_disconnect = udp_disconnect,
242 .pru_disconnectx = udp_disconnectx,
243 .pru_peeraddr = in_getpeeraddr,
244 .pru_send = udp_send,
245 .pru_shutdown = udp_shutdown,
246 .pru_sockaddr = in_getsockaddr,
247 .pru_sosend = sosend,
248 .pru_soreceive = soreceive,
249 .pru_soreceive_list = soreceive_list,
250 };
251
252 void
253 udp_init(struct protosw *pp, struct domain *dp)
254 {
255 #pragma unused(dp)
256 static int udp_initialized = 0;
257 vm_size_t str_size;
258 struct inpcbinfo *pcbinfo;
259
260 VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
261
262 if (udp_initialized) {
263 return;
264 }
265 udp_initialized = 1;
266 uint32_t pool_size = (nmbclusters << MCLSHIFT) >> MBSHIFT;
267 if (pool_size >= 96) {
268 /* Improves 10GbE UDP performance. */
269 udp_recvspace = 786896;
270 }
271 LIST_INIT(&udb);
272 udbinfo.ipi_listhead = &udb;
273 udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
274 &udbinfo.ipi_hashmask);
275 udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
276 &udbinfo.ipi_porthashmask);
277 str_size = (vm_size_t) sizeof(struct inpcb);
278 udbinfo.ipi_zone = zinit(str_size, 80000 * str_size, 8192, "udpcb");
279
280 pcbinfo = &udbinfo;
281 /*
282 * allocate lock group attribute and group for udp pcb mutexes
283 */
284 pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
285 pcbinfo->ipi_lock_grp = lck_grp_alloc_init("udppcb",
286 pcbinfo->ipi_lock_grp_attr);
287 pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
288 if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
289 pcbinfo->ipi_lock_attr)) == NULL) {
290 panic("%s: unable to allocate PCB lock\n", __func__);
291 /* NOTREACHED */
292 }
293
294 udbinfo.ipi_gc = udp_gc;
295 in_pcbinfo_attach(&udbinfo);
296 }
297
298 void
299 udp_input(struct mbuf *m, int iphlen)
300 {
301 struct ip *ip;
302 struct udphdr *uh;
303 struct inpcb *inp;
304 struct mbuf *opts = NULL;
305 int len, isbroadcast;
306 struct ip save_ip;
307 struct sockaddr *append_sa;
308 struct inpcbinfo *pcbinfo = &udbinfo;
309 struct sockaddr_in udp_in;
310 struct ip_moptions *imo = NULL;
311 int foundmembership = 0, ret = 0;
312 #if INET6
313 struct udp_in6 udp_in6;
314 struct udp_ip6 udp_ip6;
315 #endif /* INET6 */
316 struct ifnet *ifp = m->m_pkthdr.rcvif;
317 boolean_t cell = IFNET_IS_CELLULAR(ifp);
318 boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
319 boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
320
321 bzero(&udp_in, sizeof(udp_in));
322 udp_in.sin_len = sizeof(struct sockaddr_in);
323 udp_in.sin_family = AF_INET;
324 #if INET6
325 bzero(&udp_in6, sizeof(udp_in6));
326 udp_in6.uin6_sin.sin6_len = sizeof(struct sockaddr_in6);
327 udp_in6.uin6_sin.sin6_family = AF_INET6;
328 #endif /* INET6 */
329
330 udpstat.udps_ipackets++;
331
332 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
333
334 /* Expect 32-bit aligned data pointer on strict-align platforms */
335 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
336
337 /*
338 * Strip IP options, if any; should skip this,
339 * make available to user, and use on returned packets,
340 * but we don't yet have a way to check the checksum
341 * with options still present.
342 */
343 if (iphlen > sizeof(struct ip)) {
344 ip_stripoptions(m);
345 iphlen = sizeof(struct ip);
346 }
347
348 /*
349 * Get IP and UDP header together in first mbuf.
350 */
351 ip = mtod(m, struct ip *);
352 if (m->m_len < iphlen + sizeof(struct udphdr)) {
353 m = m_pullup(m, iphlen + sizeof(struct udphdr));
354 if (m == NULL) {
355 udpstat.udps_hdrops++;
356 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
357 0, 0, 0, 0, 0);
358 return;
359 }
360 ip = mtod(m, struct ip *);
361 }
362 uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
363
364 	/* destination port of 0 is illegal, per RFC 768. */
365 if (uh->uh_dport == 0) {
366 IF_UDP_STATINC(ifp, port0);
367 goto bad;
368 }
369
370 KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
371 ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);
372
373 /*
374 * Make mbuf data length reflect UDP length.
375 * If not enough data to reflect UDP length, drop.
376 */
377 len = ntohs((u_short)uh->uh_ulen);
378 if (ip->ip_len != len) {
379 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
380 udpstat.udps_badlen++;
381 IF_UDP_STATINC(ifp, badlength);
382 goto bad;
383 }
384 m_adj(m, len - ip->ip_len);
385 /* ip->ip_len = len; */
386 }
387 /*
388 	 * Save a copy of the IP header in case we want to restore it
389 * for sending an ICMP error message in response.
390 */
391 save_ip = *ip;
392
393 /*
394 * Checksum extended UDP header and data.
395 */
396 if (udp_input_checksum(m, uh, iphlen, len)) {
397 goto bad;
398 }
399
400 isbroadcast = in_broadcast(ip->ip_dst, ifp);
401
402 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || isbroadcast) {
403 int reuse_sock = 0, mcast_delivered = 0;
404
405 lck_rw_lock_shared(pcbinfo->ipi_lock);
406 /*
407 * Deliver a multicast or broadcast datagram to *all* sockets
408 * for which the local and remote addresses and ports match
409 * those of the incoming datagram. This allows more than
410 * one process to receive multi/broadcasts on the same port.
411 * (This really ought to be done for unicast datagrams as
412 * well, but that would cause problems with existing
413 * applications that open both address-specific sockets and
414 * a wildcard socket listening to the same port -- they would
415 * end up receiving duplicates of every unicast datagram.
416 * Those applications open the multiple sockets to overcome an
417 * inadequacy of the UDP socket interface, but for backwards
418 * compatibility we avoid the problem here rather than
419 * fixing the interface. Maybe 4.5BSD will remedy this?)
420 */
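/*
 * Illustrative userspace sketch (not part of this file): the
 * all-matching-sockets delivery implemented below is what lets two
 * processes share a multicast port, provided each sets SO_REUSEPORT
 * (or SO_REUSEADDR) before bind(2):
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	int on = 1;
 *	setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
 *	struct sockaddr_in sin = {
 *		.sin_len = sizeof(sin), .sin_family = AF_INET,
 *		.sin_port = htons(5353), .sin_addr = { INADDR_ANY },
 *	};
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));
 *	(after joining the group with IP_ADD_MEMBERSHIP, each such
 *	socket receives its own copy of every matching datagram)
 */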
421
422 /*
423 * Construct sockaddr format source address.
424 */
425 udp_in.sin_port = uh->uh_sport;
426 udp_in.sin_addr = ip->ip_src;
427 /*
428 * Locate pcb(s) for datagram.
429 * (Algorithm copied from raw_intr().)
430 */
431 #if INET6
432 udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
433 #endif /* INET6 */
434 LIST_FOREACH(inp, &udb, inp_list) {
435 #if IPSEC
436 int skipit;
437 #endif /* IPSEC */
438
439 if (inp->inp_socket == NULL) {
440 continue;
441 }
442 if (inp != sotoinpcb(inp->inp_socket)) {
443 panic("%s: bad so back ptr inp=%p\n",
444 __func__, inp);
445 /* NOTREACHED */
446 }
447 #if INET6
448 if ((inp->inp_vflag & INP_IPV4) == 0) {
449 continue;
450 }
451 #endif /* INET6 */
452 if (inp_restricted_recv(inp, ifp)) {
453 continue;
454 }
455
456 if ((inp->inp_moptions == NULL) &&
457 (ntohl(ip->ip_dst.s_addr) !=
458 INADDR_ALLHOSTS_GROUP) && (isbroadcast == 0)) {
459 continue;
460 }
461
462 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
463 WNT_STOPUSING) {
464 continue;
465 }
466
467 udp_lock(inp->inp_socket, 1, 0);
468
469 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
470 WNT_STOPUSING) {
471 udp_unlock(inp->inp_socket, 1, 0);
472 continue;
473 }
474
475 if (inp->inp_lport != uh->uh_dport) {
476 udp_unlock(inp->inp_socket, 1, 0);
477 continue;
478 }
479 if (inp->inp_laddr.s_addr != INADDR_ANY) {
480 if (inp->inp_laddr.s_addr !=
481 ip->ip_dst.s_addr) {
482 udp_unlock(inp->inp_socket, 1, 0);
483 continue;
484 }
485 }
486 if (inp->inp_faddr.s_addr != INADDR_ANY) {
487 if (inp->inp_faddr.s_addr !=
488 ip->ip_src.s_addr ||
489 inp->inp_fport != uh->uh_sport) {
490 udp_unlock(inp->inp_socket, 1, 0);
491 continue;
492 }
493 }
494
495 if (isbroadcast == 0 && (ntohl(ip->ip_dst.s_addr) !=
496 INADDR_ALLHOSTS_GROUP)) {
497 struct sockaddr_in group;
498 int blocked;
499
500 if ((imo = inp->inp_moptions) == NULL) {
501 udp_unlock(inp->inp_socket, 1, 0);
502 continue;
503 }
504 IMO_LOCK(imo);
505
506 bzero(&group, sizeof(struct sockaddr_in));
507 group.sin_len = sizeof(struct sockaddr_in);
508 group.sin_family = AF_INET;
509 group.sin_addr = ip->ip_dst;
510
511 blocked = imo_multi_filter(imo, ifp,
512 &group, &udp_in);
513 if (blocked == MCAST_PASS) {
514 foundmembership = 1;
515 }
516
517 IMO_UNLOCK(imo);
518 if (!foundmembership) {
519 udp_unlock(inp->inp_socket, 1, 0);
520 if (blocked == MCAST_NOTSMEMBER ||
521 blocked == MCAST_MUTED) {
522 udpstat.udps_filtermcast++;
523 }
524 continue;
525 }
526 foundmembership = 0;
527 }
528
529 reuse_sock = (inp->inp_socket->so_options &
530 (SO_REUSEPORT | SO_REUSEADDR));
531
532 #if NECP
533 skipit = 0;
534 if (!necp_socket_is_allowed_to_send_recv_v4(inp,
535 uh->uh_dport, uh->uh_sport, &ip->ip_dst,
536 &ip->ip_src, ifp, NULL, NULL, NULL)) {
537 /* do not inject data to pcb */
538 skipit = 1;
539 }
540 if (skipit == 0)
541 #endif /* NECP */
542 {
543 struct mbuf *n = NULL;
544
545 if (reuse_sock) {
546 n = m_copy(m, 0, M_COPYALL);
547 }
548 #if INET6
549 udp_append(inp, ip, m,
550 iphlen + sizeof(struct udphdr),
551 &udp_in, &udp_in6, &udp_ip6, ifp);
552 #else /* !INET6 */
553 udp_append(inp, ip, m,
554 iphlen + sizeof(struct udphdr),
555 &udp_in, ifp);
556 #endif /* !INET6 */
557 mcast_delivered++;
558
559 m = n;
560 }
561 udp_unlock(inp->inp_socket, 1, 0);
562
563 /*
564 * Don't look for additional matches if this one does
565 * not have either the SO_REUSEPORT or SO_REUSEADDR
566 * socket options set. This heuristic avoids searching
567 * through all pcbs in the common case of a non-shared
568 * port. It assumes that an application will never
569 * clear these options after setting them.
570 */
571 if (reuse_sock == 0 || m == NULL) {
572 break;
573 }
574
575 /*
576 * Expect 32-bit aligned data pointer on strict-align
577 * platforms.
578 */
579 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
580 /*
581 * Recompute IP and UDP header pointers for new mbuf
582 */
583 ip = mtod(m, struct ip *);
584 uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
585 }
586 lck_rw_done(pcbinfo->ipi_lock);
587
588 if (mcast_delivered == 0) {
589 /*
590 * No matching pcb found; discard datagram.
591 * (No need to send an ICMP Port Unreachable
592 		 * for a broadcast or multicast datagram.)
593 */
594 udpstat.udps_noportbcast++;
595 IF_UDP_STATINC(ifp, port_unreach);
596 goto bad;
597 }
598
599 	/* free the extra copy of the mbuf, or the mbuf left over when delivery was skipped */
600 if (m != NULL) {
601 m_freem(m);
602 }
603 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
604 return;
605 }
606
607 #if IPSEC
608 /*
609 * UDP to port 4500 with a payload where the first four bytes are
610 * not zero is a UDP encapsulated IPSec packet. Packets where
611 * the payload is one byte and that byte is 0xFF are NAT keepalive
612 * packets. Decapsulate the ESP packet and carry on with IPSec input
613 * or discard the NAT keep-alive.
614 */
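/*
 * For reference (RFC 3948), the first bytes after the UDP header on the
 * encap port distinguish the three cases handled below:
 *
 *	1 byte, value 0xFF	-> NAT-keepalive; dropped here
 *	4 bytes, all zero	-> non-ESP marker (IKE); falls through
 *				   to the normal PCB lookup
 *	4 bytes, non-zero	-> ESP SPI; handed to esp4_input()
 */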
615 if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
616 uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) {
617 int payload_len = len - sizeof(struct udphdr) > 4 ? 4 :
618 len - sizeof(struct udphdr);
619
620 if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
621 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) +
622 payload_len)) == NULL) {
623 udpstat.udps_hdrops++;
624 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
625 0, 0, 0, 0, 0);
626 return;
627 }
628 /*
629 * Expect 32-bit aligned data pointer on strict-align
630 * platforms.
631 */
632 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
633
634 ip = mtod(m, struct ip *);
635 uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
636 }
637 /* Check for NAT keepalive packet */
638 if (payload_len == 1 && *(u_int8_t *)
639 ((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
640 m_freem(m);
641 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
642 0, 0, 0, 0, 0);
643 return;
644 } else if (payload_len == 4 && *(u_int32_t *)(void *)
645 ((caddr_t)uh + sizeof(struct udphdr)) != 0) {
646 /* UDP encapsulated IPSec packet to pass through NAT */
647 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
648 0, 0, 0, 0, 0);
649 /* preserve the udp header */
650 esp4_input(m, iphlen + sizeof(struct udphdr));
651 return;
652 }
653 }
654 #endif /* IPSEC */
655
656 /*
657 * Locate pcb for datagram.
658 */
659 inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
660 ip->ip_dst, uh->uh_dport, 1, ifp);
661 if (inp == NULL) {
662 IF_UDP_STATINC(ifp, port_unreach);
663
664 if (udp_log_in_vain) {
665 char buf[MAX_IPv4_STR_LEN];
666 char buf2[MAX_IPv4_STR_LEN];
667
668 /* check src and dst address */
669 if (udp_log_in_vain < 3) {
670 log(LOG_INFO, "Connection attempt to "
671 "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
672 &ip->ip_dst, buf, sizeof(buf)),
673 ntohs(uh->uh_dport), inet_ntop(AF_INET,
674 &ip->ip_src, buf2, sizeof(buf2)),
675 ntohs(uh->uh_sport));
676 } else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
677 ip->ip_dst.s_addr != ip->ip_src.s_addr) {
678 log_in_vain_log((LOG_INFO,
679 "Stealth Mode connection attempt to "
680 "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
681 &ip->ip_dst, buf, sizeof(buf)),
682 ntohs(uh->uh_dport), inet_ntop(AF_INET,
683 &ip->ip_src, buf2, sizeof(buf2)),
684 ntohs(uh->uh_sport)))
685 }
686 }
687 udpstat.udps_noport++;
688 if (m->m_flags & (M_BCAST | M_MCAST)) {
689 udpstat.udps_noportbcast++;
690 goto bad;
691 }
692 #if ICMP_BANDLIM
693 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) {
694 goto bad;
695 }
696 #endif /* ICMP_BANDLIM */
697 if (blackhole) {
698 if (ifp && ifp->if_type != IFT_LOOP) {
699 goto bad;
700 }
701 }
702 *ip = save_ip;
703 ip->ip_len += iphlen;
704 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
705 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
706 return;
707 }
708 udp_lock(inp->inp_socket, 1, 0);
709
710 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
711 udp_unlock(inp->inp_socket, 1, 0);
712 IF_UDP_STATINC(ifp, cleanup);
713 goto bad;
714 }
715 #if NECP
716 if (!necp_socket_is_allowed_to_send_recv_v4(inp, uh->uh_dport,
717 uh->uh_sport, &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
718 udp_unlock(inp->inp_socket, 1, 0);
719 IF_UDP_STATINC(ifp, badipsec);
720 goto bad;
721 }
722 #endif /* NECP */
723
724 /*
725 * Construct sockaddr format source address.
726 * Stuff source address and datagram in user buffer.
727 */
728 udp_in.sin_port = uh->uh_sport;
729 udp_in.sin_addr = ip->ip_src;
730 if ((inp->inp_flags & INP_CONTROLOPTS) != 0 ||
731 (inp->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
732 (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
733 (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
734 #if INET6
735 if (inp->inp_vflag & INP_IPV6) {
736 int savedflags;
737
738 ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
739 savedflags = inp->inp_flags;
740 inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
741 ret = ip6_savecontrol(inp, m, &opts);
742 inp->inp_flags = savedflags;
743 } else
744 #endif /* INET6 */
745 {
746 ret = ip_savecontrol(inp, &opts, ip, m);
747 }
748 if (ret != 0) {
749 udp_unlock(inp->inp_socket, 1, 0);
750 goto bad;
751 }
752 }
753 m_adj(m, iphlen + sizeof(struct udphdr));
754
755 KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
756 save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);
757
758 #if INET6
759 if (inp->inp_vflag & INP_IPV6) {
760 in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
761 append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
762 } else
763 #endif /* INET6 */
764 {
765 append_sa = (struct sockaddr *)&udp_in;
766 }
767 if (nstat_collect) {
768 INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
769 INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, m->m_pkthdr.len);
770 inp_set_activity_bitmap(inp);
771 }
772 so_recv_data_stat(inp->inp_socket, m, 0);
773 if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa,
774 m, opts, NULL) == 0) {
775 udpstat.udps_fullsock++;
776 } else {
777 sorwakeup(inp->inp_socket);
778 }
779 udp_unlock(inp->inp_socket, 1, 0);
780 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
781 return;
782 bad:
783 m_freem(m);
784 if (opts) {
785 m_freem(opts);
786 }
787 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
788 }
789
790 #if INET6
791 static void
792 ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
793 {
794 bzero(ip6, sizeof(*ip6));
795
796 ip6->ip6_vfc = IPV6_VERSION;
797 ip6->ip6_plen = ip->ip_len;
798 ip6->ip6_nxt = ip->ip_p;
799 ip6->ip6_hlim = ip->ip_ttl;
800 if (ip->ip_src.s_addr) {
801 ip6->ip6_src.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
802 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
803 }
804 if (ip->ip_dst.s_addr) {
805 ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
806 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
807 }
808 }
809 #endif /* INET6 */
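/*
 * For reference: the translation above yields the v4-mapped form
 * ::ffff:a.b.c.d (IPV6_ADDR_INT32_SMP supplies the 0x0000ffff word of
 * that prefix), so AF_INET6 sockets receiving IPv4 datagrams are handed
 * a plausible IPv6 header for control-data generation.
 */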
810
811 /*
812 * subroutine of udp_input(), mainly for source code readability.
813 */
814 static void
815 #if INET6
816 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
817 struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
818 struct udp_ip6 *pudp_ip6, struct ifnet *ifp)
819 #else /* !INET6 */
820 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
821 struct sockaddr_in *pudp_in, struct ifnet *ifp)
822 #endif /* !INET6 */
823 {
824 struct sockaddr *append_sa;
825 struct mbuf *opts = 0;
826 boolean_t cell = IFNET_IS_CELLULAR(ifp);
827 boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
828 boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
829 int ret = 0;
830
831 #if CONFIG_MACF_NET
832 if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
833 m_freem(n);
834 return;
835 }
836 #endif /* CONFIG_MACF_NET */
837 if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
838 (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
839 (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
840 (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
841 #if INET6
842 if (last->inp_vflag & INP_IPV6) {
843 int savedflags;
844
845 if (pudp_ip6->uip6_init_done == 0) {
846 ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
847 pudp_ip6->uip6_init_done = 1;
848 }
849 savedflags = last->inp_flags;
850 last->inp_flags &= ~INP_UNMAPPABLEOPTS;
851 ret = ip6_savecontrol(last, n, &opts);
852 if (ret != 0) {
853 last->inp_flags = savedflags;
854 goto error;
855 }
856 last->inp_flags = savedflags;
857 } else
858 #endif /* INET6 */
859 {
860 ret = ip_savecontrol(last, &opts, ip, n);
861 if (ret != 0) {
862 goto error;
863 }
864 }
865 }
866 #if INET6
867 if (last->inp_vflag & INP_IPV6) {
868 if (pudp_in6->uin6_init_done == 0) {
869 in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
870 pudp_in6->uin6_init_done = 1;
871 }
872 append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
873 } else
874 #endif /* INET6 */
875 append_sa = (struct sockaddr *)pudp_in;
876 if (nstat_collect) {
877 INP_ADD_STAT(last, cell, wifi, wired, rxpackets, 1);
878 INP_ADD_STAT(last, cell, wifi, wired, rxbytes,
879 n->m_pkthdr.len);
880 inp_set_activity_bitmap(last);
881 }
882 so_recv_data_stat(last->inp_socket, n, 0);
883 m_adj(n, off);
884 if (sbappendaddr(&last->inp_socket->so_rcv, append_sa,
885 n, opts, NULL) == 0) {
886 udpstat.udps_fullsock++;
887 } else {
888 sorwakeup(last->inp_socket);
889 }
890 return;
891 error:
892 m_freem(n);
893 m_freem(opts);
894 }
895
896 /*
897 * Notify a udp user of an asynchronous error;
898 * just wake up so that he can collect error status.
899 */
900 void
901 udp_notify(struct inpcb *inp, int errno)
902 {
903 inp->inp_socket->so_error = errno;
904 sorwakeup(inp->inp_socket);
905 sowwakeup(inp->inp_socket);
906 }
907
908 void
909 udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * ifp)
910 {
911 struct ip *ip = vip;
912 void (*notify)(struct inpcb *, int) = udp_notify;
913 struct in_addr faddr;
914 struct inpcb *inp = NULL;
915
916 faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
917 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
918 return;
919 }
920
921 if (PRC_IS_REDIRECT(cmd)) {
922 ip = 0;
923 notify = in_rtchange;
924 } else if (cmd == PRC_HOSTDEAD) {
925 ip = 0;
926 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
927 return;
928 }
929 if (ip) {
930 struct udphdr uh;
931
932 bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof(uh));
933 inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport,
934 ip->ip_src, uh.uh_sport, 0, NULL);
935 if (inp != NULL && inp->inp_socket != NULL) {
936 udp_lock(inp->inp_socket, 1, 0);
937 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
938 WNT_STOPUSING) {
939 udp_unlock(inp->inp_socket, 1, 0);
940 return;
941 }
942 (*notify)(inp, inetctlerrmap[cmd]);
943 udp_unlock(inp->inp_socket, 1, 0);
944 }
945 } else {
946 in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
947 }
948 }
949
950 int
951 udp_ctloutput(struct socket *so, struct sockopt *sopt)
952 {
953 int error = 0, optval = 0;
954 struct inpcb *inp;
955
956 /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
957 if (sopt->sopt_level != IPPROTO_UDP &&
958 !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
959 return ip_ctloutput(so, sopt);
960 }
961
962 inp = sotoinpcb(so);
963
964 switch (sopt->sopt_dir) {
965 case SOPT_SET:
966 switch (sopt->sopt_name) {
967 case UDP_NOCKSUM:
968 /* This option is settable only for UDP over IPv4 */
969 if (!(inp->inp_vflag & INP_IPV4)) {
970 error = EINVAL;
971 break;
972 }
973
974 if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
975 sizeof(optval))) != 0) {
976 break;
977 }
978
979 if (optval != 0) {
980 inp->inp_flags |= INP_UDP_NOCKSUM;
981 } else {
982 inp->inp_flags &= ~INP_UDP_NOCKSUM;
983 }
984 break;
985 case UDP_KEEPALIVE_OFFLOAD:
986 {
987 struct udp_keepalive_offload ka;
988 /*
989 * If the socket is not connected, the stack will
990 * not know the destination address to put in the
991 * keepalive datagram. Return an error now instead
992 * of failing later.
993 */
994 if (!(so->so_state & SS_ISCONNECTED)) {
995 error = EINVAL;
996 break;
997 }
998 if (sopt->sopt_valsize != sizeof(ka)) {
999 error = EINVAL;
1000 break;
1001 }
1002 if ((error = sooptcopyin(sopt, &ka, sizeof(ka),
1003 sizeof(ka))) != 0) {
1004 break;
1005 }
1006
1007 /* application should specify the type */
1008 if (ka.ka_type == 0) {
1009 return EINVAL;
1010 }
1011
1012 if (ka.ka_interval == 0) {
1013 /*
1014 * if interval is 0, disable the offload
1015 * mechanism
1016 */
1017 if (inp->inp_keepalive_data != NULL) {
1018 FREE(inp->inp_keepalive_data,
1019 M_TEMP);
1020 }
1021 inp->inp_keepalive_data = NULL;
1022 inp->inp_keepalive_datalen = 0;
1023 inp->inp_keepalive_interval = 0;
1024 inp->inp_keepalive_type = 0;
1025 inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD;
1026 } else {
1027 if (inp->inp_keepalive_data != NULL) {
1028 FREE(inp->inp_keepalive_data,
1029 M_TEMP);
1030 inp->inp_keepalive_data = NULL;
1031 }
1032
1033 inp->inp_keepalive_datalen = min(
1034 ka.ka_data_len,
1035 UDP_KEEPALIVE_OFFLOAD_DATA_SIZE);
1036 if (inp->inp_keepalive_datalen > 0) {
1037 MALLOC(inp->inp_keepalive_data,
1038 u_int8_t *,
1039 inp->inp_keepalive_datalen,
1040 M_TEMP, M_WAITOK);
1041 if (inp->inp_keepalive_data == NULL) {
1042 inp->inp_keepalive_datalen = 0;
1043 error = ENOMEM;
1044 break;
1045 }
1046 bcopy(ka.ka_data,
1047 inp->inp_keepalive_data,
1048 inp->inp_keepalive_datalen);
1049 } else {
1050 inp->inp_keepalive_datalen = 0;
1051 }
1052 inp->inp_keepalive_interval =
1053 min(UDP_KEEPALIVE_INTERVAL_MAX_SECONDS,
1054 ka.ka_interval);
1055 inp->inp_keepalive_type = ka.ka_type;
1056 inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD;
1057 }
1058 break;
1059 }
1060 case SO_FLUSH:
1061 if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
1062 sizeof(optval))) != 0) {
1063 break;
1064 }
1065
1066 error = inp_flush(inp, optval);
1067 break;
1068
1069 default:
1070 error = ENOPROTOOPT;
1071 break;
1072 }
1073 break;
1074
1075 case SOPT_GET:
1076 switch (sopt->sopt_name) {
1077 case UDP_NOCKSUM:
1078 optval = inp->inp_flags & INP_UDP_NOCKSUM;
1079 break;
1080
1081 default:
1082 error = ENOPROTOOPT;
1083 break;
1084 }
1085 if (error == 0) {
1086 error = sooptcopyout(sopt, &optval, sizeof(optval));
1087 }
1088 break;
1089 }
1090 return error;
1091 }
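/*
 * Illustrative userspace sketch (not part of this file) of the two
 * UDP-level options handled above; per the checks in the code, the
 * keepalive case requires a connected socket and a nonzero ka_type:
 *
 *	#include <netinet/udp.h>
 *
 *	int one = 1;
 *	setsockopt(s, IPPROTO_UDP, UDP_NOCKSUM, &one, sizeof(one));
 *
 *	struct udp_keepalive_offload ka = { 0 };
 *	ka.ka_interval = 30;	(seconds between keepalives)
 *	ka.ka_type = 1;		(must be nonzero)
 *	setsockopt(s, IPPROTO_UDP, UDP_KEEPALIVE_OFFLOAD,
 *	    &ka, sizeof(ka));
 */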
1092
1093 static int
1094 udp_pcblist SYSCTL_HANDLER_ARGS
1095 {
1096 #pragma unused(oidp, arg1, arg2)
1097 int error, i, n;
1098 struct inpcb *inp, **inp_list;
1099 inp_gen_t gencnt;
1100 struct xinpgen xig;
1101
1102 /*
1103 	 * The process of preparing the PCB list is too time-consuming and
1104 * resource-intensive to repeat twice on every request.
1105 */
1106 lck_rw_lock_exclusive(udbinfo.ipi_lock);
1107 if (req->oldptr == USER_ADDR_NULL) {
1108 n = udbinfo.ipi_count;
1109 req->oldidx = 2 * (sizeof(xig))
1110 + (n + n / 8) * sizeof(struct xinpcb);
1111 lck_rw_done(udbinfo.ipi_lock);
1112 return 0;
1113 }
1114
1115 if (req->newptr != USER_ADDR_NULL) {
1116 lck_rw_done(udbinfo.ipi_lock);
1117 return EPERM;
1118 }
1119
1120 /*
1121 * OK, now we're committed to doing something.
1122 */
1123 gencnt = udbinfo.ipi_gencnt;
1124 n = udbinfo.ipi_count;
1125
1126 bzero(&xig, sizeof(xig));
1127 xig.xig_len = sizeof(xig);
1128 xig.xig_count = n;
1129 xig.xig_gen = gencnt;
1130 xig.xig_sogen = so_gencnt;
1131 error = SYSCTL_OUT(req, &xig, sizeof(xig));
1132 if (error) {
1133 lck_rw_done(udbinfo.ipi_lock);
1134 return error;
1135 }
1136 /*
1137 * We are done if there is no pcb
1138 */
1139 if (n == 0) {
1140 lck_rw_done(udbinfo.ipi_lock);
1141 return 0;
1142 }
1143
1144 inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
1145 if (inp_list == 0) {
1146 lck_rw_done(udbinfo.ipi_lock);
1147 return ENOMEM;
1148 }
1149
1150 for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
1151 inp = LIST_NEXT(inp, inp_list)) {
1152 if (inp->inp_gencnt <= gencnt &&
1153 inp->inp_state != INPCB_STATE_DEAD) {
1154 inp_list[i++] = inp;
1155 }
1156 }
1157 n = i;
1158
1159 error = 0;
1160 for (i = 0; i < n; i++) {
1161 struct xinpcb xi;
1162
1163 inp = inp_list[i];
1164
1165 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
1166 continue;
1167 }
1168 udp_lock(inp->inp_socket, 1, 0);
1169 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
1170 udp_unlock(inp->inp_socket, 1, 0);
1171 continue;
1172 }
1173 if (inp->inp_gencnt > gencnt) {
1174 udp_unlock(inp->inp_socket, 1, 0);
1175 continue;
1176 }
1177
1178 bzero(&xi, sizeof(xi));
1179 xi.xi_len = sizeof(xi);
1180 /* XXX should avoid extra copy */
1181 inpcb_to_compat(inp, &xi.xi_inp);
1182 if (inp->inp_socket) {
1183 sotoxsocket(inp->inp_socket, &xi.xi_socket);
1184 }
1185
1186 udp_unlock(inp->inp_socket, 1, 0);
1187
1188 error = SYSCTL_OUT(req, &xi, sizeof(xi));
1189 }
1190 if (!error) {
1191 /*
1192 * Give the user an updated idea of our state.
1193 * If the generation differs from what we told
1194 * her before, she knows that something happened
1195 * while we were processing this request, and it
1196 * might be necessary to retry.
1197 */
1198 bzero(&xig, sizeof(xig));
1199 xig.xig_len = sizeof(xig);
1200 xig.xig_gen = udbinfo.ipi_gencnt;
1201 xig.xig_sogen = so_gencnt;
1202 xig.xig_count = udbinfo.ipi_count;
1203 error = SYSCTL_OUT(req, &xig, sizeof(xig));
1204 }
1205 FREE(inp_list, M_TEMP);
1206 lck_rw_done(udbinfo.ipi_lock);
1207 return error;
1208 }
1209
1210 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
1211 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist,
1212 "S,xinpcb", "List of active UDP sockets");
1213
1214 #if !CONFIG_EMBEDDED
1215
1216 static int
1217 udp_pcblist64 SYSCTL_HANDLER_ARGS
1218 {
1219 #pragma unused(oidp, arg1, arg2)
1220 int error, i, n;
1221 struct inpcb *inp, **inp_list;
1222 inp_gen_t gencnt;
1223 struct xinpgen xig;
1224
1225 /*
1226 	 * The process of preparing the PCB list is too time-consuming and
1227 * resource-intensive to repeat twice on every request.
1228 */
1229 lck_rw_lock_shared(udbinfo.ipi_lock);
1230 if (req->oldptr == USER_ADDR_NULL) {
1231 n = udbinfo.ipi_count;
1232 req->oldidx =
1233 2 * (sizeof(xig)) + (n + n / 8) * sizeof(struct xinpcb64);
1234 lck_rw_done(udbinfo.ipi_lock);
1235 return 0;
1236 }
1237
1238 if (req->newptr != USER_ADDR_NULL) {
1239 lck_rw_done(udbinfo.ipi_lock);
1240 return EPERM;
1241 }
1242
1243 /*
1244 * OK, now we're committed to doing something.
1245 */
1246 gencnt = udbinfo.ipi_gencnt;
1247 n = udbinfo.ipi_count;
1248
1249 bzero(&xig, sizeof(xig));
1250 xig.xig_len = sizeof(xig);
1251 xig.xig_count = n;
1252 xig.xig_gen = gencnt;
1253 xig.xig_sogen = so_gencnt;
1254 error = SYSCTL_OUT(req, &xig, sizeof(xig));
1255 if (error) {
1256 lck_rw_done(udbinfo.ipi_lock);
1257 return error;
1258 }
1259 /*
1260 * We are done if there is no pcb
1261 */
1262 if (n == 0) {
1263 lck_rw_done(udbinfo.ipi_lock);
1264 return 0;
1265 }
1266
1267 inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
1268 if (inp_list == 0) {
1269 lck_rw_done(udbinfo.ipi_lock);
1270 return ENOMEM;
1271 }
1272
1273 for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
1274 inp = LIST_NEXT(inp, inp_list)) {
1275 if (inp->inp_gencnt <= gencnt &&
1276 inp->inp_state != INPCB_STATE_DEAD) {
1277 inp_list[i++] = inp;
1278 }
1279 }
1280 n = i;
1281
1282 error = 0;
1283 for (i = 0; i < n; i++) {
1284 struct xinpcb64 xi;
1285
1286 inp = inp_list[i];
1287
1288 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
1289 continue;
1290 }
1291 udp_lock(inp->inp_socket, 1, 0);
1292 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
1293 udp_unlock(inp->inp_socket, 1, 0);
1294 continue;
1295 }
1296 if (inp->inp_gencnt > gencnt) {
1297 udp_unlock(inp->inp_socket, 1, 0);
1298 continue;
1299 }
1300
1301 bzero(&xi, sizeof(xi));
1302 xi.xi_len = sizeof(xi);
1303 inpcb_to_xinpcb64(inp, &xi);
1304 if (inp->inp_socket) {
1305 sotoxsocket64(inp->inp_socket, &xi.xi_socket);
1306 }
1307
1308 udp_unlock(inp->inp_socket, 1, 0);
1309
1310 error = SYSCTL_OUT(req, &xi, sizeof(xi));
1311 }
1312 if (!error) {
1313 /*
1314 * Give the user an updated idea of our state.
1315 * If the generation differs from what we told
1316 * her before, she knows that something happened
1317 * while we were processing this request, and it
1318 * might be necessary to retry.
1319 */
1320 bzero(&xig, sizeof(xig));
1321 xig.xig_len = sizeof(xig);
1322 xig.xig_gen = udbinfo.ipi_gencnt;
1323 xig.xig_sogen = so_gencnt;
1324 xig.xig_count = udbinfo.ipi_count;
1325 error = SYSCTL_OUT(req, &xig, sizeof(xig));
1326 }
1327 FREE(inp_list, M_TEMP);
1328 lck_rw_done(udbinfo.ipi_lock);
1329 return error;
1330 }
1331
1332 SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64,
1333 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64,
1334 "S,xinpcb64", "List of active UDP sockets");
1335
1336 #endif /* !CONFIG_EMBEDDED */
1337
1338 static int
1339 udp_pcblist_n SYSCTL_HANDLER_ARGS
1340 {
1341 #pragma unused(oidp, arg1, arg2)
1342 return get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
1343 }
1344
1345 SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n,
1346 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n,
1347 "S,xinpcb_n", "List of active UDP sockets");
1348
1349 __private_extern__ void
1350 udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
1351 bitstr_t *bitfield)
1352 {
1353 inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
1354 &udbinfo);
1355 }
1356
1357 __private_extern__ uint32_t
1358 udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
1359 {
1360 return inpcb_count_opportunistic(ifindex, &udbinfo, flags);
1361 }
1362
1363 __private_extern__ uint32_t
1364 udp_find_anypcb_byaddr(struct ifaddr *ifa)
1365 {
1366 return inpcb_find_anypcb_byaddr(ifa, &udbinfo);
1367 }
1368
1369 static int
1370 udp_check_pktinfo(struct mbuf *control, struct ifnet **outif,
1371 struct in_addr *laddr)
1372 {
1373 struct cmsghdr *cm = 0;
1374 struct in_pktinfo *pktinfo;
1375 struct ifnet *ifp;
1376
1377 if (outif != NULL) {
1378 *outif = NULL;
1379 }
1380
1381 /*
1382 * XXX: Currently, we assume all the optional information is stored
1383 * in a single mbuf.
1384 */
1385 if (control->m_next) {
1386 return EINVAL;
1387 }
1388
1389 if (control->m_len < CMSG_LEN(0)) {
1390 return EINVAL;
1391 }
1392
1393 for (cm = M_FIRST_CMSGHDR(control);
1394 is_cmsg_valid(control, cm);
1395 cm = M_NXT_CMSGHDR(control, cm)) {
1396 if (cm->cmsg_level != IPPROTO_IP ||
1397 cm->cmsg_type != IP_PKTINFO) {
1398 continue;
1399 }
1400
1401 if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) {
1402 return EINVAL;
1403 }
1404
1405 pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm);
1406
1407 /* Check for a valid ifindex in pktinfo */
1408 ifnet_head_lock_shared();
1409
1410 if (pktinfo->ipi_ifindex > if_index) {
1411 ifnet_head_done();
1412 return ENXIO;
1413 }
1414
1415 /*
1416 * If ipi_ifindex is specified it takes precedence
1417 * over ipi_spec_dst.
1418 */
1419 if (pktinfo->ipi_ifindex) {
1420 ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
1421 if (ifp == NULL) {
1422 ifnet_head_done();
1423 return ENXIO;
1424 }
1425 if (outif != NULL) {
1426 ifnet_reference(ifp);
1427 *outif = ifp;
1428 }
1429 ifnet_head_done();
1430 laddr->s_addr = INADDR_ANY;
1431 break;
1432 }
1433
1434 ifnet_head_done();
1435
1436 /*
1437 * Use the provided ipi_spec_dst address for temp
1438 * source address.
1439 */
1440 *laddr = pktinfo->ipi_spec_dst;
1441 break;
1442 }
1443 return 0;
1444 }
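/*
 * Illustrative userspace sketch (not part of this file): the control
 * data parsed above comes from sendmsg(2) with an IP_PKTINFO cmsg,
 * which pins either the outgoing interface or the source address for a
 * single datagram (msg_name/msg_iov setup elided):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
 *	struct msghdr msg;
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = IPPROTO_IP;
 *	cm->cmsg_type = IP_PKTINFO;
 *	cm->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
 *	struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cm);
 *	pi->ipi_ifindex = if_nametoindex("en0");
 *	sendmsg(s, &msg, 0);
 */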
1445
1446 int
1447 udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
1448 struct mbuf *control, struct proc *p)
1449 {
1450 struct udpiphdr *ui;
1451 int len = m->m_pkthdr.len;
1452 struct sockaddr_in *sin;
1453 struct in_addr origladdr, laddr, faddr, pi_laddr;
1454 u_short lport, fport;
1455 int error = 0, udp_dodisconnect = 0, pktinfo = 0;
1456 struct socket *so = inp->inp_socket;
1457 int soopts = 0;
1458 struct mbuf *inpopts;
1459 struct ip_moptions *mopts;
1460 struct route ro;
1461 struct ip_out_args ipoa;
1462 #if CONTENT_FILTER
1463 struct m_tag *cfil_tag = NULL;
1464 bool cfil_faddr_use = false;
1465 uint32_t cfil_so_state_change_cnt = 0;
1466 short cfil_so_options = 0;
1467 struct sockaddr *cfil_faddr = NULL;
1468 #endif
1469
1470 bzero(&ipoa, sizeof(ipoa));
1471 ipoa.ipoa_boundif = IFSCOPE_NONE;
1472 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
1473
1474 struct ifnet *outif = NULL;
1475 struct flowadv *adv = &ipoa.ipoa_flowadv;
1476 int sotc = SO_TC_UNSPEC;
1477 int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1478 struct ifnet *origoutifp = NULL;
1479 int flowadv = 0;
1480
1481 /* Enable flow advisory only when connected */
1482 flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
1483 pi_laddr.s_addr = INADDR_ANY;
1484
1485 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1486
1487 socket_lock_assert_owned(so);
1488
1489 #if CONTENT_FILTER
1490 /*
1491 * If socket is subject to UDP Content Filter and no addr is passed in,
1492 * retrieve CFIL saved state from mbuf and use it if necessary.
1493 */
1494 if (so->so_cfil_db && !addr) {
1495 cfil_tag = cfil_udp_get_socket_state(m, &cfil_so_state_change_cnt, &cfil_so_options, &cfil_faddr);
1496 if (cfil_tag) {
1497 sin = (struct sockaddr_in *)(void *)cfil_faddr;
1498 if (inp && inp->inp_faddr.s_addr == INADDR_ANY) {
1499 /*
1500 * Socket is unconnected, simply use the saved faddr as 'addr' to go through
1501 * the connect/disconnect logic.
1502 */
1503 addr = (struct sockaddr *)cfil_faddr;
1504 } else if ((so->so_state_change_cnt != cfil_so_state_change_cnt) &&
1505 (inp->inp_fport != sin->sin_port ||
1506 inp->inp_faddr.s_addr != sin->sin_addr.s_addr)) {
1507 /*
1508 * Socket is connected but socket state and dest addr/port changed.
1509 * We need to use the saved faddr info.
1510 */
1511 cfil_faddr_use = true;
1512 }
1513 }
1514 }
1515 #endif
1516
1517 if (control != NULL) {
1518 sotc = so_tc_from_control(control, &netsvctype);
1519 VERIFY(outif == NULL);
1520 error = udp_check_pktinfo(control, &outif, &pi_laddr);
1521 m_freem(control);
1522 control = NULL;
1523 if (error) {
1524 goto release;
1525 }
1526 pktinfo++;
1527 if (outif != NULL) {
1528 ipoa.ipoa_boundif = outif->if_index;
1529 }
1530 }
1531 if (sotc == SO_TC_UNSPEC) {
1532 sotc = so->so_traffic_class;
1533 netsvctype = so->so_netsvctype;
1534 }
1535
1536 KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
1537 inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
1538 (htons((u_short)len + sizeof(struct udphdr))));
1539
1540 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
1541 error = EMSGSIZE;
1542 goto release;
1543 }
1544
1545 if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
1546 /*
1547 		 * The socket is flow-controlled; drop packets
1548 		 * until the inp is no longer flow-controlled
1549 */
1550 error = ENOBUFS;
1551 goto release;
1552 }
1553 /*
1554 * If socket was bound to an ifindex, tell ip_output about it.
1555 * If the ancillary IP_PKTINFO option contains an interface index,
1556 * it takes precedence over the one specified by IP_BOUND_IF.
1557 */
1558 if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
1559 (inp->inp_flags & INP_BOUND_IF)) {
1560 VERIFY(inp->inp_boundifp != NULL);
1561 ifnet_reference(inp->inp_boundifp); /* for this routine */
1562 if (outif != NULL) {
1563 ifnet_release(outif);
1564 }
1565 outif = inp->inp_boundifp;
1566 ipoa.ipoa_boundif = outif->if_index;
1567 }
1568 if (INP_NO_CELLULAR(inp)) {
1569 ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1570 }
1571 if (INP_NO_EXPENSIVE(inp)) {
1572 ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
1573 }
1574 if (INP_AWDL_UNRESTRICTED(inp)) {
1575 ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
1576 }
1577 ipoa.ipoa_sotc = sotc;
1578 ipoa.ipoa_netsvctype = netsvctype;
1579 soopts |= IP_OUTARGS;
1580
1581 /*
1582 * If there was a routing change, discard cached route and check
1583 * that we have a valid source address. Reacquire a new source
1584 * address if INADDR_ANY was specified.
1585 *
1586 * If we are using cfil saved state, go through this cache cleanup
1587 * so that we can get a new route.
1588 */
1589 if (ROUTE_UNUSABLE(&inp->inp_route)
1590 #if CONTENT_FILTER
1591 || cfil_faddr_use
1592 #endif
1593 ) {
1594 struct in_ifaddr *ia = NULL;
1595
1596 ROUTE_RELEASE(&inp->inp_route);
1597
1598 /* src address is gone? */
1599 if (inp->inp_laddr.s_addr != INADDR_ANY &&
1600 (ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
1601 if (!(inp->inp_flags & INP_INADDR_ANY) ||
1602 (so->so_state & SS_ISCONNECTED)) {
1603 /*
1604 * Rdar://5448998
1605 * If the source address is gone, return an
1606 * error if:
1607 * - the source was specified
1608 * - the socket was already connected
1609 */
1610 soevent(so, (SO_FILT_HINT_LOCKED |
1611 SO_FILT_HINT_NOSRCADDR));
1612 error = EADDRNOTAVAIL;
1613 goto release;
1614 } else {
1615 /* new src will be set later */
1616 inp->inp_laddr.s_addr = INADDR_ANY;
1617 inp->inp_last_outifp = NULL;
1618 }
1619 }
1620 if (ia != NULL) {
1621 IFA_REMREF(&ia->ia_ifa);
1622 }
1623 }
1624
1625 /*
1626 * IP_PKTINFO option check. If a temporary scope or src address
1627 * is provided, use it for this packet only and make sure we forget
1628 * it after sending this datagram.
1629 */
1630 if (pi_laddr.s_addr != INADDR_ANY ||
1631 (ipoa.ipoa_boundif != IFSCOPE_NONE && pktinfo)) {
1632 /* temp src address for this datagram only */
1633 laddr = pi_laddr;
1634 origladdr.s_addr = INADDR_ANY;
1635 /* we don't want to keep the laddr or route */
1636 udp_dodisconnect = 1;
1637 /* remember we don't care about src addr */
1638 inp->inp_flags |= INP_INADDR_ANY;
1639 } else {
1640 origladdr = laddr = inp->inp_laddr;
1641 }
1642
1643 origoutifp = inp->inp_last_outifp;
1644 faddr = inp->inp_faddr;
1645 lport = inp->inp_lport;
1646 fport = inp->inp_fport;
1647
1648 #if CONTENT_FILTER
1649 if (cfil_faddr_use) {
1650 faddr = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_addr;
1651 fport = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_port;
1652 }
1653 #endif
1654
1655 if (addr) {
1656 sin = (struct sockaddr_in *)(void *)addr;
1657 if (faddr.s_addr != INADDR_ANY) {
1658 error = EISCONN;
1659 goto release;
1660 }
1661 if (lport == 0) {
1662 /*
1663 * In case we don't have a local port set, go through
1664 * the full connect. We don't have a local port yet
1665 * (i.e., we can't be looked up), so it's not an issue
1666 * if the input runs at the same time we do this.
1667 */
1668 /* if we have a source address specified, use that */
1669 if (pi_laddr.s_addr != INADDR_ANY) {
1670 inp->inp_laddr = pi_laddr;
1671 }
1672 /*
1673 * If a scope is specified, use it. Scope from
1674 			 * IP_PKTINFO takes precedence over the scope
1675 * set via INP_BOUND_IF.
1676 */
1677 error = in_pcbconnect(inp, addr, p, ipoa.ipoa_boundif,
1678 &outif);
1679 if (error) {
1680 goto release;
1681 }
1682
1683 laddr = inp->inp_laddr;
1684 lport = inp->inp_lport;
1685 faddr = inp->inp_faddr;
1686 fport = inp->inp_fport;
1687 udp_dodisconnect = 1;
1688
1689 /* synch up in case in_pcbladdr() overrides */
1690 if (outif != NULL && ipoa.ipoa_boundif != IFSCOPE_NONE) {
1691 ipoa.ipoa_boundif = outif->if_index;
1692 }
1693 } else {
1694 /*
1695 * Fast path case
1696 *
1697 			 * We have a full address and a local port; use that
1698 			 * info to build the packet without changing the pcb
1699 			 * and interfering with the input path. See 3851370.
1700 			 *
1701 			 * Scope from IP_PKTINFO takes precedence over the
1702 			 * scope set via INP_BOUND_IF.
1703 */
1704 if (laddr.s_addr == INADDR_ANY) {
1705 if ((error = in_pcbladdr(inp, addr, &laddr,
1706 ipoa.ipoa_boundif, &outif, 0)) != 0) {
1707 goto release;
1708 }
1709 /*
1710 * from pcbconnect: remember we don't
1711 * care about src addr.
1712 */
1713 inp->inp_flags |= INP_INADDR_ANY;
1714
1715 /* synch up in case in_pcbladdr() overrides */
1716 if (outif != NULL &&
1717 ipoa.ipoa_boundif != IFSCOPE_NONE) {
1718 ipoa.ipoa_boundif = outif->if_index;
1719 }
1720 }
1721
1722 faddr = sin->sin_addr;
1723 fport = sin->sin_port;
1724 }
1725 } else {
1726 if (faddr.s_addr == INADDR_ANY) {
1727 error = ENOTCONN;
1728 goto release;
1729 }
1730 }
1731
1732 #if CONFIG_MACF_NET
1733 mac_mbuf_label_associate_inpcb(inp, m);
1734 #endif /* CONFIG_MACF_NET */
1735
1736 if (inp->inp_flowhash == 0) {
1737 inp->inp_flowhash = inp_calc_flowhash(inp);
1738 }
1739
1740 if (fport == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) {
1741 so->so_flags1 |= SOF1_DNS_COUNTED;
1742 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns);
1743 }
1744
1745 /*
1746 * Calculate data length and get a mbuf
1747 * for UDP and IP headers.
1748 */
1749 M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT, 1);
1750 if (m == 0) {
1751 error = ENOBUFS;
1752 goto abort;
1753 }
1754
1755 /*
1756 * Fill in mbuf with extended UDP header
1757 * and addresses and length put into network format.
1758 */
1759 ui = mtod(m, struct udpiphdr *);
1760 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1761 ui->ui_pr = IPPROTO_UDP;
1762 ui->ui_src = laddr;
1763 ui->ui_dst = faddr;
1764 ui->ui_sport = lport;
1765 ui->ui_dport = fport;
1766 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1767
1768 /*
1769 	 * Set the checksum field to the pseudo-header checksum and output
1770 	 * the datagram.
1771 	 *
1772 	 * Treat flows to be CLAT46'd as IPv6 flows and compute the checksum
1773 	 * no matter what, as IPv6 mandates a checksum for UDP.
1774 	 *
1775 	 * Here we compute only the one's complement sum of the pseudo header.
1776 	 * The payload computation and final complement are delayed until much
1777 	 * later in IP processing, which decides whether the remaining work is
1778 	 * done in software or through offload.
1779 	 *
1780 	 * That is communicated by setting CSUM_UDP in csum_flags.
1781 	 * The offset of the checksum from the start of the ULP header is
1782 	 * communicated through csum_data.
1783 	 *
1784 	 * Note that since this already contains the pseudo-header checksum,
1785 	 * any later operation at the IP layer that modifies the values used
1786 	 * here must update the checksum as well (for example NAT).
1786 */
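/*
 * For reference (RFC 768), the pseudo header summed by in_pseudo()
 * below is:
 *
 *	 0      7 8     15 16    23 24    31
 *	+--------+--------+--------+--------+
 *	|          source address           |
 *	+--------+--------+--------+--------+
 *	|        destination address        |
 *	+--------+--------+--------+--------+
 *	|  zero  |protocol|   UDP length    |
 *	+--------+--------+--------+--------+
 *
 * The protocol and length rows are folded into in_pseudo()'s third
 * argument: htons(len + sizeof(struct udphdr) + IPPROTO_UDP).
 */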
1787 if ((inp->inp_flags2 & INP2_CLAT46_FLOW) ||
1788 (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM))) {
1789 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1790 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1791 m->m_pkthdr.csum_flags = (CSUM_UDP | CSUM_ZERO_INVERT);
1792 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1793 } else {
1794 ui->ui_sum = 0;
1795 }
1796 ((struct ip *)ui)->ip_len = sizeof(struct udpiphdr) + len;
1797 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1798 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1799 udpstat.udps_opackets++;
1800
1801 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1802 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1803
1804 #if NECP
1805 {
1806 necp_kernel_policy_id policy_id;
1807 necp_kernel_policy_id skip_policy_id;
1808 u_int32_t route_rule_id;
1809
1810 /*
1811 * We need a route to perform NECP route rule checks
1812 */
1813 if (net_qos_policy_restricted != 0 &&
1814 ROUTE_UNUSABLE(&inp->inp_route)) {
1815 struct sockaddr_in to;
1816 struct sockaddr_in from;
1817
1818 ROUTE_RELEASE(&inp->inp_route);
1819
1820 bzero(&from, sizeof(struct sockaddr_in));
1821 from.sin_family = AF_INET;
1822 from.sin_len = sizeof(struct sockaddr_in);
1823 from.sin_addr = laddr;
1824
1825 bzero(&to, sizeof(struct sockaddr_in));
1826 to.sin_family = AF_INET;
1827 to.sin_len = sizeof(struct sockaddr_in);
1828 to.sin_addr = faddr;
1829
1830 inp->inp_route.ro_dst.sa_family = AF_INET;
1831 inp->inp_route.ro_dst.sa_len = sizeof(struct sockaddr_in);
1832 ((struct sockaddr_in *)(void *)&inp->inp_route.ro_dst)->sin_addr =
1833 faddr;
1834
1835 rtalloc_scoped(&inp->inp_route, ipoa.ipoa_boundif);
1836
1837 inp_update_necp_policy(inp, (struct sockaddr *)&from,
1838 (struct sockaddr *)&to, ipoa.ipoa_boundif);
1839 inp->inp_policyresult.results.qos_marking_gencount = 0;
1840 }
1841
1842 if (!necp_socket_is_allowed_to_send_recv_v4(inp, lport, fport,
1843 &laddr, &faddr, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
1844 error = EHOSTUNREACH;
1845 goto abort;
1846 }
1847
1848 necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);
1849
1850 if (net_qos_policy_restricted != 0) {
1851 necp_socket_update_qos_marking(inp,
1852 inp->inp_route.ro_rt, NULL, route_rule_id);
1853 }
1854 }
1855 #endif /* NECP */
1856 if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1857 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1858 }
1859
1860 #if IPSEC
1861 if (inp->inp_sp != NULL && ipsec_setsocket(m, inp->inp_socket) != 0) {
1862 error = ENOBUFS;
1863 goto abort;
1864 }
1865 #endif /* IPSEC */
1866
1867 inpopts = inp->inp_options;
1868 #if CONTENT_FILTER
1869 if (cfil_tag && (inp->inp_socket->so_options != cfil_so_options)) {
1870 soopts |= (cfil_so_options & (SO_DONTROUTE | SO_BROADCAST));
1871 } else
1872 #endif
1873 soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1874
1875 mopts = inp->inp_moptions;
1876 if (mopts != NULL) {
1877 IMO_LOCK(mopts);
1878 IMO_ADDREF_LOCKED(mopts);
1879 if (IN_MULTICAST(ntohl(ui->ui_dst.s_addr)) &&
1880 mopts->imo_multicast_ifp != NULL) {
1881 /* no reference needed */
1882 inp->inp_last_outifp = mopts->imo_multicast_ifp;
1883 }
1884 IMO_UNLOCK(mopts);
1885 }
1886
1887 /* Copy the cached route and take an extra reference */
1888 inp_route_copyout(inp, &ro);
1889
1890 set_packet_service_class(m, so, sotc, 0);
1891 m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1892 m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
1893 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
1894 m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC);
1895 if (flowadv) {
1896 m->m_pkthdr.pkt_flags |= PKTF_FLOW_ADV;
1897 }
1898 m->m_pkthdr.tx_udp_pid = so->last_pid;
1899 if (so->so_flags & SOF_DELEGATED) {
1900 m->m_pkthdr.tx_udp_e_pid = so->e_pid;
1901 } else {
1902 m->m_pkthdr.tx_udp_e_pid = 0;
1903 }
1904
1905 if (ipoa.ipoa_boundif != IFSCOPE_NONE) {
1906 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1907 }
1908
1909 if (laddr.s_addr != INADDR_ANY) {
1910 ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
1911 }
1912
1913 inp->inp_sndinprog_cnt++;
1914
1915 socket_unlock(so, 0);
1916 error = ip_output(m, inpopts, &ro, soopts, mopts, &ipoa);
1917 m = NULL;
1918 socket_lock(so, 0);
1919 if (mopts != NULL) {
1920 IMO_REMREF(mopts);
1921 }
1922
1923 if (error == 0 && nstat_collect) {
1924 boolean_t cell, wifi, wired;
1925
1926 if (ro.ro_rt != NULL) {
1927 cell = IFNET_IS_CELLULAR(ro.ro_rt->rt_ifp);
1928 wifi = (!cell && IFNET_IS_WIFI(ro.ro_rt->rt_ifp));
1929 wired = (!wifi && IFNET_IS_WIRED(ro.ro_rt->rt_ifp));
1930 } else {
1931 cell = wifi = wired = FALSE;
1932 }
1933 INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1);
1934 INP_ADD_STAT(inp, cell, wifi, wired, txbytes, len);
1935 inp_set_activity_bitmap(inp);
1936 }
1937
1938 if (flowadv && (adv->code == FADV_FLOW_CONTROLLED ||
1939 adv->code == FADV_SUSPENDED)) {
1940 /*
1941 * return a hint to the application that
1942 * the packet has been dropped
1943 */
1944 error = ENOBUFS;
1945 inp_set_fc_state(inp, adv->code);
1946 }
1947
1948 VERIFY(inp->inp_sndinprog_cnt > 0);
1949 if (--inp->inp_sndinprog_cnt == 0) {
1950 inp->inp_flags &= ~(INP_FC_FEEDBACK);
1951 }
1952
1953 /* Synchronize PCB cached route */
1954 inp_route_copyin(inp, &ro);
1955
1956 abort:
1957 if (udp_dodisconnect) {
1958 /* Always discard the cached route for unconnected socket */
1959 ROUTE_RELEASE(&inp->inp_route);
1960 in_pcbdisconnect(inp);
1961 inp->inp_laddr = origladdr; /* XXX rehash? */
1962 /* no reference needed */
1963 inp->inp_last_outifp = origoutifp;
1964 } else if (inp->inp_route.ro_rt != NULL) {
1965 struct rtentry *rt = inp->inp_route.ro_rt;
1966 struct ifnet *outifp;
1967
1968 if (rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) {
1969 rt = NULL; /* unusable */
1970 }
1971 #if CONTENT_FILTER
1972 /*
1973 * Discard temporary route for cfil case
1974 */
1975 if (cfil_faddr_use) {
1976 rt = NULL; /* unusable */
1977 }
1978 #endif
1979
1980 /*
1981 * Always discard if it is a multicast or broadcast route.
1982 */
1983 if (rt == NULL) {
1984 ROUTE_RELEASE(&inp->inp_route);
1985 }
1986
1987 /*
1988 * If the destination route is unicast, update outifp with
1989 * that of the route interface used by IP.
1990 */
1991 if (rt != NULL &&
1992 (outifp = rt->rt_ifp) != inp->inp_last_outifp) {
1993 inp->inp_last_outifp = outifp; /* no reference needed */
1994
1995 so->so_pktheadroom = P2ROUNDUP(
1996 sizeof(struct udphdr) +
1997 sizeof(struct ip) +
1998 ifnet_hdrlen(outifp) +
1999 ifnet_mbuf_packetpreamblelen(outifp),
2000 sizeof(u_int32_t));
2001 }
2002 } else {
2003 ROUTE_RELEASE(&inp->inp_route);
2004 }
2005
2006 /*
2007 * If output interface was cellular/expensive, and this socket is
2008 * denied access to it, generate an event.
2009 */
2010 if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
2011 (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp))) {
2012 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
2013 }
2014
2015 release:
2016 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
2017
2018 if (m != NULL) {
2019 m_freem(m);
2020 }
2021
2022 if (outif != NULL) {
2023 ifnet_release(outif);
2024 }
2025
2026 #if CONTENT_FILTER
2027 if (cfil_tag) {
2028 m_tag_free(cfil_tag);
2029 }
2030 #endif
2031
2032 return error;
2033 }
2034
2035 u_int32_t udp_sendspace = 9216; /* really max datagram size */
2036 /* 187 1K datagrams (approx 192 KB) */
2037 u_int32_t udp_recvspace = 187 * (1024 +
2038 #if INET6
2039 sizeof(struct sockaddr_in6)
2040 #else /* !INET6 */
2041 sizeof(struct sockaddr_in)
2042 #endif /* !INET6 */
2043 );
2044
2045 /* Check that the values of udp send and recv space do not exceed sb_max */
2046 static int
2047 sysctl_udp_sospace(struct sysctl_oid *oidp, void *arg1, int arg2,
2048 struct sysctl_req *req)
2049 {
2050 #pragma unused(arg1, arg2)
2051 u_int32_t new_value = 0, *space_p = NULL;
2052 int changed = 0, error = 0;
2053 u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES;
2054
2055 switch (oidp->oid_number) {
2056 case UDPCTL_RECVSPACE:
2057 space_p = &udp_recvspace;
2058 break;
2059 case UDPCTL_MAXDGRAM:
2060 space_p = &udp_sendspace;
2061 break;
2062 default:
2063 return EINVAL;
2064 }
2065 error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
2066 &new_value, &changed);
2067 if (changed) {
2068 if (new_value > 0 && new_value <= sb_effective_max) {
2069 *space_p = new_value;
2070 } else {
2071 error = ERANGE;
2072 }
2073 }
2074 return error;
2075 }
2076
2077 SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace,
2078 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_recvspace, 0,
2079 &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
2080
2081 SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram,
2082 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_sendspace, 0,
2083 &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
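/*
 * A hedged usage sketch: assuming the conventional sysctl(8) names for
 * these OIDs, the limits can be inspected and, within the sb_max-derived
 * bound, tuned from user space (values illustrative):
 *
 *   $ sysctl net.inet.udp.recvspace net.inet.udp.maxdgram
 *   $ sudo sysctl -w net.inet.udp.maxdgram=65535
 *
 * Out-of-range values are rejected with ERANGE by sysctl_udp_sospace().
 */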
2084
2085 int
2086 udp_abort(struct socket *so)
2087 {
2088 struct inpcb *inp;
2089
2090 inp = sotoinpcb(so);
2091 if (inp == NULL) {
2092 panic("%s: so=%p null inp\n", __func__, so);
2093 /* NOTREACHED */
2094 }
2095 soisdisconnected(so);
2096 in_pcbdetach(inp);
2097 return 0;
2098 }
2099
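/*
 * Attach a PCB to a newly created UDP socket and reserve the default
 * buffer space (udp_sendspace/udp_recvspace) for it.
 */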
2100 int
2101 udp_attach(struct socket *so, int proto, struct proc *p)
2102 {
2103 #pragma unused(proto)
2104 struct inpcb *inp;
2105 int error;
2106
2107 inp = sotoinpcb(so);
2108 if (inp != NULL) {
2109 panic("%s so=%p inp=%p\n", __func__, so, inp);
2110 /* NOTREACHED */
2111 }
2112 error = in_pcballoc(so, &udbinfo, p);
2113 if (error != 0) {
2114 return error;
2115 }
2116 error = soreserve(so, udp_sendspace, udp_recvspace);
2117 if (error != 0) {
2118 return error;
2119 }
2120 inp = (struct inpcb *)so->so_pcb;
2121 inp->inp_vflag |= INP_IPV4;
2122 inp->inp_ip_ttl = ip_defttl;
2123 if (nstat_collect) {
2124 nstat_udp_new_pcb(inp);
2125 }
2126 return 0;
2127 }
2128
2129 int
2130 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
2131 {
2132 struct inpcb *inp;
2133 int error;
2134
2135 if (nam->sa_family != 0 && nam->sa_family != AF_INET &&
2136 nam->sa_family != AF_INET6) {
2137 return EAFNOSUPPORT;
2138 }
2139
2140 inp = sotoinpcb(so);
2141 if (inp == NULL) {
2142 return EINVAL;
2143 }
2144 error = in_pcbbind(inp, nam, p);
2145
2146 #if NECP
2147 /* Update NECP client with bind result if not in middle of connect */
2148 if (error == 0 &&
2149 !(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
2150 !uuid_is_null(inp->necp_client_uuid)) {
2151 socket_unlock(so, 0);
2152 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2153 socket_lock(so, 0);
2154 }
2155 #endif /* NECP */
2156
2157 return error;
2158 }
2159
2160 int
2161 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
2162 {
2163 struct inpcb *inp;
2164 int error;
2165
2166 inp = sotoinpcb(so);
2167 if (inp == NULL) {
2168 return EINVAL;
2169 }
2170 if (inp->inp_faddr.s_addr != INADDR_ANY) {
2171 return EISCONN;
2172 }
2173
2174 if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
2175 so->so_flags1 |= SOF1_CONNECT_COUNTED;
2176 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
2177 }
2178
2179 #if NECP
2180 #if FLOW_DIVERT
2181 if (necp_socket_should_use_flow_divert(inp)) {
2182 uint32_t fd_ctl_unit =
2183 necp_socket_get_flow_divert_control_unit(inp);
2184 if (fd_ctl_unit > 0) {
2185 error = flow_divert_pcb_init(so, fd_ctl_unit);
2186 if (error == 0) {
2187 error = flow_divert_connect_out(so, nam, p);
2188 }
2189 } else {
2190 error = ENETDOWN;
2191 }
2192 return error;
2193 }
2194 #endif /* FLOW_DIVERT */
2195 #endif /* NECP */
2196
2197 error = in_pcbconnect(inp, nam, p, IFSCOPE_NONE, NULL);
2198 if (error == 0) {
2199 #if NECP
2200 /* Update NECP client with connected five-tuple */
2201 if (!uuid_is_null(inp->necp_client_uuid)) {
2202 socket_unlock(so, 0);
2203 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2204 socket_lock(so, 0);
2205 }
2206 #endif /* NECP */
2207
2208 soisconnected(so);
2209 if (inp->inp_flowhash == 0) {
2210 inp->inp_flowhash = inp_calc_flowhash(inp);
2211 }
2212 }
2213 return error;
2214 }
2215
2216 int
2217 udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sockaddr *dst,
2218 struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
2219 uint32_t flags, void *arg, uint32_t arglen,
2220 struct uio *uio, user_ssize_t *bytes_written)
2221 {
2222 #pragma unused(aid, flags, arg, arglen)
2223 struct inpcb *inp = sotoinpcb(so);
2224 int error = 0;
2225 user_ssize_t datalen = 0;
2226
2227 if (inp == NULL) {
2228 return EINVAL;
2229 }
2230
2231 VERIFY(dst != NULL);
2232
2233 ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
2234 inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;
2235
2236 #if NECP
2237 inp_update_necp_policy(inp, src, dst, ifscope);
2238 #endif /* NECP */
2239
2240 /* bind socket to the specified interface, if requested */
2241 if (ifscope != IFSCOPE_NONE &&
2242 (error = inp_bindif(inp, ifscope, NULL)) != 0) {
2243 goto done;
2244 }
2245
2246 /* if source address and/or port is specified, bind to it */
2247 if (src != NULL) {
2248 error = sobindlock(so, src, 0); /* already locked */
2249 if (error != 0) {
2250 goto done;
2251 }
2252 }
2253
2254 switch (af) {
2255 case AF_INET:
2256 error = udp_connect(so, dst, p);
2257 break;
2258 #if INET6
2259 case AF_INET6:
2260 error = udp6_connect(so, dst, p);
2261 break;
2262 #endif /* INET6 */
2263 default:
2264 VERIFY(0);
2265 /* NOTREACHED */
2266 }
2267
2268 if (error != 0) {
2269 goto done;
2270 }
2271
2272 /*
2273 * If there is data, copy it. DATA_IDEMPOTENT is ignored.
2274 * CONNECT_RESUME_ON_READ_WRITE is ignored.
2275 */
2276 if (uio != NULL) {
2277 socket_unlock(so, 0);
2278
2279 VERIFY(bytes_written != NULL);
2280
2281 datalen = uio_resid(uio);
2282 error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
2283 (uio_t)uio, NULL, NULL, 0);
2284 socket_lock(so, 0);
2285
2286 /* If error returned is EMSGSIZE, for example, disconnect */
2287 if (error == 0 || error == EWOULDBLOCK) {
2288 *bytes_written = datalen - uio_resid(uio);
2289 } else {
2290 (void) so->so_proto->pr_usrreqs->pru_disconnectx(so,
2291 SAE_ASSOCID_ANY, SAE_CONNID_ANY);
2292 }
2293 /*
2294 * mask the EWOULDBLOCK error so that the caller
2295 * knows that at least the connect was successful.
2296 */
2297 if (error == EWOULDBLOCK) {
2298 error = 0;
2299 }
2300 }
2301
2302 if (error == 0 && pcid != NULL) {
2303 *pcid = 1; /* there is only 1 connection for UDP */
2304 }
2305 done:
2306 inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
2307 return error;
2308 }
2309
2310 int
2311 udp_connectx(struct socket *so, struct sockaddr *src,
2312 struct sockaddr *dst, struct proc *p, uint32_t ifscope,
2313 sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
2314 uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
2315 {
2316 return udp_connectx_common(so, AF_INET, src, dst,
2317 p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written);
2318 }
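/*
 * User space reaches udp_connectx() through connectx(2). A minimal
 * sketch, assuming the Darwin <sys/socket.h> prototype (names and
 * values illustrative):
 *
 *   sa_endpoints_t eps = { 0 };
 *   sae_connid_t cid;
 *   eps.sae_dstaddr = (struct sockaddr *)&sin;
 *   eps.sae_dstaddrlen = sizeof (sin);
 *   error = connectx(s, &eps, SAE_ASSOCID_ANY, 0, NULL, 0, NULL, &cid);
 *
 * On success cid is 1, the single connection a UDP socket can have;
 * passing an iovec would send data through pru_sosend() as in
 * udp_connectx_common() above.
 */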
2319
2320 int
2321 udp_detach(struct socket *so)
2322 {
2323 struct inpcb *inp;
2324
2325 inp = sotoinpcb(so);
2326 if (inp == NULL) {
2327 panic("%s: so=%p null inp\n", __func__, so);
2328 /* NOTREACHED */
2329 }
2330
2331 /*
2332 * If this is a socket that does not want to wake up the device
2333 * for its traffic, the application might be waiting for
2334 * close to complete before going to sleep. Send a notification
2335 * for this kind of socket.
2336 */
2337 if (so->so_options & SO_NOWAKEFROMSLEEP) {
2338 socket_post_kev_msg_closed(so);
2339 }
2340
2341 in_pcbdetach(inp);
2342 inp->inp_state = INPCB_STATE_DEAD;
2343 return 0;
2344 }
2345
2346 int
2347 udp_disconnect(struct socket *so)
2348 {
2349 struct inpcb *inp;
2350
2351 inp = sotoinpcb(so);
2352 if (inp == NULL
2353 #if NECP
2354 || (necp_socket_should_use_flow_divert(inp))
2355 #endif /* NECP */
2356 ) {
2357 return inp == NULL ? EINVAL : EPROTOTYPE;
2358 }
2359 if (inp->inp_faddr.s_addr == INADDR_ANY) {
2360 return ENOTCONN;
2361 }
2362
2363 in_pcbdisconnect(inp);
2364
2365 /* reset flow controlled state, just in case */
2366 inp_reset_fc_state(inp);
2367
2368 inp->inp_laddr.s_addr = INADDR_ANY;
2369 so->so_state &= ~SS_ISCONNECTED; /* XXX */
2370 inp->inp_last_outifp = NULL;
2371
2372 return 0;
2373 }
2374
2375 int
2376 udp_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
2377 {
2378 #pragma unused(cid)
2379 if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
2380 return EINVAL;
2381 }
2382
2383 return udp_disconnect(so);
2384 }
2385
2386 int
2387 udp_send(struct socket *so, int flags, struct mbuf *m,
2388 struct sockaddr *addr, struct mbuf *control, struct proc *p)
2389 {
2390 #ifndef FLOW_DIVERT
2391 #pragma unused(flags)
2392 #endif /* !(FLOW_DIVERT) */
2393 struct inpcb *inp;
2394
2395 inp = sotoinpcb(so);
2396 if (inp == NULL) {
2397 if (m != NULL) {
2398 m_freem(m);
2399 }
2400 if (control != NULL) {
2401 m_freem(control);
2402 }
2403 return EINVAL;
2404 }
2405
2406 #if NECP
2407 #if FLOW_DIVERT
2408 if (necp_socket_should_use_flow_divert(inp)) {
2409 /* Implicit connect */
2410 return flow_divert_implicit_data_out(so, flags, m, addr,
2411 control, p);
2412 }
2413 #endif /* FLOW_DIVERT */
2414 #endif /* NECP */
2415
2416 return udp_output(inp, m, addr, control, p);
2417 }
2418
2419 int
2420 udp_shutdown(struct socket *so)
2421 {
2422 struct inpcb *inp;
2423
2424 inp = sotoinpcb(so);
2425 if (inp == NULL) {
2426 return EINVAL;
2427 }
2428 socantsendmore(so);
2429 return 0;
2430 }
2431
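/*
 * Socket lock/unlock for UDP: take the per-PCB mutex and record the
 * caller's return address in a small ring buffer (lock_lr/unlock_lr)
 * for the lock-history debugging reported by solockhistory_nr().
 */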
2432 int
2433 udp_lock(struct socket *so, int refcount, void *debug)
2434 {
2435 void *lr_saved;
2436
2437 if (debug == NULL) {
2438 lr_saved = __builtin_return_address(0);
2439 } else {
2440 lr_saved = debug;
2441 }
2442
2443 if (so->so_pcb != NULL) {
2444 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2445 LCK_MTX_ASSERT_NOTOWNED);
2446 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2447 } else {
2448 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2449 so, lr_saved, solockhistory_nr(so));
2450 /* NOTREACHED */
2451 }
2452 if (refcount) {
2453 so->so_usecount++;
2454 }
2455
2456 so->lock_lr[so->next_lock_lr] = lr_saved;
2457 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2458 return 0;
2459 }
2460
2461 int
2462 udp_unlock(struct socket *so, int refcount, void *debug)
2463 {
2464 void *lr_saved;
2465
2466 if (debug == NULL) {
2467 lr_saved = __builtin_return_address(0);
2468 } else {
2469 lr_saved = debug;
2470 }
2471
2472 if (refcount) {
2473 VERIFY(so->so_usecount > 0);
2474 so->so_usecount--;
2475 }
2476 if (so->so_pcb == NULL) {
2477 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2478 so, lr_saved, solockhistory_nr(so));
2479 /* NOTREACHED */
2480 } else {
2481 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2482 LCK_MTX_ASSERT_OWNED);
2483 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2484 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2485 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2486 }
2487 return 0;
2488 }
2489
2490 lck_mtx_t *
2491 udp_getlock(struct socket *so, int flags)
2492 {
2493 #pragma unused(flags)
2494 struct inpcb *inp = sotoinpcb(so);
2495
2496 if (so->so_pcb == NULL) {
2497 panic("%s: so=%p NULL so_pcb lrh= %s\n", __func__,
2498 so, solockhistory_nr(so));
2499 /* NOTREACHED */
2500 }
2501 return &inp->inpcb_mtx;
2502 }
2503
2504 /*
2505 * UDP garbage collector callback (inpcb_timer_func_t). It returns
2506 * nothing; the timer is kept active by incrementing
2507 * ipi_gc_req.intimer_fast whenever more work remains.
2508 */
2509 static void
2510 udp_gc(struct inpcbinfo *ipi)
2511 {
2512 struct inpcb *inp, *inpnxt;
2513 struct socket *so;
2514
2515 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2516 if (udp_gc_done == TRUE) {
2517 udp_gc_done = FALSE;
2518 /* couldn't get the lock, must lock next time */
2519 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2520 return;
2521 }
2522 lck_rw_lock_exclusive(ipi->ipi_lock);
2523 }
2524
2525 udp_gc_done = TRUE;
2526
2527 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
2528 inpnxt = inp->inp_list.le_next;
2529
2530 /*
2531 * Skip unless it's STOPUSING; garbage collector will
2532 * be triggered by in_pcb_checkstate() upon setting
2533 * wantcnt to that value. If the PCB is already dead,
2534 * keep gc active to anticipate wantcnt changing.
2535 */
2536 if (inp->inp_wantcnt != WNT_STOPUSING) {
2537 continue;
2538 }
2539
2540 /*
2541 * Skip if busy, no hurry for cleanup. Keep gc active
2542 * and try the lock again during next round.
2543 */
2544 if (!socket_try_lock(inp->inp_socket)) {
2545 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2546 continue;
2547 }
2548
2549 /*
2550 * Keep gc active unless usecount is 0.
2551 */
2552 so = inp->inp_socket;
2553 if (so->so_usecount == 0) {
2554 if (inp->inp_state != INPCB_STATE_DEAD) {
2555 #if INET6
2556 if (SOCK_CHECK_DOM(so, PF_INET6)) {
2557 in6_pcbdetach(inp);
2558 } else
2559 #endif /* INET6 */
2560 in_pcbdetach(inp);
2561 }
2562 in_pcbdispose(inp);
2563 } else {
2564 socket_unlock(so, 0);
2565 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2566 }
2567 }
2568 lck_rw_done(ipi->ipi_lock);
2569 }
2570
2571 static int
2572 udp_getstat SYSCTL_HANDLER_ARGS
2573 {
2574 #pragma unused(oidp, arg1, arg2)
2575 if (req->oldptr == USER_ADDR_NULL) {
2576 req->oldlen = (size_t)sizeof(struct udpstat);
2577 }
2578
2579 return SYSCTL_OUT(req, &udpstat, MIN(sizeof(udpstat), req->oldlen));
2580 }
2581
2582 void
2583 udp_in_cksum_stats(u_int32_t len)
2584 {
2585 udpstat.udps_rcv_swcsum++;
2586 udpstat.udps_rcv_swcsum_bytes += len;
2587 }
2588
2589 void
2590 udp_out_cksum_stats(u_int32_t len)
2591 {
2592 udpstat.udps_snd_swcsum++;
2593 udpstat.udps_snd_swcsum_bytes += len;
2594 }
2595
2596 #if INET6
2597 void
2598 udp_in6_cksum_stats(u_int32_t len)
2599 {
2600 udpstat.udps_rcv6_swcsum++;
2601 udpstat.udps_rcv6_swcsum_bytes += len;
2602 }
2603
2604 void
2605 udp_out6_cksum_stats(u_int32_t len)
2606 {
2607 udpstat.udps_snd6_swcsum++;
2608 udpstat.udps_snd6_swcsum_bytes += len;
2609 }
2610 #endif /* INET6 */
2611
2612 /*
2613 * Checksum extended UDP header and data.
2614 */
2615 static int
2616 udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen)
2617 {
2618 struct ifnet *ifp = m->m_pkthdr.rcvif;
2619 struct ip *ip = mtod(m, struct ip *);
2620 struct ipovly *ipov = (struct ipovly *)ip;
2621
2622 if (uh->uh_sum == 0) {
2623 udpstat.udps_nosum++;
2624 return 0;
2625 }
2626
2627 /* ip_stripoptions() must have been called before we get here */
2628 ASSERT((ip->ip_hl << 2) == sizeof(*ip));
2629
2630 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
2631 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
2632 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
2633 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
2634 uh->uh_sum = m->m_pkthdr.csum_rx_val;
2635 } else {
2636 uint32_t sum = m->m_pkthdr.csum_rx_val;
2637 uint32_t start = m->m_pkthdr.csum_rx_start;
2638 int32_t trailer = (m_pktlen(m) - (off + ulen));
2639
2640 /*
2641 * Perform 1's complement adjustment of octets
2642 * that got included/excluded in the hardware-
2643 * calculated checksum value. Ignore cases
2644 * where the value already includes the entire
2645 * IP header span, as the sum for those octets
2646 * would already be 0 by the time we get here;
2647 * IP has already performed its header checksum
2648 * checks. If we do need to adjust, restore
2649 * the original fields in the IP header when
2650 * computing the adjustment value. Also take
2651 * care of any trailing bytes and subtract out
2652 * their partial sum.
2653 */
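/*
 * Hypothetical illustration: if the hardware began summing at
 * start < off, m_adj_sum16() folds the (off - start) leading bytes
 * back out of the 1's complement sum; a nonzero trailer is likewise
 * summed out before the bytes themselves are trimmed with m_adj().
 */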
2654 ASSERT(trailer >= 0);
2655 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
2656 ((start != 0 && start != off) || trailer != 0)) {
2657 uint32_t swbytes = (uint32_t)trailer;
2658
2659 if (start < off) {
2660 ip->ip_len += sizeof(*ip);
2661 #if BYTE_ORDER != BIG_ENDIAN
2662 HTONS(ip->ip_len);
2663 HTONS(ip->ip_off);
2664 #endif /* BYTE_ORDER != BIG_ENDIAN */
2665 }
2666 /* callee folds in sum */
2667 sum = m_adj_sum16(m, start, off, ulen, sum);
2668 if (off > start) {
2669 swbytes += (off - start);
2670 } else {
2671 swbytes += (start - off);
2672 }
2673
2674 if (start < off) {
2675 #if BYTE_ORDER != BIG_ENDIAN
2676 NTOHS(ip->ip_off);
2677 NTOHS(ip->ip_len);
2678 #endif /* BYTE_ORDER != BIG_ENDIAN */
2679 ip->ip_len -= sizeof(*ip);
2680 }
2681
2682 if (swbytes != 0) {
2683 udp_in_cksum_stats(swbytes);
2684 }
2685 if (trailer != 0) {
2686 m_adj(m, -trailer);
2687 }
2688 }
2689
2690 /* callee folds in sum */
2691 uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
2692 ip->ip_dst.s_addr, sum + htonl(ulen + IPPROTO_UDP));
2693 }
2694 uh->uh_sum ^= 0xffff;
2695 } else {
2696 uint16_t ip_sum;
2697 char b[9];
2698
2699 bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1));
2700 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
2701 ip_sum = ipov->ih_len;
2702 ipov->ih_len = uh->uh_ulen;
2703 uh->uh_sum = in_cksum(m, ulen + sizeof(struct ip));
2704 bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1));
2705 ipov->ih_len = ip_sum;
2706
2707 udp_in_cksum_stats(ulen);
2708 }
2709
2710 if (uh->uh_sum != 0) {
2711 udpstat.udps_badsum++;
2712 IF_UDP_STATINC(ifp, badchksum);
2713 return -1;
2714 }
2715
2716 return 0;
2717 }
2718
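/*
 * Build UDP keepalive frames for hardware offload: scan every UDP PCB
 * and, for connected sockets that opted in via INP2_KEEPALIVE_OFFLOAD
 * and last sent on this interface, emit a ready-to-transmit datagram
 * the hardware can replay while the host sleeps.
 */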
2719 void
2720 udp_fill_keepalive_offload_frames(ifnet_t ifp,
2721 struct ifnet_keepalive_offload_frame *frames_array,
2722 u_int32_t frames_array_count, size_t frame_data_offset,
2723 u_int32_t *used_frames_count)
2724 {
2725 struct inpcb *inp;
2726 inp_gen_t gencnt;
2727 u_int32_t frame_index = *used_frames_count;
2728
2729 if (ifp == NULL || frames_array == NULL ||
2730 frames_array_count == 0 ||
2731 frame_index >= frames_array_count ||
2732 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2733 return;
2734 }
2735
2736 lck_rw_lock_shared(udbinfo.ipi_lock);
2737 gencnt = udbinfo.ipi_gencnt;
2738 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
2739 struct socket *so;
2740 u_int8_t *data;
2741 struct ifnet_keepalive_offload_frame *frame;
2742 struct mbuf *m = NULL;
2743
2744 if (frame_index >= frames_array_count) {
2745 break;
2746 }
2747
2748 if (inp->inp_gencnt > gencnt ||
2749 inp->inp_state == INPCB_STATE_DEAD) {
2750 continue;
2751 }
2752
2753 if ((so = inp->inp_socket) == NULL ||
2754 (so->so_state & SS_DEFUNCT)) {
2755 continue;
2756 }
2757 /*
2758 * check for keepalive offload flag without socket
2759 * lock to avoid a deadlock
2760 */
2761 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
2762 continue;
2763 }
2764
2765 udp_lock(so, 1, 0);
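/*
 * With the socket now locked, skip flows that are not fully connected
 * (wildcard address or port) or whose last output interface is not
 * the one being offloaded.
 */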
2766 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
2767 udp_unlock(so, 1, 0);
2768 continue;
2769 }
2770 if ((inp->inp_vflag & INP_IPV4) &&
2771 (inp->inp_laddr.s_addr == INADDR_ANY ||
2772 inp->inp_faddr.s_addr == INADDR_ANY)) {
2773 udp_unlock(so, 1, 0);
2774 continue;
2775 }
2776 if ((inp->inp_vflag & INP_IPV6) &&
2777 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
2778 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
2779 udp_unlock(so, 1, 0);
2780 continue;
2781 }
2782 if (inp->inp_lport == 0 || inp->inp_fport == 0) {
2783 udp_unlock(so, 1, 0);
2784 continue;
2785 }
2786 if (inp->inp_last_outifp == NULL ||
2787 inp->inp_last_outifp->if_index != ifp->if_index) {
2788 udp_unlock(so, 1, 0);
2789 continue;
2790 }
2791 if ((inp->inp_vflag & INP_IPV4)) {
2792 if ((frame_data_offset + sizeof(struct udpiphdr) +
2793 inp->inp_keepalive_datalen) >
2794 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2795 udp_unlock(so, 1, 0);
2796 continue;
2797 }
2798 if ((sizeof(struct udpiphdr) +
2799 inp->inp_keepalive_datalen) > _MHLEN) {
2800 udp_unlock(so, 1, 0);
2801 continue;
2802 }
2803 } else {
2804 if ((frame_data_offset + sizeof(struct ip6_hdr) +
2805 sizeof(struct udphdr) +
2806 inp->inp_keepalive_datalen) >
2807 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2808 udp_unlock(so, 1, 0);
2809 continue;
2810 }
2811 if ((sizeof(struct ip6_hdr) + sizeof(struct udphdr) +
2812 inp->inp_keepalive_datalen) > _MHLEN) {
2813 udp_unlock(so, 1, 0);
2814 continue;
2815 }
2816 }
2817 MGETHDR(m, M_WAIT, MT_HEADER);
2818 if (m == NULL) {
2819 udp_unlock(so, 1, 0);
2820 continue;
2821 }
2822 /*
2823 * This inp has all the information that is needed to
2824 * generate an offload frame.
2825 */
2826 if (inp->inp_vflag & INP_IPV4) {
2827 struct ip *ip;
2828 struct udphdr *udp;
2829
2830 frame = &frames_array[frame_index];
2831 frame->length = frame_data_offset +
2832 sizeof(struct udpiphdr) +
2833 inp->inp_keepalive_datalen;
2834 frame->ether_type =
2835 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
2836 frame->interval = inp->inp_keepalive_interval;
2837 switch (inp->inp_keepalive_type) {
2838 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2839 frame->type =
2840 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2841 break;
2842 default:
2843 break;
2844 }
2845 data = mtod(m, u_int8_t *);
2846 bzero(data, sizeof(struct udpiphdr));
2847 ip = (__typeof__(ip))(void *)data;
2848 udp = (__typeof__(udp))(void *) (data +
2849 sizeof(struct ip));
2850 m->m_len = sizeof(struct udpiphdr);
2851 data = data + sizeof(struct udpiphdr);
2852 if (inp->inp_keepalive_datalen > 0 &&
2853 inp->inp_keepalive_data != NULL) {
2854 bcopy(inp->inp_keepalive_data, data,
2855 inp->inp_keepalive_datalen);
2856 m->m_len += inp->inp_keepalive_datalen;
2857 }
2858 m->m_pkthdr.len = m->m_len;
2859
2860 ip->ip_v = IPVERSION;
2861 ip->ip_hl = (sizeof(struct ip) >> 2);
2862 ip->ip_p = IPPROTO_UDP;
2863 ip->ip_len = htons(sizeof(struct udpiphdr) +
2864 (u_short)inp->inp_keepalive_datalen);
2865 ip->ip_ttl = inp->inp_ip_ttl;
2866 ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
2867 ip->ip_src = inp->inp_laddr;
2868 ip->ip_dst = inp->inp_faddr;
2869 ip->ip_sum = in_cksum_hdr_opt(ip);
2870
2871 udp->uh_sport = inp->inp_lport;
2872 udp->uh_dport = inp->inp_fport;
2873 udp->uh_ulen = htons(sizeof(struct udphdr) +
2874 (u_short)inp->inp_keepalive_datalen);
2875
2876 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2877 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
2878 ip->ip_dst.s_addr,
2879 htons(sizeof(struct udphdr) +
2880 (u_short)inp->inp_keepalive_datalen +
2881 IPPROTO_UDP));
2882 m->m_pkthdr.csum_flags =
2883 (CSUM_UDP | CSUM_ZERO_INVERT);
2884 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2885 uh_sum);
2886 }
2887 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2888 in_delayed_cksum(m);
2889 bcopy(m->m_data, frame->data + frame_data_offset,
2890 m->m_len);
2891 } else {
2892 struct ip6_hdr *ip6;
2893 struct udphdr *udp6;
2894
2895 VERIFY(inp->inp_vflag & INP_IPV6);
2896 frame = &frames_array[frame_index];
2897 frame->length = frame_data_offset +
2898 sizeof(struct ip6_hdr) +
2899 sizeof(struct udphdr) +
2900 inp->inp_keepalive_datalen;
2901 frame->ether_type =
2902 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
2903 frame->interval = inp->inp_keepalive_interval;
2904 switch (inp->inp_keepalive_type) {
2905 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2906 frame->type =
2907 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2908 break;
2909 default:
2910 break;
2911 }
2912 data = mtod(m, u_int8_t *);
2913 bzero(data, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
2914 ip6 = (__typeof__(ip6))(void *)data;
2915 udp6 = (__typeof__(udp6))(void *)(data +
2916 sizeof(struct ip6_hdr));
2917 m->m_len = sizeof(struct ip6_hdr) +
2918 sizeof(struct udphdr);
2919 data = data + (sizeof(struct ip6_hdr) +
2920 sizeof(struct udphdr));
2921 if (inp->inp_keepalive_datalen > 0 &&
2922 inp->inp_keepalive_data != NULL) {
2923 bcopy(inp->inp_keepalive_data, data,
2924 inp->inp_keepalive_datalen);
2925 m->m_len += inp->inp_keepalive_datalen;
2926 }
2927 m->m_pkthdr.len = m->m_len;
2928 ip6->ip6_flow = inp->inp_flow & IPV6_FLOWINFO_MASK;
2929 ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
2930 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2931 ip6->ip6_vfc |= IPV6_VERSION;
2932 ip6->ip6_nxt = IPPROTO_UDP;
2933 ip6->ip6_hlim = ip6_defhlim;
2934 ip6->ip6_plen = htons(sizeof(struct udphdr) +
2935 (u_short)inp->inp_keepalive_datalen);
2936 ip6->ip6_src = inp->in6p_laddr;
2937 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
2938 ip6->ip6_src.s6_addr16[1] = 0;
2939 }
2940
2941 ip6->ip6_dst = inp->in6p_faddr;
2942 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
2943 ip6->ip6_dst.s6_addr16[1] = 0;
2944 }
2945
2946 udp6->uh_sport = inp->in6p_lport;
2947 udp6->uh_dport = inp->in6p_fport;
2948 udp6->uh_ulen = htons(sizeof(struct udphdr) +
2949 (u_short)inp->inp_keepalive_datalen);
2950 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2951 udp6->uh_sum = in6_pseudo(&ip6->ip6_src,
2952 &ip6->ip6_dst,
2953 htonl(sizeof(struct udphdr) +
2954 (u_short)inp->inp_keepalive_datalen +
2955 IPPROTO_UDP));
2956 m->m_pkthdr.csum_flags =
2957 (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
2958 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2959 uh_sum);
2960 }
2961 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2962 in6_delayed_cksum(m);
2963 bcopy(m->m_data, frame->data + frame_data_offset,
2964 m->m_len);
2965 }
2966 if (m != NULL) {
2967 m_freem(m);
2968 m = NULL;
2969 }
2970 frame_index++;
2971 udp_unlock(so, 1, 0);
2972 }
2973 lck_rw_done(udbinfo.ipi_lock);
2974 *used_frames_count = frame_index;
2975 }