/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <net/ntstat.h>

#include <kern/zalloc.h>
#include <mach/boolean.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/dlil.h>
#include <net/net_api_stats.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#if INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/udp6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/kdebug.h>

#if IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
extern int ipsec_bypass;
extern int esp_udp_encap_port;
#endif /* IPSEC */

#if NECP
#include <net/necp.h>
#endif /* NECP */

#if FLOW_DIVERT
#include <netinet/flow_divert.h>
#endif /* FLOW_DIVERT */

#if CONTENT_FILTER
#include <net/content_filter.h>
#endif /* CONTENT_FILTER */

#define DBG_LAYER_IN_BEG	NETDBG_CODE(DBG_NETUDP, 0)
#define DBG_LAYER_IN_END	NETDBG_CODE(DBG_NETUDP, 2)
#define DBG_LAYER_OUT_BEG	NETDBG_CODE(DBG_NETUDP, 1)
#define DBG_LAYER_OUT_END	NETDBG_CODE(DBG_NETUDP, 3)
#define DBG_FNC_UDP_INPUT	NETDBG_CODE(DBG_NETUDP, (5 << 8))
#define DBG_FNC_UDP_OUTPUT	NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)
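/*
 * The values above are kdebug tracepoint codes in the DBG_NETUDP
 * subclass: the LAYER_IN/LAYER_OUT pairs bracket per-packet work in
 * udp_input()/udp_output(), and the FNC codes mark entry and exit of
 * those functions via DBG_FUNC_START/DBG_FUNC_END.
 */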

/*
 * UDP protocol implementation.
 * Per RFC 768, August, 1980.
 */
#ifndef COMPAT_42
static int udpcksum = 1;
#else
static int udpcksum = 0;		/* XXX */
#endif
SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udpcksum, 0, "");

int udp_log_in_vain = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
    &udp_log_in_vain, 0, "Log all incoming UDP packets");

static int blackhole = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
    &blackhole, 0, "Do not send port unreachables for refused connects");

struct inpcbhead udb;		/* from udp_var.h */
#define udb6	udb		/* for KAME src sync over BSD*'s */
struct inpcbinfo udbinfo;

#ifndef UDBHASHSIZE
#define UDBHASHSIZE 16
#endif

/* Garbage collection performed during most recent udp_gc() run */
static boolean_t udp_gc_done = FALSE;

#if IPFIREWALL
extern int fw_verbose;
extern void ipfwsyslog(int level, const char *format, ...);
extern void ipfw_stealth_stats_incr_udp(void);

/* Apple logging, log to ipfw.log */
#define log_in_vain_log(a) {						\
	if ((udp_log_in_vain == 3) && (fw_verbose == 2)) {		\
		ipfwsyslog a;						\
	} else if ((udp_log_in_vain == 4) && (fw_verbose == 2)) {	\
		ipfw_stealth_stats_incr_udp();				\
	} else {							\
		log a;							\
	}								\
}
#else /* !IPFIREWALL */
#define log_in_vain_log(a) { log a; }
#endif /* !IPFIREWALL */
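/*
 * udp_log_in_vain selects how unsolicited datagrams are reported:
 * levels 1 and 2 log each refused datagram through the regular kernel
 * log, while levels 3 and 4 take the "Stealth Mode" path in
 * udp_input(), which skips broadcast/multicast sources and, when
 * fw_verbose == 2, either redirects the message to ipfw.log (3) or
 * only bumps the ipfw stealth counter (4).
 */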

static int udp_getstat SYSCTL_HANDLER_ARGS;
struct udpstat udpstat;		/* from udp_var.h */
SYSCTL_PROC(_net_inet_udp, UDPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, udp_getstat, "S,udpstat",
    "UDP statistics (struct udpstat, netinet/udp_var.h)");

SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount,
    CTLFLAG_RD | CTLFLAG_LOCKED, &udbinfo.ipi_count, 0,
    "Number of active PCBs");

__private_extern__ int udp_use_randomport = 1;
SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udp_use_randomport, 0,
    "Randomize UDP port numbers");

#if INET6
struct udp_in6 {
	struct sockaddr_in6	uin6_sin;
	u_char			uin6_init_done : 1;
};
struct udp_ip6 {
	struct ip6_hdr		uip6_ip6;
	u_char			uip6_init_done : 1;
};

int udp_abort(struct socket *);
int udp_attach(struct socket *, int, struct proc *);
int udp_bind(struct socket *, struct sockaddr *, struct proc *);
int udp_connect(struct socket *, struct sockaddr *, struct proc *);
int udp_connectx(struct socket *, struct sockaddr *,
    struct sockaddr *, struct proc *, uint32_t, sae_associd_t,
    sae_connid_t *, uint32_t, void *, uint32_t, struct uio *, user_ssize_t *);
int udp_detach(struct socket *);
int udp_disconnect(struct socket *);
int udp_disconnectx(struct socket *, sae_associd_t, sae_connid_t);
int udp_send(struct socket *, int, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
    struct sockaddr_in *, struct udp_in6 *, struct udp_ip6 *, struct ifnet *);
#else /* !INET6 */
static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
    struct sockaddr_in *, struct ifnet *);
#endif /* !INET6 */
static int udp_input_checksum(struct mbuf *, struct udphdr *, int, int);
int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
static void udp_gc(struct inpcbinfo *);

struct pr_usrreqs udp_usrreqs = {
	.pru_abort = udp_abort,
	.pru_attach = udp_attach,
	.pru_bind = udp_bind,
	.pru_connect = udp_connect,
	.pru_connectx = udp_connectx,
	.pru_control = in_control,
	.pru_detach = udp_detach,
	.pru_disconnect = udp_disconnect,
	.pru_disconnectx = udp_disconnectx,
	.pru_peeraddr = in_getpeeraddr,
	.pru_send = udp_send,
	.pru_shutdown = udp_shutdown,
	.pru_sockaddr = in_getsockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive,
	.pru_soreceive_list = soreceive_list,
};
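/*
 * This pr_usrreqs table is how the socket layer dispatches into the
 * protocol: socket(2) reaches pru_attach, bind(2) pru_bind,
 * connect(2) pru_connect/pru_connectx, and sendto(2)/sendmsg(2)
 * arrive via pru_send, i.e. udp_send(), which wraps udp_output().
 */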

void
udp_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
	static int udp_initialized = 0;
	vm_size_t str_size;
	struct inpcbinfo *pcbinfo;

	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

	if (udp_initialized) {
		return;
	}
	udp_initialized = 1;
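	/*
	 * (nmbclusters << MCLSHIFT) is the size of the mbuf cluster
	 * pool in bytes, and shifting right by MBSHIFT converts it to
	 * megabytes; e.g. 65536 clusters of 2 KB give pool_size == 128,
	 * so any pool of 96 MB or more selects the larger udp_recvspace
	 * below.
	 */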
	uint32_t pool_size = (nmbclusters << MCLSHIFT) >> MBSHIFT;
	if (pool_size >= 96) {
		/* Improves 10GbE UDP performance. */
		udp_recvspace = 786896;
	}
	LIST_INIT(&udb);
	udbinfo.ipi_listhead = &udb;
	udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
	    &udbinfo.ipi_hashmask);
	udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
	    &udbinfo.ipi_porthashmask);
	str_size = (vm_size_t) sizeof(struct inpcb);
	udbinfo.ipi_zone = zinit(str_size, 80000 * str_size, 8192, "udpcb");

	pcbinfo = &udbinfo;
	/*
	 * allocate lock group attribute and group for udp pcb mutexes
	 */
	pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("udppcb",
	    pcbinfo->ipi_lock_grp_attr);
	pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
	if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
	    pcbinfo->ipi_lock_attr)) == NULL) {
		panic("%s: unable to allocate PCB lock\n", __func__);
		/* NOTREACHED */
	}

	udbinfo.ipi_gc = udp_gc;
	in_pcbinfo_attach(&udbinfo);
}

void
udp_input(struct mbuf *m, int iphlen)
{
	struct ip *ip;
	struct udphdr *uh;
	struct inpcb *inp;
	struct mbuf *opts = NULL;
	int len, isbroadcast;
	struct ip save_ip;
	struct sockaddr *append_sa;
	struct inpcbinfo *pcbinfo = &udbinfo;
	struct sockaddr_in udp_in;
	struct ip_moptions *imo = NULL;
	int foundmembership = 0, ret = 0;
#if INET6
	struct udp_in6 udp_in6;
	struct udp_ip6 udp_ip6;
#endif /* INET6 */
	struct ifnet *ifp = m->m_pkthdr.rcvif;
	boolean_t cell = IFNET_IS_CELLULAR(ifp);
	boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
	boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));

	bzero(&udp_in, sizeof(udp_in));
	udp_in.sin_len = sizeof(struct sockaddr_in);
	udp_in.sin_family = AF_INET;
#if INET6
	bzero(&udp_in6, sizeof(udp_in6));
	udp_in6.uin6_sin.sin6_len = sizeof(struct sockaddr_in6);
	udp_in6.uin6_sin.sin6_family = AF_INET6;
#endif /* INET6 */

	udpstat.udps_ipackets++;

	KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	/*
	 * Strip IP options, if any. Ideally we would skip this, make
	 * the options available to the user, and use them on returned
	 * packets, but we don't yet have a way to check the checksum
	 * with options still present.
	 */
	if (iphlen > sizeof(struct ip)) {
		ip_stripoptions(m);
		iphlen = sizeof(struct ip);
	}

	/*
	 * Get IP and UDP header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	if (m->m_len < iphlen + sizeof(struct udphdr)) {
		m = m_pullup(m, iphlen + sizeof(struct udphdr));
		if (m == NULL) {
			udpstat.udps_hdrops++;
			KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
			    0, 0, 0, 0, 0);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);

	/* destination port of 0 is illegal, based on RFC768. */
	if (uh->uh_dport == 0) {
		IF_UDP_STATINC(ifp, port0);
		goto bad;
	}

	KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
	    ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);

	/*
	 * Make mbuf data length reflect UDP length.
	 * If not enough data to reflect UDP length, drop.
	 */
	len = ntohs((u_short)uh->uh_ulen);
	if (ip->ip_len != len) {
		if (len > ip->ip_len || len < sizeof(struct udphdr)) {
			udpstat.udps_badlen++;
			IF_UDP_STATINC(ifp, badlength);
			goto bad;
		}
		m_adj(m, len - ip->ip_len);
		/* ip->ip_len = len; */
	}
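	/*
	 * ip->ip_len was converted to host order and reduced to the
	 * payload length by ip_input(), so it should normally equal
	 * uh_ulen here; e.g. with ip_len == 120 and uh_ulen == 100 the
	 * 20 excess trailer bytes are trimmed by m_adj(m, -20), while
	 * uh_ulen > ip_len means the datagram is truncated and dropped.
	 */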
	/*
	 * Save a copy of the IP header in case we want to restore it
	 * for sending an ICMP error message in response.
	 */
	save_ip = *ip;

	/*
	 * Checksum extended UDP header and data.
	 */
	if (udp_input_checksum(m, uh, iphlen, len)) {
		goto bad;
	}
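	/*
	 * For IPv4 a received uh_sum of 0 means the sender did not
	 * compute a checksum at all, which RFC 768 permits and
	 * udp_input_checksum() accepts; a nonzero checksum that fails
	 * verification makes it return nonzero and the datagram is
	 * dropped here.
	 */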

	isbroadcast = in_broadcast(ip->ip_dst, ifp);

	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || isbroadcast) {
		int reuse_sock = 0, mcast_delivered = 0;

		lck_rw_lock_shared(pcbinfo->ipi_lock);
		/*
		 * Deliver a multicast or broadcast datagram to *all* sockets
		 * for which the local and remote addresses and ports match
		 * those of the incoming datagram. This allows more than
		 * one process to receive multi/broadcasts on the same port.
		 * (This really ought to be done for unicast datagrams as
		 * well, but that would cause problems with existing
		 * applications that open both address-specific sockets and
		 * a wildcard socket listening to the same port -- they would
		 * end up receiving duplicates of every unicast datagram.
		 * Those applications open the multiple sockets to overcome an
		 * inadequacy of the UDP socket interface, but for backwards
		 * compatibility we avoid the problem here rather than
		 * fixing the interface. Maybe 4.5BSD will remedy this?)
		 */

		/*
		 * Construct sockaddr format source address.
		 */
		udp_in.sin_port = uh->uh_sport;
		udp_in.sin_addr = ip->ip_src;
		/*
		 * Locate pcb(s) for datagram.
		 * (Algorithm copied from raw_intr().)
		 */
#if INET6
		udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
#endif /* INET6 */
		LIST_FOREACH(inp, &udb, inp_list) {
#if NECP
			int skipit;
#endif /* NECP */

			if (inp->inp_socket == NULL) {
				continue;
			}
			if (inp != sotoinpcb(inp->inp_socket)) {
				panic("%s: bad so back ptr inp=%p\n",
				    __func__, inp);
				/* NOTREACHED */
			}
#if INET6
			if ((inp->inp_vflag & INP_IPV4) == 0) {
				continue;
			}
#endif /* INET6 */
			if (inp_restricted_recv(inp, ifp)) {
				continue;
			}

			if ((inp->inp_moptions == NULL) &&
			    (ntohl(ip->ip_dst.s_addr) !=
			    INADDR_ALLHOSTS_GROUP) && (isbroadcast == 0)) {
				continue;
			}

			if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
			    WNT_STOPUSING) {
				continue;
			}

			udp_lock(inp->inp_socket, 1, 0);

			if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
			    WNT_STOPUSING) {
				udp_unlock(inp->inp_socket, 1, 0);
				continue;
			}

			if (inp->inp_lport != uh->uh_dport) {
				udp_unlock(inp->inp_socket, 1, 0);
				continue;
			}
			if (inp->inp_laddr.s_addr != INADDR_ANY) {
				if (inp->inp_laddr.s_addr !=
				    ip->ip_dst.s_addr) {
					udp_unlock(inp->inp_socket, 1, 0);
					continue;
				}
			}
			if (inp->inp_faddr.s_addr != INADDR_ANY) {
				if (inp->inp_faddr.s_addr !=
				    ip->ip_src.s_addr ||
				    inp->inp_fport != uh->uh_sport) {
					udp_unlock(inp->inp_socket, 1, 0);
					continue;
				}
			}

			if (isbroadcast == 0 && (ntohl(ip->ip_dst.s_addr) !=
			    INADDR_ALLHOSTS_GROUP)) {
				struct sockaddr_in group;
				int blocked;

				if ((imo = inp->inp_moptions) == NULL) {
					udp_unlock(inp->inp_socket, 1, 0);
					continue;
				}
				IMO_LOCK(imo);

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(imo, ifp,
				    &group, &udp_in);
				if (blocked == MCAST_PASS) {
					foundmembership = 1;
				}

				IMO_UNLOCK(imo);
				if (!foundmembership) {
					udp_unlock(inp->inp_socket, 1, 0);
					if (blocked == MCAST_NOTSMEMBER ||
					    blocked == MCAST_MUTED) {
						udpstat.udps_filtermcast++;
					}
					continue;
				}
				foundmembership = 0;
			}

			reuse_sock = (inp->inp_socket->so_options &
			    (SO_REUSEPORT | SO_REUSEADDR));

#if NECP
			skipit = 0;
			if (!necp_socket_is_allowed_to_send_recv_v4(inp,
			    uh->uh_dport, uh->uh_sport, &ip->ip_dst,
			    &ip->ip_src, ifp, NULL, NULL, NULL)) {
				/* do not inject data to pcb */
				skipit = 1;
			}
			if (skipit == 0)
#endif /* NECP */
			{
				struct mbuf *n = NULL;

				if (reuse_sock) {
					n = m_copy(m, 0, M_COPYALL);
				}
#if INET6
				udp_append(inp, ip, m,
				    iphlen + sizeof(struct udphdr),
				    &udp_in, &udp_in6, &udp_ip6, ifp);
#else /* !INET6 */
				udp_append(inp, ip, m,
				    iphlen + sizeof(struct udphdr),
				    &udp_in, ifp);
#endif /* !INET6 */
				mcast_delivered++;

				m = n;
			}
			udp_unlock(inp->inp_socket, 1, 0);

			/*
			 * Don't look for additional matches if this one does
			 * not have either the SO_REUSEPORT or SO_REUSEADDR
			 * socket options set. This heuristic avoids searching
			 * through all pcbs in the common case of a non-shared
			 * port. It assumes that an application will never
			 * clear these options after setting them.
			 */
			if (reuse_sock == 0 || m == NULL) {
				break;
			}

			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
			/*
			 * Recompute IP and UDP header pointers for new mbuf
			 */
			ip = mtod(m, struct ip *);
			uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
		}
		lck_rw_done(pcbinfo->ipi_lock);

		if (mcast_delivered == 0) {
			/*
			 * No matching pcb found; discard datagram.
			 * (No need to send an ICMP Port Unreachable
			 * for a broadcast or multicast datagram.)
			 */
			udpstat.udps_noportbcast++;
			IF_UDP_STATINC(ifp, port_unreach);
			goto bad;
		}

		/* free the extra copy of the mbuf, or the one skipped by IPsec */
		if (m != NULL) {
			m_freem(m);
		}
		KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return;
	}

#if IPSEC
	/*
	 * UDP to port 4500 with a payload where the first four bytes are
	 * not zero is a UDP encapsulated IPsec packet. Packets where
	 * the payload is one byte and that byte is 0xFF are NAT keepalive
	 * packets. Decapsulate the ESP packet and carry on with IPsec input
	 * or discard the NAT keep-alive.
	 */
	if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
	    (uh->uh_dport == ntohs((u_short)esp_udp_encap_port) ||
	    uh->uh_sport == ntohs((u_short)esp_udp_encap_port))) {
		int payload_len = len - sizeof(struct udphdr) > 4 ? 4 :
		    len - sizeof(struct udphdr);

		if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
			if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) +
			    payload_len)) == NULL) {
				udpstat.udps_hdrops++;
				KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
				    0, 0, 0, 0, 0);
				return;
			}
			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

			ip = mtod(m, struct ip *);
			uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
		}
		/* Check for NAT keepalive packet */
		if (payload_len == 1 && *(u_int8_t *)
		    ((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
			m_freem(m);
			KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
			    0, 0, 0, 0, 0);
			return;
		} else if (payload_len == 4 && *(u_int32_t *)(void *)
		    ((caddr_t)uh + sizeof(struct udphdr)) != 0) {
			/* UDP encapsulated IPsec packet to pass through NAT */
			KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
			    0, 0, 0, 0, 0);
			/* preserve the udp header */
			esp4_input(m, iphlen + sizeof(struct udphdr));
			return;
		}
	}
#endif /* IPSEC */
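	/*
	 * After the UDP header, the three encapsulations distinguished
	 * above (RFC 3948) look like: a single 0xFF byte for a
	 * NAT-keepalive, a 32-bit zero word (the non-ESP marker, i.e.
	 * IKE traffic that falls through to normal socket delivery),
	 * or a nonzero 32-bit word, the SPI of a UDP-encapsulated ESP
	 * packet handed to esp4_input().
	 */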

	/*
	 * Locate pcb for datagram.
	 */
	inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
	    ip->ip_dst, uh->uh_dport, 1, ifp);
	if (inp == NULL) {
		IF_UDP_STATINC(ifp, port_unreach);

		if (udp_log_in_vain) {
			char buf[MAX_IPv4_STR_LEN];
			char buf2[MAX_IPv4_STR_LEN];

			/* check src and dst address */
			if (udp_log_in_vain < 3) {
				log(LOG_INFO, "Connection attempt to "
				    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
				    &ip->ip_dst, buf, sizeof(buf)),
				    ntohs(uh->uh_dport), inet_ntop(AF_INET,
				    &ip->ip_src, buf2, sizeof(buf2)),
				    ntohs(uh->uh_sport));
			} else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
			    ip->ip_dst.s_addr != ip->ip_src.s_addr) {
				log_in_vain_log((LOG_INFO,
				    "Stealth Mode connection attempt to "
				    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
				    &ip->ip_dst, buf, sizeof(buf)),
				    ntohs(uh->uh_dport), inet_ntop(AF_INET,
				    &ip->ip_src, buf2, sizeof(buf2)),
				    ntohs(uh->uh_sport)))
			}
		}
		udpstat.udps_noport++;
		if (m->m_flags & (M_BCAST | M_MCAST)) {
			udpstat.udps_noportbcast++;
			goto bad;
		}
#if ICMP_BANDLIM
		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) {
			goto bad;
		}
#endif /* ICMP_BANDLIM */
		if (blackhole) {
			if (ifp && ifp->if_type != IFT_LOOP) {
				goto bad;
			}
		}
		*ip = save_ip;
		ip->ip_len += iphlen;
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
		KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return;
	}
	udp_lock(inp->inp_socket, 1, 0);

	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
		udp_unlock(inp->inp_socket, 1, 0);
		IF_UDP_STATINC(ifp, cleanup);
		goto bad;
	}
#if NECP
	if (!necp_socket_is_allowed_to_send_recv_v4(inp, uh->uh_dport,
	    uh->uh_sport, &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
		udp_unlock(inp->inp_socket, 1, 0);
		IF_UDP_STATINC(ifp, badipsec);
		goto bad;
	}
#endif /* NECP */

	/*
	 * Construct sockaddr format source address.
	 * Stuff source address and datagram in user buffer.
	 */
	udp_in.sin_port = uh->uh_sport;
	udp_in.sin_addr = ip->ip_src;
	if ((inp->inp_flags & INP_CONTROLOPTS) != 0 ||
	    (inp->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
	    (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
	    (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
#if INET6
		if (inp->inp_vflag & INP_IPV6) {
			int savedflags;

			ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
			savedflags = inp->inp_flags;
			inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
			ret = ip6_savecontrol(inp, m, &opts);
			inp->inp_flags = savedflags;
		} else
#endif /* INET6 */
		{
			ret = ip_savecontrol(inp, &opts, ip, m);
		}
		if (ret != 0) {
			udp_unlock(inp->inp_socket, 1, 0);
			goto bad;
		}
	}
	m_adj(m, iphlen + sizeof(struct udphdr));

	KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
	    save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);

#if INET6
	if (inp->inp_vflag & INP_IPV6) {
		in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
		append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
	} else
#endif /* INET6 */
	{
		append_sa = (struct sockaddr *)&udp_in;
	}
	if (nstat_collect) {
		INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
		INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, m->m_pkthdr.len);
		inp_set_activity_bitmap(inp);
	}
	so_recv_data_stat(inp->inp_socket, m, 0);
	if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa,
	    m, opts, NULL) == 0) {
		udpstat.udps_fullsock++;
	} else {
		sorwakeup(inp->inp_socket);
	}
	udp_unlock(inp->inp_socket, 1, 0);
	KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
	return;
bad:
	m_freem(m);
	if (opts) {
		m_freem(opts);
	}
	KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

#if INET6
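/*
 * Build the IPv4-mapped IPv6 header used when an IPv4 datagram is
 * delivered to an AF_INET6 socket: the v4 addresses are embedded in
 * the low 32 bits with the 0000:ffff prefix word supplied by
 * IPV6_ADDR_INT32_SMP, so e.g. 192.0.2.1 appears as ::ffff:192.0.2.1.
 */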
static void
ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
{
	bzero(ip6, sizeof(*ip6));

	ip6->ip6_vfc = IPV6_VERSION;
	ip6->ip6_plen = ip->ip_len;
	ip6->ip6_nxt = ip->ip_p;
	ip6->ip6_hlim = ip->ip_ttl;
	if (ip->ip_src.s_addr) {
		ip6->ip6_src.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
		ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
	}
	if (ip->ip_dst.s_addr) {
		ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
		ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
	}
}
#endif /* INET6 */

/*
 * subroutine of udp_input(), mainly for source code readability.
 */
static void
#if INET6
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
    struct udp_ip6 *pudp_ip6, struct ifnet *ifp)
#else /* !INET6 */
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *pudp_in, struct ifnet *ifp)
#endif /* !INET6 */
{
	struct sockaddr *append_sa;
	struct mbuf *opts = 0;
	boolean_t cell = IFNET_IS_CELLULAR(ifp);
	boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
	boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
	int ret = 0;

#if CONFIG_MACF_NET
	if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
		m_freem(n);
		return;
	}
#endif /* CONFIG_MACF_NET */
	if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
	    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
	    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
	    (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
#if INET6
		if (last->inp_vflag & INP_IPV6) {
			int savedflags;

			if (pudp_ip6->uip6_init_done == 0) {
				ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
				pudp_ip6->uip6_init_done = 1;
			}
			savedflags = last->inp_flags;
			last->inp_flags &= ~INP_UNMAPPABLEOPTS;
			ret = ip6_savecontrol(last, n, &opts);
			if (ret != 0) {
				last->inp_flags = savedflags;
				goto error;
			}
			last->inp_flags = savedflags;
		} else
#endif /* INET6 */
		{
			ret = ip_savecontrol(last, &opts, ip, n);
			if (ret != 0) {
				goto error;
			}
		}
	}
#if INET6
	if (last->inp_vflag & INP_IPV6) {
		if (pudp_in6->uin6_init_done == 0) {
			in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
			pudp_in6->uin6_init_done = 1;
		}
		append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
	} else
#endif /* INET6 */
	append_sa = (struct sockaddr *)pudp_in;
	if (nstat_collect) {
		INP_ADD_STAT(last, cell, wifi, wired, rxpackets, 1);
		INP_ADD_STAT(last, cell, wifi, wired, rxbytes,
		    n->m_pkthdr.len);
		inp_set_activity_bitmap(last);
	}
	so_recv_data_stat(last->inp_socket, n, 0);
	m_adj(n, off);
	if (sbappendaddr(&last->inp_socket->so_rcv, append_sa,
	    n, opts, NULL) == 0) {
		udpstat.udps_fullsock++;
	} else {
		sorwakeup(last->inp_socket);
	}
	return;
error:
	m_freem(n);
	m_freem(opts);
}

/*
 * Notify a udp user of an asynchronous error;
 * just wake up so that he can collect error status.
 */
void
udp_notify(struct inpcb *inp, int errno)
{
	inp->inp_socket->so_error = errno;
	sorwakeup(inp->inp_socket);
	sowwakeup(inp->inp_socket);
}

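/*
 * Control input for UDP: an ICMP-derived PRC_ code is translated
 * through inetctlerrmap into an errno and delivered to the matching
 * pcb(s) via udp_notify(); e.g. an ICMP port unreachable for a
 * connected socket surfaces as ECONNREFUSED on that socket, while
 * redirects invalidate the cached route through in_rtchange().
 */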
void
udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
{
	struct ip *ip = vip;
	void (*notify)(struct inpcb *, int) = udp_notify;
	struct in_addr faddr;
	struct inpcb *inp = NULL;

	faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
		return;
	}

	if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD) {
		ip = 0;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}
	if (ip) {
		struct udphdr uh;

		bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof(uh));
		inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport,
		    ip->ip_src, uh.uh_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			udp_lock(inp->inp_socket, 1, 0);
			if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
			    WNT_STOPUSING) {
				udp_unlock(inp->inp_socket, 1, 0);
				return;
			}
			(*notify)(inp, inetctlerrmap[cmd]);
			udp_unlock(inp->inp_socket, 1, 0);
		}
	} else {
		in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
	}
}

int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
	int error = 0, optval = 0;
	struct inpcb *inp;

	/* Allow <SOL_SOCKET,SO_FLUSH> at this level */
	if (sopt->sopt_level != IPPROTO_UDP &&
	    !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
		return ip_ctloutput(so, sopt);
	}

	inp = sotoinpcb(so);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case UDP_NOCKSUM:
			/* This option is settable only for UDP over IPv4 */
			if (!(inp->inp_vflag & INP_IPV4)) {
				error = EINVAL;
				break;
			}

			if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval))) != 0) {
				break;
			}

			if (optval != 0) {
				inp->inp_flags |= INP_UDP_NOCKSUM;
			} else {
				inp->inp_flags &= ~INP_UDP_NOCKSUM;
			}
			break;
		case UDP_KEEPALIVE_OFFLOAD:
		{
			struct udp_keepalive_offload ka;
			/*
			 * If the socket is not connected, the stack will
			 * not know the destination address to put in the
			 * keepalive datagram. Return an error now instead
			 * of failing later.
			 */
			if (!(so->so_state & SS_ISCONNECTED)) {
				error = EINVAL;
				break;
			}
			if (sopt->sopt_valsize != sizeof(ka)) {
				error = EINVAL;
				break;
			}
			if ((error = sooptcopyin(sopt, &ka, sizeof(ka),
			    sizeof(ka))) != 0) {
				break;
			}

			/* application should specify the type */
			if (ka.ka_type == 0) {
				return EINVAL;
			}

			if (ka.ka_interval == 0) {
				/*
				 * if interval is 0, disable the offload
				 * mechanism
				 */
				if (inp->inp_keepalive_data != NULL) {
					FREE(inp->inp_keepalive_data,
					    M_TEMP);
				}
				inp->inp_keepalive_data = NULL;
				inp->inp_keepalive_datalen = 0;
				inp->inp_keepalive_interval = 0;
				inp->inp_keepalive_type = 0;
				inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD;
			} else {
				if (inp->inp_keepalive_data != NULL) {
					FREE(inp->inp_keepalive_data,
					    M_TEMP);
					inp->inp_keepalive_data = NULL;
				}

				inp->inp_keepalive_datalen = min(
					ka.ka_data_len,
					UDP_KEEPALIVE_OFFLOAD_DATA_SIZE);
				if (inp->inp_keepalive_datalen > 0) {
					MALLOC(inp->inp_keepalive_data,
					    u_int8_t *,
					    inp->inp_keepalive_datalen,
					    M_TEMP, M_WAITOK);
					if (inp->inp_keepalive_data == NULL) {
						inp->inp_keepalive_datalen = 0;
						error = ENOMEM;
						break;
					}
					bcopy(ka.ka_data,
					    inp->inp_keepalive_data,
					    inp->inp_keepalive_datalen);
				} else {
					inp->inp_keepalive_datalen = 0;
				}
				inp->inp_keepalive_interval =
				    min(UDP_KEEPALIVE_INTERVAL_MAX_SECONDS,
				    ka.ka_interval);
				inp->inp_keepalive_type = ka.ka_type;
				inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD;
			}
			break;
		}
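		/*
		 * Sketch of the corresponding user-space call (fields
		 * per the private struct udp_keepalive_offload in
		 * <netinet/udp.h>; the type value shown is illustrative):
		 *
		 *	struct udp_keepalive_offload ka = {
		 *		.ka_interval = 30,	// seconds
		 *		.ka_type = 1,		// nonzero, required above
		 *		.ka_data_len = 1,
		 *		.ka_data = { 0xff },
		 *	};
		 *	setsockopt(s, IPPROTO_UDP, UDP_KEEPALIVE_OFFLOAD,
		 *	    &ka, sizeof(ka));
		 *
		 * The socket must already be connected, per the checks above.
		 */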
		case SO_FLUSH:
			if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval))) != 0) {
				break;
			}

			error = inp_flush(inp, optval);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;

	case SOPT_GET:
		switch (sopt->sopt_name) {
		case UDP_NOCKSUM:
			optval = inp->inp_flags & INP_UDP_NOCKSUM;
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0) {
			error = sooptcopyout(sopt, &optval, sizeof(optval));
		}
		break;
	}
	return error;
}

static int
udp_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the pcb list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(udbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = udbinfo.ipi_count;
		req->oldidx = 2 * (sizeof(xig))
		    + (n + n / 8) * sizeof(struct xinpcb);
		lck_rw_done(udbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(udbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = udbinfo.ipi_gencnt;
	n = udbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof(xig);
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof(xig));
	if (error) {
		lck_rw_done(udbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(udbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(udbinfo.ipi_lock);
		return ENOMEM;
	}

	for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
	    inp = LIST_NEXT(inp, inp_list)) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD) {
			inp_list[i++] = inp;
		}
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		struct xinpcb xi;

		inp = inp_list[i];

		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
			continue;
		}
		udp_lock(inp->inp_socket, 1, 0);
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			udp_unlock(inp->inp_socket, 1, 0);
			continue;
		}
		if (inp->inp_gencnt > gencnt) {
			udp_unlock(inp->inp_socket, 1, 0);
			continue;
		}

		bzero(&xi, sizeof(xi));
		xi.xi_len = sizeof(xi);
		/* XXX should avoid extra copy */
		inpcb_to_compat(inp, &xi.xi_inp);
		if (inp->inp_socket) {
			sotoxsocket(inp->inp_socket, &xi.xi_socket);
		}

		udp_unlock(inp->inp_socket, 1, 0);

		error = SYSCTL_OUT(req, &xi, sizeof(xi));
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof(xig);
		xig.xig_gen = udbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = udbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof(xig));
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(udbinfo.ipi_lock);
	return error;
}

SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist,
    "S,xinpcb", "List of active UDP sockets");

#if !CONFIG_EMBEDDED

static int
udp_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the pcb list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_shared(udbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = udbinfo.ipi_count;
		req->oldidx =
		    2 * (sizeof(xig)) + (n + n / 8) * sizeof(struct xinpcb64);
		lck_rw_done(udbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(udbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = udbinfo.ipi_gencnt;
	n = udbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof(xig);
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof(xig));
	if (error) {
		lck_rw_done(udbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(udbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(udbinfo.ipi_lock);
		return ENOMEM;
	}

	for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
	    inp = LIST_NEXT(inp, inp_list)) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD) {
			inp_list[i++] = inp;
		}
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		struct xinpcb64 xi;

		inp = inp_list[i];

		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
			continue;
		}
		udp_lock(inp->inp_socket, 1, 0);
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			udp_unlock(inp->inp_socket, 1, 0);
			continue;
		}
		if (inp->inp_gencnt > gencnt) {
			udp_unlock(inp->inp_socket, 1, 0);
			continue;
		}

		bzero(&xi, sizeof(xi));
		xi.xi_len = sizeof(xi);
		inpcb_to_xinpcb64(inp, &xi);
		if (inp->inp_socket) {
			sotoxsocket64(inp->inp_socket, &xi.xi_socket);
		}

		udp_unlock(inp->inp_socket, 1, 0);

		error = SYSCTL_OUT(req, &xi, sizeof(xi));
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof(xig);
		xig.xig_gen = udbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = udbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof(xig));
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(udbinfo.ipi_lock);
	return error;
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64,
    "S,xinpcb64", "List of active UDP sockets");

#endif /* !CONFIG_EMBEDDED */

static int
udp_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	return get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n,
    "S,xinpcb_n", "List of active UDP sockets");

__private_extern__ void
udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
    bitstr_t *bitfield)
{
	inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
	    &udbinfo);
}

__private_extern__ uint32_t
udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
{
	return inpcb_count_opportunistic(ifindex, &udbinfo, flags);
}

__private_extern__ uint32_t
udp_find_anypcb_byaddr(struct ifaddr *ifa)
{
	return inpcb_find_anypcb_byaddr(ifa, &udbinfo);
}

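/*
 * Parse an IP_PKTINFO control message supplied with sendmsg(2): a
 * nonzero ipi_ifindex selects the output interface (and takes
 * precedence), otherwise ipi_spec_dst supplies a temporary source
 * address. An illustrative user-space fragment ("en0" is just an
 * example interface):
 *
 *	struct in_pktinfo pi = { .ipi_ifindex = if_nametoindex("en0") };
 *	// placed in a cmsghdr with cmsg_level = IPPROTO_IP,
 *	// cmsg_type = IP_PKTINFO, cmsg_len = CMSG_LEN(sizeof(pi))
 */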
static int
udp_check_pktinfo(struct mbuf *control, struct ifnet **outif,
    struct in_addr *laddr)
{
	struct cmsghdr *cm = 0;
	struct in_pktinfo *pktinfo;
	struct ifnet *ifp;

	if (outif != NULL) {
		*outif = NULL;
	}

	/*
	 * XXX: Currently, we assume all the optional information is stored
	 * in a single mbuf.
	 */
	if (control->m_next) {
		return EINVAL;
	}

	if (control->m_len < CMSG_LEN(0)) {
		return EINVAL;
	}

	for (cm = M_FIRST_CMSGHDR(control);
	    is_cmsg_valid(control, cm);
	    cm = M_NXT_CMSGHDR(control, cm)) {
		if (cm->cmsg_level != IPPROTO_IP ||
		    cm->cmsg_type != IP_PKTINFO) {
			continue;
		}

		if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) {
			return EINVAL;
		}

		pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm);

		/* Check for a valid ifindex in pktinfo */
		ifnet_head_lock_shared();

		if (pktinfo->ipi_ifindex > if_index) {
			ifnet_head_done();
			return ENXIO;
		}

		/*
		 * If ipi_ifindex is specified it takes precedence
		 * over ipi_spec_dst.
		 */
		if (pktinfo->ipi_ifindex) {
			ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
			if (ifp == NULL) {
				ifnet_head_done();
				return ENXIO;
			}
			if (outif != NULL) {
				ifnet_reference(ifp);
				*outif = ifp;
			}
			ifnet_head_done();
			laddr->s_addr = INADDR_ANY;
			break;
		}

		ifnet_head_done();

		/*
		 * Use the provided ipi_spec_dst address for temp
		 * source address.
		 */
		*laddr = pktinfo->ipi_spec_dst;
		break;
	}
	return 0;
}

int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct proc *p)
{
	struct udpiphdr *ui;
	int len = m->m_pkthdr.len;
	struct sockaddr_in *sin;
	struct in_addr origladdr, laddr, faddr, pi_laddr;
	u_short lport, fport;
	int error = 0, udp_dodisconnect = 0, pktinfo = 0;
	struct socket *so = inp->inp_socket;
	int soopts = 0;
	struct mbuf *inpopts;
	struct ip_moptions *mopts;
	struct route ro;
	struct ip_out_args ipoa;
#if CONTENT_FILTER
	struct m_tag *cfil_tag = NULL;
	bool cfil_faddr_use = false;
	uint32_t cfil_so_state_change_cnt = 0;
	short cfil_so_options = 0;
	struct sockaddr *cfil_faddr = NULL;
#endif

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

	struct ifnet *outif = NULL;
	struct flowadv *adv = &ipoa.ipoa_flowadv;
	int sotc = SO_TC_UNSPEC;
	int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
	struct ifnet *origoutifp = NULL;
	int flowadv = 0;

	/* Enable flow advisory only when connected */
	flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
	pi_laddr.s_addr = INADDR_ANY;

	KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

	socket_lock_assert_owned(so);

#if CONTENT_FILTER
	/*
	 * If socket is subject to UDP Content Filter and no addr is passed in,
	 * retrieve CFIL saved state from mbuf and use it if necessary.
	 */
	if (so->so_cfil_db && !addr) {
		cfil_tag = cfil_udp_get_socket_state(m, &cfil_so_state_change_cnt, &cfil_so_options, &cfil_faddr);
		if (cfil_tag) {
			sin = (struct sockaddr_in *)(void *)cfil_faddr;
			if (inp && inp->inp_faddr.s_addr == INADDR_ANY) {
				/*
				 * Socket is unconnected, simply use the saved faddr as 'addr' to go through
				 * the connect/disconnect logic.
				 */
				addr = (struct sockaddr *)cfil_faddr;
			} else if ((so->so_state_change_cnt != cfil_so_state_change_cnt) &&
			    (inp->inp_fport != sin->sin_port ||
			    inp->inp_faddr.s_addr != sin->sin_addr.s_addr)) {
				/*
				 * Socket is connected but socket state and dest addr/port changed.
				 * We need to use the saved faddr info.
				 */
				cfil_faddr_use = true;
			}
		}
	}
#endif

	if (control != NULL) {
		sotc = so_tc_from_control(control, &netsvctype);
		VERIFY(outif == NULL);
		error = udp_check_pktinfo(control, &outif, &pi_laddr);
		m_freem(control);
		control = NULL;
		if (error) {
			goto release;
		}
		pktinfo++;
		if (outif != NULL) {
			ipoa.ipoa_boundif = outif->if_index;
		}
	}
	if (sotc == SO_TC_UNSPEC) {
		sotc = so->so_traffic_class;
		netsvctype = so->so_netsvctype;
	}

	KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
	    inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
	    (htons((u_short)len + sizeof(struct udphdr))));

	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
		error = EMSGSIZE;
		goto release;
	}

	if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
		/*
		 * The socket is flow-controlled, drop the packets
		 * until the inp is not flow controlled
		 */
		error = ENOBUFS;
		goto release;
	}
	/*
	 * If socket was bound to an ifindex, tell ip_output about it.
	 * If the ancillary IP_PKTINFO option contains an interface index,
	 * it takes precedence over the one specified by IP_BOUND_IF.
	 */
	if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
	    (inp->inp_flags & INP_BOUND_IF)) {
		VERIFY(inp->inp_boundifp != NULL);
		ifnet_reference(inp->inp_boundifp);	/* for this routine */
		if (outif != NULL) {
			ifnet_release(outif);
		}
		outif = inp->inp_boundifp;
		ipoa.ipoa_boundif = outif->if_index;
	}
	if (INP_NO_CELLULAR(inp)) {
		ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
	}
	if (INP_NO_EXPENSIVE(inp)) {
		ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
	}
	if (INP_NO_CONSTRAINED(inp)) {
		ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
	}
	if (INP_AWDL_UNRESTRICTED(inp)) {
		ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
	}
	ipoa.ipoa_sotc = sotc;
	ipoa.ipoa_netsvctype = netsvctype;
	soopts |= IP_OUTARGS;

	/*
	 * If there was a routing change, discard cached route and check
	 * that we have a valid source address. Reacquire a new source
	 * address if INADDR_ANY was specified.
	 *
	 * If we are using cfil saved state, go through this cache cleanup
	 * so that we can get a new route.
	 */
	if (ROUTE_UNUSABLE(&inp->inp_route)
#if CONTENT_FILTER
	    || cfil_faddr_use
#endif
	    ) {
		struct in_ifaddr *ia = NULL;

		ROUTE_RELEASE(&inp->inp_route);

		/* src address is gone? */
		if (inp->inp_laddr.s_addr != INADDR_ANY &&
		    (ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
			if (!(inp->inp_flags & INP_INADDR_ANY) ||
			    (so->so_state & SS_ISCONNECTED)) {
				/*
				 * Rdar://5448998
				 * If the source address is gone, return an
				 * error if:
				 * - the source was specified
				 * - the socket was already connected
				 */
				soevent(so, (SO_FILT_HINT_LOCKED |
				    SO_FILT_HINT_NOSRCADDR));
				error = EADDRNOTAVAIL;
				goto release;
			} else {
				/* new src will be set later */
				inp->inp_laddr.s_addr = INADDR_ANY;
				inp->inp_last_outifp = NULL;
			}
		}
		if (ia != NULL) {
			IFA_REMREF(&ia->ia_ifa);
		}
	}

	/*
	 * IP_PKTINFO option check. If a temporary scope or src address
	 * is provided, use it for this packet only and make sure we forget
	 * it after sending this datagram.
	 */
	if (pi_laddr.s_addr != INADDR_ANY ||
	    (ipoa.ipoa_boundif != IFSCOPE_NONE && pktinfo)) {
		/* temp src address for this datagram only */
		laddr = pi_laddr;
		origladdr.s_addr = INADDR_ANY;
		/* we don't want to keep the laddr or route */
		udp_dodisconnect = 1;
		/* remember we don't care about src addr */
		inp->inp_flags |= INP_INADDR_ANY;
	} else {
		origladdr = laddr = inp->inp_laddr;
	}

	origoutifp = inp->inp_last_outifp;
	faddr = inp->inp_faddr;
	lport = inp->inp_lport;
	fport = inp->inp_fport;

#if CONTENT_FILTER
	if (cfil_faddr_use) {
		faddr = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_addr;
		fport = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_port;
	}
#endif

	if (addr) {
		sin = (struct sockaddr_in *)(void *)addr;
		if (faddr.s_addr != INADDR_ANY) {
			error = EISCONN;
			goto release;
		}
		if (lport == 0) {
			/*
			 * In case we don't have a local port set, go through
			 * the full connect. We don't have a local port yet
			 * (i.e., we can't be looked up), so it's not an issue
			 * if the input runs at the same time we do this.
			 */
			/* if we have a source address specified, use that */
			if (pi_laddr.s_addr != INADDR_ANY) {
				inp->inp_laddr = pi_laddr;
			}
			/*
			 * If a scope is specified, use it. Scope from
			 * IP_PKTINFO takes precedence over the scope
			 * set via INP_BOUND_IF.
			 */
			error = in_pcbconnect(inp, addr, p, ipoa.ipoa_boundif,
			    &outif);
			if (error) {
				goto release;
			}

			laddr = inp->inp_laddr;
			lport = inp->inp_lport;
			faddr = inp->inp_faddr;
			fport = inp->inp_fport;
			udp_dodisconnect = 1;

			/* synch up in case in_pcbladdr() overrides */
			if (outif != NULL && ipoa.ipoa_boundif != IFSCOPE_NONE) {
				ipoa.ipoa_boundif = outif->if_index;
			}
		} else {
			/*
			 * Fast path case
			 *
			 * We have a full address and a local port; use that
			 * info to build the packet without changing the pcb
			 * and interfering with the input path. See 3851370.
			 *
			 * Scope from IP_PKTINFO takes precedence over the
			 * scope set via INP_BOUND_IF.
			 */
			if (laddr.s_addr == INADDR_ANY) {
				if ((error = in_pcbladdr(inp, addr, &laddr,
				    ipoa.ipoa_boundif, &outif, 0)) != 0) {
					goto release;
				}
				/*
				 * from pcbconnect: remember we don't
				 * care about src addr.
				 */
				inp->inp_flags |= INP_INADDR_ANY;

				/* synch up in case in_pcbladdr() overrides */
				if (outif != NULL &&
				    ipoa.ipoa_boundif != IFSCOPE_NONE) {
					ipoa.ipoa_boundif = outif->if_index;
				}
			}

			faddr = sin->sin_addr;
			fport = sin->sin_port;
		}
	} else {
		if (faddr.s_addr == INADDR_ANY) {
			error = ENOTCONN;
			goto release;
		}
	}

#if CONFIG_MACF_NET
	mac_mbuf_label_associate_inpcb(inp, m);
#endif /* CONFIG_MACF_NET */

	if (inp->inp_flowhash == 0) {
		inp->inp_flowhash = inp_calc_flowhash(inp);
	}

	if (fport == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) {
		so->so_flags1 |= SOF1_DNS_COUNTED;
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns);
	}

	/*
	 * Calculate data length and get a mbuf
	 * for UDP and IP headers.
	 */
	M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT, 1);
	if (m == 0) {
		error = ENOBUFS;
		goto abort;
	}

	/*
	 * Fill in mbuf with extended UDP header
	 * and addresses and length put into network format.
	 */
	ui = mtod(m, struct udpiphdr *);
	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
	ui->ui_pr = IPPROTO_UDP;
	ui->ui_src = laddr;
	ui->ui_dst = faddr;
	ui->ui_sport = lport;
	ui->ui_dport = fport;
	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));

	/*
	 * Set up checksum to pseudo header checksum and output datagram.
	 *
	 * Treat flows to be CLAT46'd as IPv6 flow and compute checksum
	 * no matter what, as IPv6 mandates checksum for UDP.
	 *
	 * Here we only compute the one's complement sum of the pseudo header.
	 * The payload computation and final complement is delayed to much later
	 * in IP processing to decide if remaining computation needs to be done
	 * through offload.
	 *
	 * That is communicated by setting CSUM_UDP in csum_flags.
	 * The offset of checksum from the start of ULP header is communicated
	 * through csum_data.
	 *
	 * Note since this already contains the pseudo checksum header, any
	 * later operation at IP layer that modify the values used here must
	 * update the checksum as well (for example NAT etc).
	 */
	if ((inp->inp_flags2 & INP2_CLAT46_FLOW) ||
	    (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM))) {
		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
		    htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
		m->m_pkthdr.csum_flags = (CSUM_UDP | CSUM_ZERO_INVERT);
		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
	} else {
		ui->ui_sum = 0;
	}
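	/*
	 * The pseudo header summed by in_pseudo() above is the RFC 768
	 * one: source address, destination address, protocol and UDP
	 * length. For a 100-byte payload the third argument is
	 * htons(100 + 8 + 17); the partial sum is seeded into uh_sum so
	 * that ip_output() or the NIC only has to fold the UDP header
	 * and payload into it to finish the checksum.
	 */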
1800 ((struct ip *)ui)->ip_len = sizeof(struct udpiphdr) + len;
1801 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1802 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1803 udpstat.udps_opackets++;
1804
1805 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1806 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1807
1808 #if NECP
1809 {
1810 necp_kernel_policy_id policy_id;
1811 necp_kernel_policy_id skip_policy_id;
1812 u_int32_t route_rule_id;
1813
1814 /*
1815 * We need a route to perform NECP route rule checks
1816 */
1817 if (net_qos_policy_restricted != 0 &&
1818 ROUTE_UNUSABLE(&inp->inp_route)) {
1819 struct sockaddr_in to;
1820 struct sockaddr_in from;
1821
1822 ROUTE_RELEASE(&inp->inp_route);
1823
1824 bzero(&from, sizeof(struct sockaddr_in));
1825 from.sin_family = AF_INET;
1826 from.sin_len = sizeof(struct sockaddr_in);
1827 from.sin_addr = laddr;
1828
1829 bzero(&to, sizeof(struct sockaddr_in));
1830 to.sin_family = AF_INET;
1831 to.sin_len = sizeof(struct sockaddr_in);
1832 to.sin_addr = faddr;
1833
1834 inp->inp_route.ro_dst.sa_family = AF_INET;
1835 inp->inp_route.ro_dst.sa_len = sizeof(struct sockaddr_in);
1836 ((struct sockaddr_in *)(void *)&inp->inp_route.ro_dst)->sin_addr =
1837 faddr;
1838
1839 rtalloc_scoped(&inp->inp_route, ipoa.ipoa_boundif);
1840
1841 inp_update_necp_policy(inp, (struct sockaddr *)&from,
1842 (struct sockaddr *)&to, ipoa.ipoa_boundif);
1843 inp->inp_policyresult.results.qos_marking_gencount = 0;
1844 }
1845
1846 if (!necp_socket_is_allowed_to_send_recv_v4(inp, lport, fport,
1847 &laddr, &faddr, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
1848 error = EHOSTUNREACH;
1849 goto abort;
1850 }
1851
1852 necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);
1853
1854 if (net_qos_policy_restricted != 0) {
1855 necp_socket_update_qos_marking(inp,
1856 inp->inp_route.ro_rt, NULL, route_rule_id);
1857 }
1858 }
1859 #endif /* NECP */
1860 if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1861 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1862 }
1863
1864 #if IPSEC
1865 if (inp->inp_sp != NULL && ipsec_setsocket(m, inp->inp_socket) != 0) {
1866 error = ENOBUFS;
1867 goto abort;
1868 }
1869 #endif /* IPSEC */
1870
1871 inpopts = inp->inp_options;
1872 #if CONTENT_FILTER
1873 if (cfil_tag && (inp->inp_socket->so_options != cfil_so_options)) {
1874 soopts |= (cfil_so_options & (SO_DONTROUTE | SO_BROADCAST));
1875 } else
1876 #endif
1877 soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1878
1879 mopts = inp->inp_moptions;
1880 if (mopts != NULL) {
1881 IMO_LOCK(mopts);
1882 IMO_ADDREF_LOCKED(mopts);
1883 if (IN_MULTICAST(ntohl(ui->ui_dst.s_addr)) &&
1884 mopts->imo_multicast_ifp != NULL) {
1885 /* no reference needed */
1886 inp->inp_last_outifp = mopts->imo_multicast_ifp;
1887 }
1888 IMO_UNLOCK(mopts);
1889 }
1890
1891 /* Copy the cached route and take an extra reference */
1892 inp_route_copyout(inp, &ro);
1893
1894 set_packet_service_class(m, so, sotc, 0);
1895 m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1896 m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
1897 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
1898 m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC);
1899 if (flowadv) {
1900 m->m_pkthdr.pkt_flags |= PKTF_FLOW_ADV;
1901 }
1902 m->m_pkthdr.tx_udp_pid = so->last_pid;
1903 if (so->so_flags & SOF_DELEGATED) {
1904 m->m_pkthdr.tx_udp_e_pid = so->e_pid;
1905 } else {
1906 m->m_pkthdr.tx_udp_e_pid = 0;
1907 }
1908
1909 if (ipoa.ipoa_boundif != IFSCOPE_NONE) {
1910 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1911 }
1912
1913 if (laddr.s_addr != INADDR_ANY) {
1914 ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
1915 }
1916
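/*
 * The send-in-progress count taken below is what keeps this PCB's
 * addressing state stable while the socket lock is dropped around
 * ip_output(): paths that disconnect or rebind the PCB wait for the
 * count to drain (see the wakeup on inp_sndinprog_cnt further down).
 */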
1917 inp->inp_sndinprog_cnt++;
1918
1919 socket_unlock(so, 0);
1920 error = ip_output(m, inpopts, &ro, soopts, mopts, &ipoa);
1921 m = NULL;
1922 socket_lock(so, 0);
1923 if (mopts != NULL) {
1924 IMO_REMREF(mopts);
1925 }
1926
1927 if (error == 0 && nstat_collect) {
1928 boolean_t cell, wifi, wired;
1929
1930 if (ro.ro_rt != NULL) {
1931 cell = IFNET_IS_CELLULAR(ro.ro_rt->rt_ifp);
1932 wifi = (!cell && IFNET_IS_WIFI(ro.ro_rt->rt_ifp));
1933 wired = (!wifi && IFNET_IS_WIRED(ro.ro_rt->rt_ifp));
1934 } else {
1935 cell = wifi = wired = FALSE;
1936 }
1937 INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1);
1938 INP_ADD_STAT(inp, cell, wifi, wired, txbytes, len);
1939 inp_set_activity_bitmap(inp);
1940 }
1941
1942 if (flowadv && (adv->code == FADV_FLOW_CONTROLLED ||
1943 adv->code == FADV_SUSPENDED)) {
1944 /*
1945 * return a hint to the application that
1946 * the packet has been dropped
1947 */
1948 error = ENOBUFS;
1949 inp_set_fc_state(inp, adv->code);
1950 }
1951
1952 VERIFY(inp->inp_sndinprog_cnt > 0);
1953 if (--inp->inp_sndinprog_cnt == 0) {
1954 inp->inp_flags &= ~(INP_FC_FEEDBACK);
1955 if (inp->inp_sndingprog_waiters > 0) {
1956 wakeup(&inp->inp_sndinprog_cnt);
1957 }
1958 }
1959
1960 /* Synchronize PCB cached route */
1961 inp_route_copyin(inp, &ro);
1962
1963 abort:
1964 if (udp_dodisconnect) {
1965 /* Always discard the cached route for unconnected socket */
1966 ROUTE_RELEASE(&inp->inp_route);
1967 in_pcbdisconnect(inp);
1968 inp->inp_laddr = origladdr; /* XXX rehash? */
1969 /* no reference needed */
1970 inp->inp_last_outifp = origoutifp;
1971 } else if (inp->inp_route.ro_rt != NULL) {
1972 struct rtentry *rt = inp->inp_route.ro_rt;
1973 struct ifnet *outifp;
1974
1975 if (rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) {
1976 rt = NULL; /* unusable */
1977 }
1978 #if CONTENT_FILTER
1979 /*
1980 * Discard temporary route for cfil case
1981 */
1982 if (cfil_faddr_use) {
1983 rt = NULL; /* unusable */
1984 }
1985 #endif
1986
1987 /*
1988 * Always discard if it is a multicast or broadcast route.
1989 */
1990 if (rt == NULL) {
1991 ROUTE_RELEASE(&inp->inp_route);
1992 }
1993
1994 /*
1995 * If the destination route is unicast, update outifp with
1996 * that of the route interface used by IP.
1997 */
1998 if (rt != NULL &&
1999 (outifp = rt->rt_ifp) != inp->inp_last_outifp) {
2000 inp->inp_last_outifp = outifp; /* no reference needed */
2001
2002 so->so_pktheadroom = P2ROUNDUP(
2003 sizeof(struct udphdr) +
2004 sizeof(struct ip) +
2005 ifnet_hdrlen(outifp) +
2006 ifnet_mbuf_packetpreamblelen(outifp),
2007 sizeof(u_int32_t));
2008 }
2009 } else {
2010 ROUTE_RELEASE(&inp->inp_route);
2011 }
2012
2013 /*
2014 * If output interface was cellular/expensive, and this socket is
2015 * denied access to it, generate an event.
2016 */
2017 if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
2018 (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) {
2019 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
2020 }
2021
2022 release:
2023 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
2024
2025 if (m != NULL) {
2026 m_freem(m);
2027 }
2028
2029 if (outif != NULL) {
2030 ifnet_release(outif);
2031 }
2032
2033 #if CONTENT_FILTER
2034 if (cfil_tag) {
2035 m_tag_free(cfil_tag);
2036 }
2037 #endif
2038
2039 return error;
2040 }
2041
2042 u_int32_t udp_sendspace = 9216; /* really max datagram size */
2043 /* 187 1K datagrams (approx 192 KB) */
2044 u_int32_t udp_recvspace = 187 * (1024 +
2045 #if INET6
2046 sizeof(struct sockaddr_in6)
2047 #else /* !INET6 */
2048 sizeof(struct sockaddr_in)
2049 #endif /* !INET6 */
2050 );
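/*
 * Sketch of the arithmetic behind the "approx 192 KB" note, assuming the
 * usual Darwin sizes (sizeof(struct sockaddr_in6) == 28,
 * sizeof(struct sockaddr_in) == 16):
 *
 *	INET6:	187 * (1024 + 28) = 196,724 bytes  (~192 KiB)
 *	!INET6:	187 * (1024 + 16) = 194,480 bytes  (~190 KiB)
 *
 * Each queued datagram is charged for its payload plus the source
 * address that recvfrom(2) would return, hence the per-datagram unit
 * being larger than 1K.
 */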
2051
2052 /* Check that the values of udp send and recv space do not exceed sb_max */
2053 static int
2054 sysctl_udp_sospace(struct sysctl_oid *oidp, void *arg1, int arg2,
2055 struct sysctl_req *req)
2056 {
2057 #pragma unused(arg1, arg2)
2058 u_int32_t new_value = 0, *space_p = NULL;
2059 int changed = 0, error = 0;
2060 u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES;
2061
2062 switch (oidp->oid_number) {
2063 case UDPCTL_RECVSPACE:
2064 space_p = &udp_recvspace;
2065 break;
2066 case UDPCTL_MAXDGRAM:
2067 space_p = &udp_sendspace;
2068 break;
2069 default:
2070 return EINVAL;
2071 }
2072 error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
2073 &new_value, &changed);
2074 if (changed) {
2075 if (new_value > 0 && new_value <= sb_effective_max) {
2076 *space_p = new_value;
2077 } else {
2078 error = ERANGE;
2079 }
2080 }
2081 return error;
2082 }
2083
2084 SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace,
2085 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_recvspace, 0,
2086 &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
2087
2088 SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram,
2089 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_sendspace, 0,
2090 &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
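/*
 * Userland sketch: the two handlers above surface as
 * net.inet.udp.recvspace and net.inet.udp.maxdgram; values outside
 * (0, sb_effective_max] are rejected with ERANGE by sysctl_udp_sospace():
 *
 *	#include <sys/sysctl.h>
 *
 *	uint32_t dgram;
 *	size_t len = sizeof(dgram);
 *	if (sysctlbyname("net.inet.udp.maxdgram", &dgram, &len,
 *	    NULL, 0) == 0)
 *		printf("max UDP datagram: %u bytes\n", dgram);
 */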
2091
2092 int
2093 udp_abort(struct socket *so)
2094 {
2095 struct inpcb *inp;
2096
2097 inp = sotoinpcb(so);
2098 if (inp == NULL) {
2099 panic("%s: so=%p null inp\n", __func__, so);
2100 /* NOTREACHED */
2101 }
2102 soisdisconnected(so);
2103 in_pcbdetach(inp);
2104 return 0;
2105 }
2106
2107 int
2108 udp_attach(struct socket *so, int proto, struct proc *p)
2109 {
2110 #pragma unused(proto)
2111 struct inpcb *inp;
2112 int error;
2113
2114 inp = sotoinpcb(so);
2115 if (inp != NULL) {
2116 panic("%s so=%p inp=%p\n", __func__, so, inp);
2117 /* NOTREACHED */
2118 }
2119 error = in_pcballoc(so, &udbinfo, p);
2120 if (error != 0) {
2121 return error;
2122 }
2123 error = soreserve(so, udp_sendspace, udp_recvspace);
2124 if (error != 0) {
2125 return error;
2126 }
2127 inp = (struct inpcb *)so->so_pcb;
2128 inp->inp_vflag |= INP_IPV4;
2129 inp->inp_ip_ttl = ip_defttl;
2130 if (nstat_collect) {
2131 nstat_udp_new_pcb(inp);
2132 }
2133 return 0;
2134 }
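/*
 * Userland sketch: udp_attach() is the pru_attach hook, reached when a
 * process creates a datagram socket; this is where the udp_sendspace /
 * udp_recvspace buffer reservations from above are applied:
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);	// ends up in udp_attach()
 */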
2135
2136 int
2137 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
2138 {
2139 struct inpcb *inp;
2140 int error;
2141
2142 if (nam->sa_family != 0 && nam->sa_family != AF_INET &&
2143 nam->sa_family != AF_INET6) {
2144 return EAFNOSUPPORT;
2145 }
2146
2147 inp = sotoinpcb(so);
2148 if (inp == NULL) {
2149 return EINVAL;
2150 }
2151 error = in_pcbbind(inp, nam, p);
2152
2153 #if NECP
2154 /* Update NECP client with bind result if not in middle of connect */
2155 if (error == 0 &&
2156 !(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
2157 !uuid_is_null(inp->necp_client_uuid)) {
2158 socket_unlock(so, 0);
2159 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2160 socket_lock(so, 0);
2161 }
2162 #endif /* NECP */
2163
2164 return error;
2165 }
2166
2167 int
2168 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
2169 {
2170 struct inpcb *inp;
2171 int error;
2172
2173 inp = sotoinpcb(so);
2174 if (inp == NULL) {
2175 return EINVAL;
2176 }
2177 if (inp->inp_faddr.s_addr != INADDR_ANY) {
2178 return EISCONN;
2179 }
2180
2181 if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
2182 so->so_flags1 |= SOF1_CONNECT_COUNTED;
2183 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
2184 }
2185
2186 #if NECP
2187 #if FLOW_DIVERT
2188 if (necp_socket_should_use_flow_divert(inp)) {
2189 uint32_t fd_ctl_unit =
2190 necp_socket_get_flow_divert_control_unit(inp);
2191 if (fd_ctl_unit > 0) {
2192 error = flow_divert_pcb_init(so, fd_ctl_unit);
2193 if (error == 0) {
2194 error = flow_divert_connect_out(so, nam, p);
2195 }
2196 } else {
2197 error = ENETDOWN;
2198 }
2199 return error;
2200 }
2201 #endif /* FLOW_DIVERT */
2202 #endif /* NECP */
2203
2204 error = in_pcbconnect(inp, nam, p, IFSCOPE_NONE, NULL);
2205 if (error == 0) {
2206 #if NECP
2207 /* Update NECP client with connected five-tuple */
2208 if (!uuid_is_null(inp->necp_client_uuid)) {
2209 socket_unlock(so, 0);
2210 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2211 socket_lock(so, 0);
2212 }
2213 #endif /* NECP */
2214
2215 soisconnected(so);
2216 if (inp->inp_flowhash == 0) {
2217 inp->inp_flowhash = inp_calc_flowhash(inp);
2218 }
2219 }
2220 return error;
2221 }
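/*
 * Userland sketch: connect(2) on a UDP socket arrives here via
 * pru_connect; once it succeeds, the cached five-tuple lets the process
 * use send(2) without a destination, and the flow hash computed above
 * feeds the flow-advisory machinery in udp_output():
 *
 *	struct sockaddr_in sin;		// hypothetical peer address
 *	connect(s, (struct sockaddr *)&sin, sizeof(sin));
 *	send(s, buf, buflen, 0);	// no sendto() needed anymore
 */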
2222
2223 int
2224 udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sockaddr *dst,
2225 struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
2226 uint32_t flags, void *arg, uint32_t arglen,
2227 struct uio *uio, user_ssize_t *bytes_written)
2228 {
2229 #pragma unused(aid, flags, arg, arglen)
2230 struct inpcb *inp = sotoinpcb(so);
2231 int error = 0;
2232 user_ssize_t datalen = 0;
2233
2234 if (inp == NULL) {
2235 return EINVAL;
2236 }
2237
2238 VERIFY(dst != NULL);
2239
2240 ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
2241 inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;
2242
2243 #if NECP
2244 inp_update_necp_policy(inp, src, dst, ifscope);
2245 #endif /* NECP */
2246
2247 /* bind socket to the specified interface, if requested */
2248 if (ifscope != IFSCOPE_NONE &&
2249 (error = inp_bindif(inp, ifscope, NULL)) != 0) {
2250 goto done;
2251 }
2252
2253 /* if source address and/or port is specified, bind to it */
2254 if (src != NULL) {
2255 error = sobindlock(so, src, 0); /* already locked */
2256 if (error != 0) {
2257 goto done;
2258 }
2259 }
2260
2261 switch (af) {
2262 case AF_INET:
2263 error = udp_connect(so, dst, p);
2264 break;
2265 #if INET6
2266 case AF_INET6:
2267 error = udp6_connect(so, dst, p);
2268 break;
2269 #endif /* INET6 */
2270 default:
2271 VERIFY(0);
2272 /* NOTREACHED */
2273 }
2274
2275 if (error != 0) {
2276 goto done;
2277 }
2278
2279 /*
2280 * If there is data, copy it. DATA_IDEMPOTENT is ignored.
2281 * CONNECT_RESUME_ON_READ_WRITE is ignored.
2282 */
2283 if (uio != NULL) {
2284 socket_unlock(so, 0);
2285
2286 VERIFY(bytes_written != NULL);
2287
2288 datalen = uio_resid(uio);
2289 error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
2290 (uio_t)uio, NULL, NULL, 0);
2291 socket_lock(so, 0);
2292
2293 /* On a hard send error (e.g. EMSGSIZE), disconnect the association */
2294 if (error == 0 || error == EWOULDBLOCK) {
2295 *bytes_written = datalen - uio_resid(uio);
2296 } else {
2297 (void) so->so_proto->pr_usrreqs->pru_disconnectx(so,
2298 SAE_ASSOCID_ANY, SAE_CONNID_ANY);
2299 }
2300 /*
2301 * mask the EWOULDBLOCK error so that the caller
2302 * knows that at least the connect was successful.
2303 */
2304 if (error == EWOULDBLOCK) {
2305 error = 0;
2306 }
2307 }
2308
2309 if (error == 0 && pcid != NULL) {
2310 *pcid = 1; /* there is only 1 connection for UDP */
2311 }
2312 done:
2313 inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
2314 return error;
2315 }
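/*
 * Userland sketch of the connectx(2) entry point serviced above,
 * assuming Darwin's sa_endpoints_t layout; for UDP the returned
 * connection id is always 1 and any iovec payload is pushed through an
 * ordinary sosend once the connect succeeds:
 *
 *	sa_endpoints_t sae = {
 *		.sae_dstaddr = (struct sockaddr *)&sin,
 *		.sae_dstaddrlen = sizeof(sin),
 *	};
 *	sae_connid_t cid;
 *	connectx(s, &sae, SAE_ASSOCID_ANY, 0, NULL, 0, NULL, &cid);
 */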
2316
2317 int
2318 udp_connectx(struct socket *so, struct sockaddr *src,
2319 struct sockaddr *dst, struct proc *p, uint32_t ifscope,
2320 sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
2321 uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
2322 {
2323 return udp_connectx_common(so, AF_INET, src, dst,
2324 p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written);
2325 }
2326
2327 int
2328 udp_detach(struct socket *so)
2329 {
2330 struct inpcb *inp;
2331
2332 inp = sotoinpcb(so);
2333 if (inp == NULL) {
2334 panic("%s: so=%p null inp\n", __func__, so);
2335 /* NOTREACHED */
2336 }
2337
2338 /*
2339 * If this is a socket that does not want to wake up the device
2340 * for its traffic, the application might be waiting for
2341 * close to complete before going to sleep. Send a notification
2342 * for this kind of socket.
2343 */
2344 if (so->so_options & SO_NOWAKEFROMSLEEP) {
2345 socket_post_kev_msg_closed(so);
2346 }
2347
2348 in_pcbdetach(inp);
2349 inp->inp_state = INPCB_STATE_DEAD;
2350 return 0;
2351 }
2352
2353 int
2354 udp_disconnect(struct socket *so)
2355 {
2356 struct inpcb *inp;
2357
2358 inp = sotoinpcb(so);
2359 if (inp == NULL
2360 #if NECP
2361 || (necp_socket_should_use_flow_divert(inp))
2362 #endif /* NECP */
2363 ) {
2364 return inp == NULL ? EINVAL : EPROTOTYPE;
2365 }
2366 if (inp->inp_faddr.s_addr == INADDR_ANY) {
2367 return ENOTCONN;
2368 }
2369
2370 in_pcbdisconnect(inp);
2371
2372 /* reset flow controlled state, just in case */
2373 inp_reset_fc_state(inp);
2374
2375 inp->inp_laddr.s_addr = INADDR_ANY;
2376 so->so_state &= ~SS_ISCONNECTED; /* XXX */
2377 inp->inp_last_outifp = NULL;
2378
2379 return 0;
2380 }
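/*
 * Userland sketch: the classical way to reach udp_disconnect() is to
 * connect(2) the socket to an address with sa_family set to AF_UNSPEC;
 * per the connect(2) man page this dissolves the association (an
 * EAFNOSUPPORT return is harmless):
 *
 *	struct sockaddr sa = {
 *		.sa_len = sizeof(sa),
 *		.sa_family = AF_UNSPEC,
 *	};
 *	connect(s, &sa, sizeof(sa));	// socket is unconnected again
 */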
2381
2382 int
2383 udp_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
2384 {
2385 #pragma unused(cid)
2386 if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
2387 return EINVAL;
2388 }
2389
2390 return udp_disconnect(so);
2391 }
2392
2393 int
2394 udp_send(struct socket *so, int flags, struct mbuf *m,
2395 struct sockaddr *addr, struct mbuf *control, struct proc *p)
2396 {
2397 #ifndef FLOW_DIVERT
2398 #pragma unused(flags)
2399 #endif /* !(FLOW_DIVERT) */
2400 struct inpcb *inp;
2401
2402 inp = sotoinpcb(so);
2403 if (inp == NULL) {
2404 if (m != NULL) {
2405 m_freem(m);
2406 }
2407 if (control != NULL) {
2408 m_freem(control);
2409 }
2410 return EINVAL;
2411 }
2412
2413 #if NECP
2414 #if FLOW_DIVERT
2415 if (necp_socket_should_use_flow_divert(inp)) {
2416 /* Implicit connect */
2417 return flow_divert_implicit_data_out(so, flags, m, addr,
2418 control, p);
2419 }
2420 #endif /* FLOW_DIVERT */
2421 #endif /* NECP */
2422
2423 return udp_output(inp, m, addr, control, p);
2424 }
2425
2426 int
2427 udp_shutdown(struct socket *so)
2428 {
2429 struct inpcb *inp;
2430
2431 inp = sotoinpcb(so);
2432 if (inp == NULL) {
2433 return EINVAL;
2434 }
2435 socantsendmore(so);
2436 return 0;
2437 }
2438
2439 int
2440 udp_lock(struct socket *so, int refcount, void *debug)
2441 {
2442 void *lr_saved;
2443
2444 if (debug == NULL) {
2445 lr_saved = __builtin_return_address(0);
2446 } else {
2447 lr_saved = debug;
2448 }
2449
2450 if (so->so_pcb != NULL) {
2451 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2452 LCK_MTX_ASSERT_NOTOWNED);
2453 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2454 } else {
2455 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2456 so, lr_saved, solockhistory_nr(so));
2457 /* NOTREACHED */
2458 }
2459 if (refcount) {
2460 so->so_usecount++;
2461 }
2462
2463 so->lock_lr[so->next_lock_lr] = lr_saved;
2464 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2465 return 0;
2466 }
2467
2468 int
2469 udp_unlock(struct socket *so, int refcount, void *debug)
2470 {
2471 void *lr_saved;
2472
2473 if (debug == NULL) {
2474 lr_saved = __builtin_return_address(0);
2475 } else {
2476 lr_saved = debug;
2477 }
2478
2479 if (refcount) {
2480 VERIFY(so->so_usecount > 0);
2481 so->so_usecount--;
2482 }
2483 if (so->so_pcb == NULL) {
2484 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2485 so, lr_saved, solockhistory_nr(so));
2486 /* NOTREACHED */
2487 } else {
2488 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2489 LCK_MTX_ASSERT_OWNED);
2490 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2491 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2492 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2493 }
2494 return 0;
2495 }
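/*
 * The lock_lr/unlock_lr arrays filled in by udp_lock()/udp_unlock() form
 * small ring buffers (SO_LCKDBG_MAX entries) of caller return addresses;
 * solockhistory_nr() renders them in the panic strings here and in
 * udp_getlock() below, which is the primary aid when chasing unbalanced
 * socket locking.
 */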
2496
2497 lck_mtx_t *
2498 udp_getlock(struct socket *so, int flags)
2499 {
2500 #pragma unused(flags)
2501 struct inpcb *inp = sotoinpcb(so);
2502
2503 if (so->so_pcb == NULL) {
2504 panic("%s: so=%p NULL so_pcb lrh= %s\n", __func__,
2505 so, solockhistory_nr(so));
2506 /* NOTREACHED */
2507 }
2508 return &inp->inpcb_mtx;
2509 }
2510
2511 /*
2512 * UDP garbage collector callback (inpcb_timer_func_t).
2513 *
2514 * Keeps the timer armed by bumping ipi_gc_req.intimer_fast; the callback itself returns void.
2515 */
2516 static void
2517 udp_gc(struct inpcbinfo *ipi)
2518 {
2519 struct inpcb *inp, *inpnxt;
2520 struct socket *so;
2521
2522 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2523 if (udp_gc_done == TRUE) {
2524 udp_gc_done = FALSE;
2525 /* couldn't get the lock, must lock next time */
2526 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2527 return;
2528 }
2529 lck_rw_lock_exclusive(ipi->ipi_lock);
2530 }
2531
2532 udp_gc_done = TRUE;
2533
2534 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
2535 inpnxt = inp->inp_list.le_next;
2536
2537 /*
2538 * Skip unless it's STOPUSING; garbage collector will
2539 * be triggered by in_pcb_checkstate() upon setting
2540 * wantcnt to that value. If the PCB is already dead,
2541 * keep gc active to anticipate wantcnt changing.
2542 */
2543 if (inp->inp_wantcnt != WNT_STOPUSING) {
2544 continue;
2545 }
2546
2547 /*
2548 * Skip if busy, no hurry for cleanup. Keep gc active
2549 * and try the lock again during next round.
2550 */
2551 if (!socket_try_lock(inp->inp_socket)) {
2552 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2553 continue;
2554 }
2555
2556 /*
2557 * Keep gc active unless usecount is 0.
2558 */
2559 so = inp->inp_socket;
2560 if (so->so_usecount == 0) {
2561 if (inp->inp_state != INPCB_STATE_DEAD) {
2562 #if INET6
2563 if (SOCK_CHECK_DOM(so, PF_INET6)) {
2564 in6_pcbdetach(inp);
2565 } else
2566 #endif /* INET6 */
2567 in_pcbdetach(inp);
2568 }
2569 in_pcbdispose(inp);
2570 } else {
2571 socket_unlock(so, 0);
2572 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2573 }
2574 }
2575 lck_rw_done(ipi->ipi_lock);
2576 }
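/*
 * Note on the locking pattern above: udp_gc() first tries the inpcb
 * rwlock without blocking so the timer thread is never stalled; on a
 * miss it defers once, re-arming itself via intimer_fast, and only on a
 * second consecutive miss (udp_gc_done already FALSE) takes the lock
 * blocking so collection cannot be postponed indefinitely.
 */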
2577
2578 static int
2579 udp_getstat SYSCTL_HANDLER_ARGS
2580 {
2581 #pragma unused(oidp, arg1, arg2)
2582 if (req->oldptr == USER_ADDR_NULL) {
2583 req->oldlen = (size_t)sizeof(struct udpstat);
2584 }
2585
2586 return SYSCTL_OUT(req, &udpstat, MIN(sizeof(udpstat), req->oldlen));
2587 }
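/*
 * Userland sketch: udp_getstat() backs the net.inet.udp.stats node, so
 * the counters maintained throughout this file can be snapshotted in one
 * call (struct udpstat is declared in <netinet/udp_var.h>):
 *
 *	struct udpstat us;
 *	size_t len = sizeof(us);
 *	if (sysctlbyname("net.inet.udp.stats", &us, &len, NULL, 0) == 0)
 *		printf("bad checksums: %u\n", us.udps_badsum);
 */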
2588
2589 void
2590 udp_in_cksum_stats(u_int32_t len)
2591 {
2592 udpstat.udps_rcv_swcsum++;
2593 udpstat.udps_rcv_swcsum_bytes += len;
2594 }
2595
2596 void
2597 udp_out_cksum_stats(u_int32_t len)
2598 {
2599 udpstat.udps_snd_swcsum++;
2600 udpstat.udps_snd_swcsum_bytes += len;
2601 }
2602
2603 #if INET6
2604 void
2605 udp_in6_cksum_stats(u_int32_t len)
2606 {
2607 udpstat.udps_rcv6_swcsum++;
2608 udpstat.udps_rcv6_swcsum_bytes += len;
2609 }
2610
2611 void
2612 udp_out6_cksum_stats(u_int32_t len)
2613 {
2614 udpstat.udps_snd6_swcsum++;
2615 udpstat.udps_snd6_swcsum_bytes += len;
2616 }
2617 #endif /* INET6 */
2618
2619 /*
2620 * Checksum extended UDP header and data.
2621 */
2622 static int
2623 udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen)
2624 {
2625 struct ifnet *ifp = m->m_pkthdr.rcvif;
2626 struct ip *ip = mtod(m, struct ip *);
2627 struct ipovly *ipov = (struct ipovly *)ip;
2628
2629 if (uh->uh_sum == 0) {
2630 udpstat.udps_nosum++;
2631 return 0;
2632 }
2633
2634 /* ip_stripoptions() must have been called before we get here */
2635 ASSERT((ip->ip_hl << 2) == sizeof(*ip));
2636
2637 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
2638 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
2639 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
2640 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
2641 uh->uh_sum = m->m_pkthdr.csum_rx_val;
2642 } else {
2643 uint32_t sum = m->m_pkthdr.csum_rx_val;
2644 uint32_t start = m->m_pkthdr.csum_rx_start;
2645 int32_t trailer = (m_pktlen(m) - (off + ulen));
2646
2647 /*
2648 * Perform 1's complement adjustment of octets
2649 * that got included/excluded in the hardware-
2650 * calculated checksum value. Ignore cases
2651 * where the value already includes the entire
2652 * IP header span, as the sum for those octets
2653 * would already be 0 by the time we get here;
2654 * IP has already performed its header checksum
2655 * checks. If we do need to adjust, restore
2656 * the original fields in the IP header when
2657 * computing the adjustment value. Also take
2658 * care of any trailing bytes and subtract out
2659 * their partial sum.
2660 */
2661 ASSERT(trailer >= 0);
2662 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
2663 ((start != 0 && start != off) || trailer != 0)) {
2664 uint32_t swbytes = (uint32_t)trailer;
2665
2666 if (start < off) {
2667 ip->ip_len += sizeof(*ip);
2668 #if BYTE_ORDER != BIG_ENDIAN
2669 HTONS(ip->ip_len);
2670 HTONS(ip->ip_off);
2671 #endif /* BYTE_ORDER != BIG_ENDIAN */
2672 }
2673 /* callee folds in sum */
2674 sum = m_adj_sum16(m, start, off, ulen, sum);
2675 if (off > start) {
2676 swbytes += (off - start);
2677 } else {
2678 swbytes += (start - off);
2679 }
2680
2681 if (start < off) {
2682 #if BYTE_ORDER != BIG_ENDIAN
2683 NTOHS(ip->ip_off);
2684 NTOHS(ip->ip_len);
2685 #endif /* BYTE_ORDER != BIG_ENDIAN */
2686 ip->ip_len -= sizeof(*ip);
2687 }
2688
2689 if (swbytes != 0) {
2690 udp_in_cksum_stats(swbytes);
2691 }
2692 if (trailer != 0) {
2693 m_adj(m, -trailer);
2694 }
2695 }
2696
2697 /* callee folds in sum */
2698 uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
2699 ip->ip_dst.s_addr, sum + htonl(ulen + IPPROTO_UDP));
2700 }
2701 uh->uh_sum ^= 0xffff;
2702 } else {
2703 uint16_t ip_sum;
2704 char b[9];
2705
2706 bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1));
2707 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
2708 ip_sum = ipov->ih_len;
2709 ipov->ih_len = uh->uh_ulen;
2710 uh->uh_sum = in_cksum(m, ulen + sizeof(struct ip));
2711 bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1));
2712 ipov->ih_len = ip_sum;
2713
2714 udp_in_cksum_stats(ulen);
2715 }
2716
2717 if (uh->uh_sum != 0) {
2718 udpstat.udps_badsum++;
2719 IF_UDP_STATINC(ifp, badchksum);
2720 return -1;
2721 }
2722
2723 return 0;
2724 }
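/*
 * Worked example of the CSUM_PARTIAL adjustment above, with hypothetical
 * values: suppose the NIC started summing at the IP header (start == 0)
 * while the UDP header begins at off == 20. m_adj_sum16() then folds the
 * 20 IP-header octets back out of the hardware value (this is the
 * start < off case, which temporarily restores ip_len/ip_off byte order),
 * after which the pseudo-header is added and the result complemented:
 *
 *	sum = m_adj_sum16(m, 0, 20, ulen, m->m_pkthdr.csum_rx_val);
 *	uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 *	    sum + htonl(ulen + IPPROTO_UDP));
 *	uh->uh_sum ^= 0xffff;	// 0 now means the datagram checks out
 */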
2725
2726 void
2727 udp_fill_keepalive_offload_frames(ifnet_t ifp,
2728 struct ifnet_keepalive_offload_frame *frames_array,
2729 u_int32_t frames_array_count, size_t frame_data_offset,
2730 u_int32_t *used_frames_count)
2731 {
2732 struct inpcb *inp;
2733 inp_gen_t gencnt;
2734 u_int32_t frame_index = *used_frames_count;
2735
2736 if (ifp == NULL || frames_array == NULL ||
2737 frames_array_count == 0 ||
2738 frame_index >= frames_array_count ||
2739 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2740 return;
2741 }
2742
2743 lck_rw_lock_shared(udbinfo.ipi_lock);
2744 gencnt = udbinfo.ipi_gencnt;
2745 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
2746 struct socket *so;
2747 u_int8_t *data;
2748 struct ifnet_keepalive_offload_frame *frame;
2749 struct mbuf *m = NULL;
2750
2751 if (frame_index >= frames_array_count) {
2752 break;
2753 }
2754
2755 if (inp->inp_gencnt > gencnt ||
2756 inp->inp_state == INPCB_STATE_DEAD) {
2757 continue;
2758 }
2759
2760 if ((so = inp->inp_socket) == NULL ||
2761 (so->so_state & SS_DEFUNCT)) {
2762 continue;
2763 }
2764 /*
2765 * check for keepalive offload flag without socket
2766 * lock to avoid a deadlock
2767 */
2768 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
2769 continue;
2770 }
2771
2772 udp_lock(so, 1, 0);
2773 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
2774 udp_unlock(so, 1, 0);
2775 continue;
2776 }
2777 if ((inp->inp_vflag & INP_IPV4) &&
2778 (inp->inp_laddr.s_addr == INADDR_ANY ||
2779 inp->inp_faddr.s_addr == INADDR_ANY)) {
2780 udp_unlock(so, 1, 0);
2781 continue;
2782 }
2783 if ((inp->inp_vflag & INP_IPV6) &&
2784 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
2785 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
2786 udp_unlock(so, 1, 0);
2787 continue;
2788 }
2789 if (inp->inp_lport == 0 || inp->inp_fport == 0) {
2790 udp_unlock(so, 1, 0);
2791 continue;
2792 }
2793 if (inp->inp_last_outifp == NULL ||
2794 inp->inp_last_outifp->if_index != ifp->if_index) {
2795 udp_unlock(so, 1, 0);
2796 continue;
2797 }
2798 if ((inp->inp_vflag & INP_IPV4)) {
2799 if ((frame_data_offset + sizeof(struct udpiphdr) +
2800 inp->inp_keepalive_datalen) >
2801 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2802 udp_unlock(so, 1, 0);
2803 continue;
2804 }
2805 if ((sizeof(struct udpiphdr) +
2806 inp->inp_keepalive_datalen) > _MHLEN) {
2807 udp_unlock(so, 1, 0);
2808 continue;
2809 }
2810 } else {
2811 if ((frame_data_offset + sizeof(struct ip6_hdr) +
2812 sizeof(struct udphdr) +
2813 inp->inp_keepalive_datalen) >
2814 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2815 udp_unlock(so, 1, 0);
2816 continue;
2817 }
2818 if ((sizeof(struct ip6_hdr) + sizeof(struct udphdr) +
2819 inp->inp_keepalive_datalen) > _MHLEN) {
2820 udp_unlock(so, 1, 0);
2821 continue;
2822 }
2823 }
2824 MGETHDR(m, M_WAIT, MT_HEADER);
2825 if (m == NULL) {
2826 udp_unlock(so, 1, 0);
2827 continue;
2828 }
2829 /*
2830 * This inp has all the information that is needed to
2831 * generate an offload frame.
2832 */
2833 if (inp->inp_vflag & INP_IPV4) {
2834 struct ip *ip;
2835 struct udphdr *udp;
2836
2837 frame = &frames_array[frame_index];
2838 frame->length = frame_data_offset +
2839 sizeof(struct udpiphdr) +
2840 inp->inp_keepalive_datalen;
2841 frame->ether_type =
2842 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
2843 frame->interval = inp->inp_keepalive_interval;
2844 switch (inp->inp_keepalive_type) {
2845 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2846 frame->type =
2847 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2848 break;
2849 default:
2850 break;
2851 }
2852 data = mtod(m, u_int8_t *);
2853 bzero(data, sizeof(struct udpiphdr));
2854 ip = (__typeof__(ip))(void *)data;
2855 udp = (__typeof__(udp))(void *) (data +
2856 sizeof(struct ip));
2857 m->m_len = sizeof(struct udpiphdr);
2858 data = data + sizeof(struct udpiphdr);
2859 if (inp->inp_keepalive_datalen > 0 &&
2860 inp->inp_keepalive_data != NULL) {
2861 bcopy(inp->inp_keepalive_data, data,
2862 inp->inp_keepalive_datalen);
2863 m->m_len += inp->inp_keepalive_datalen;
2864 }
2865 m->m_pkthdr.len = m->m_len;
2866
2867 ip->ip_v = IPVERSION;
2868 ip->ip_hl = (sizeof(struct ip) >> 2);
2869 ip->ip_p = IPPROTO_UDP;
2870 ip->ip_len = htons(sizeof(struct udpiphdr) +
2871 (u_short)inp->inp_keepalive_datalen);
2872 ip->ip_ttl = inp->inp_ip_ttl;
2873 ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
2874 ip->ip_src = inp->inp_laddr;
2875 ip->ip_dst = inp->inp_faddr;
2876 ip->ip_sum = in_cksum_hdr_opt(ip);
2877
2878 udp->uh_sport = inp->inp_lport;
2879 udp->uh_dport = inp->inp_fport;
2880 udp->uh_ulen = htons(sizeof(struct udphdr) +
2881 (u_short)inp->inp_keepalive_datalen);
2882
2883 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2884 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
2885 ip->ip_dst.s_addr,
2886 htons(sizeof(struct udphdr) +
2887 (u_short)inp->inp_keepalive_datalen +
2888 IPPROTO_UDP));
2889 m->m_pkthdr.csum_flags =
2890 (CSUM_UDP | CSUM_ZERO_INVERT);
2891 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2892 uh_sum);
2893 }
2894 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2895 in_delayed_cksum(m);
2896 bcopy(m->m_data, frame->data + frame_data_offset,
2897 m->m_len);
2898 } else {
2899 struct ip6_hdr *ip6;
2900 struct udphdr *udp6;
2901
2902 VERIFY(inp->inp_vflag & INP_IPV6);
2903 frame = &frames_array[frame_index];
2904 frame->length = frame_data_offset +
2905 sizeof(struct ip6_hdr) +
2906 sizeof(struct udphdr) +
2907 inp->inp_keepalive_datalen;
2908 frame->ether_type =
2909 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
2910 frame->interval = inp->inp_keepalive_interval;
2911 switch (inp->inp_keepalive_type) {
2912 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2913 frame->type =
2914 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2915 break;
2916 default:
2917 break;
2918 }
2919 data = mtod(m, u_int8_t *);
2920 bzero(data, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
2921 ip6 = (__typeof__(ip6))(void *)data;
2922 udp6 = (__typeof__(udp6))(void *)(data +
2923 sizeof(struct ip6_hdr));
2924 m->m_len = sizeof(struct ip6_hdr) +
2925 sizeof(struct udphdr);
2926 data = data + (sizeof(struct ip6_hdr) +
2927 sizeof(struct udphdr));
2928 if (inp->inp_keepalive_datalen > 0 &&
2929 inp->inp_keepalive_data != NULL) {
2930 bcopy(inp->inp_keepalive_data, data,
2931 inp->inp_keepalive_datalen);
2932 m->m_len += inp->inp_keepalive_datalen;
2933 }
2934 m->m_pkthdr.len = m->m_len;
2935 ip6->ip6_flow = inp->inp_flow & IPV6_FLOWINFO_MASK;
2936 ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
2937 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2938 ip6->ip6_vfc |= IPV6_VERSION;
2939 ip6->ip6_nxt = IPPROTO_UDP;
2940 ip6->ip6_hlim = ip6_defhlim;
2941 ip6->ip6_plen = htons(sizeof(struct udphdr) +
2942 (u_short)inp->inp_keepalive_datalen);
2943 ip6->ip6_src = inp->in6p_laddr;
2944 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
2945 ip6->ip6_src.s6_addr16[1] = 0;
2946 }
2947
2948 ip6->ip6_dst = inp->in6p_faddr;
2949 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
2950 ip6->ip6_dst.s6_addr16[1] = 0;
2951 }
2952
2953 udp6->uh_sport = inp->in6p_lport;
2954 udp6->uh_dport = inp->in6p_fport;
2955 udp6->uh_ulen = htons(sizeof(struct udphdr) +
2956 (u_short)inp->inp_keepalive_datalen);
2957 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2958 udp6->uh_sum = in6_pseudo(&ip6->ip6_src,
2959 &ip6->ip6_dst,
2960 htonl(sizeof(struct udphdr) +
2961 (u_short)inp->inp_keepalive_datalen +
2962 IPPROTO_UDP));
2963 m->m_pkthdr.csum_flags =
2964 (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
2965 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2966 uh_sum);
2967 }
2968 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2969 in6_delayed_cksum(m);
2970 bcopy(m->m_data, frame->data + frame_data_offset,
2971 m->m_len);
2972 }
2973 if (m != NULL) {
2974 m_freem(m);
2975 m = NULL;
2976 }
2977 frame_index++;
2978 udp_unlock(so, 1, 0);
2979 }
2980 lck_rw_done(udbinfo.ipi_lock);
2981 *used_frames_count = frame_index;
2982 }
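/*
 * Userland sketch (hypothetical field names; see <netinet/udp.h> for the
 * real struct udp_keepalive_offload): a connected UDP socket opts into
 * the scan above with the UDP_KEEPALIVE_OFFLOAD socket option, providing
 * the payload, interval and type that get copied into each offload frame:
 *
 *	struct udp_keepalive_offload ka = { 0 };
 *	ka.ka_interval = 30;		// seconds between keepalives
 *	ka.ka_type = UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY;
 *	ka.ka_data_len = 4;
 *	memcpy(ka.ka_data, "ping", 4);
 *	setsockopt(s, IPPROTO_UDP, UDP_KEEPALIVE_OFFLOAD, &ka, sizeof(ka));
 */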