/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *      The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)udp_usrreq.c        8.6 (Berkeley) 5/23/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <net/ntstat.h>

#include <kern/zalloc.h>
#include <mach/boolean.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/dlil.h>
#include <net/net_api_stats.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#if INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/udp6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/kdebug.h>

#if IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netkey/key.h>
extern int ipsec_bypass;
extern int esp_udp_encap_port;
#endif /* IPSEC */

#if NECP
#include <net/necp.h>
#endif /* NECP */

#if FLOW_DIVERT
#include <netinet/flow_divert.h>
#endif /* FLOW_DIVERT */

#if CONTENT_FILTER
#include <net/content_filter.h>
#endif /* CONTENT_FILTER */

#define DBG_LAYER_IN_BEG        NETDBG_CODE(DBG_NETUDP, 0)
#define DBG_LAYER_IN_END        NETDBG_CODE(DBG_NETUDP, 2)
#define DBG_LAYER_OUT_BEG       NETDBG_CODE(DBG_NETUDP, 1)
#define DBG_LAYER_OUT_END       NETDBG_CODE(DBG_NETUDP, 3)
#define DBG_FNC_UDP_INPUT       NETDBG_CODE(DBG_NETUDP, (5 << 8))
#define DBG_FNC_UDP_OUTPUT      NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)

/*
 * UDP protocol implementation.
 * Per RFC 768, August, 1980.
 */
#ifndef COMPAT_42
static int udpcksum = 1;
#else
static int udpcksum = 0;        /* XXX */
#endif
SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udpcksum, 0, "");

int udp_log_in_vain = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
    &udp_log_in_vain, 0, "Log all incoming UDP packets");

static int blackhole = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
    &blackhole, 0, "Do not send port unreachables for refused connects");

struct inpcbhead udb;           /* from udp_var.h */
#define udb6    udb             /* for KAME src sync over BSD*'s */
struct inpcbinfo udbinfo;

#ifndef UDBHASHSIZE
#define UDBHASHSIZE 16
#endif

/* Garbage collection performed during most recent udp_gc() run */
static boolean_t udp_gc_done = FALSE;

#if IPFIREWALL
extern int fw_verbose;
extern void ipfwsyslog(int level, const char *format, ...);
extern void ipfw_stealth_stats_incr_udp(void);

/* Apple logging, log to ipfw.log */
#define log_in_vain_log(a) { \
    if ((udp_log_in_vain == 3) && (fw_verbose == 2)) { \
        ipfwsyslog a; \
    } else if ((udp_log_in_vain == 4) && (fw_verbose == 2)) { \
        ipfw_stealth_stats_incr_udp(); \
    } else { \
        log a; \
    } \
}
#else /* !IPFIREWALL */
#define log_in_vain_log(a) { log a; }
#endif /* !IPFIREWALL */
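
/*
 * Note on udp_log_in_vain values as used in udp_input() below: 1 and 2
 * log unmatched datagrams via log(); 3 and 4 take the "stealth mode"
 * path, where (with IPFIREWALL built in and fw_verbose == 2) 3 writes
 * to ipfw.log via ipfwsyslog() and 4 only bumps the stealth statistics.
 */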

static int udp_getstat SYSCTL_HANDLER_ARGS;
struct udpstat udpstat;         /* from udp_var.h */
SYSCTL_PROC(_net_inet_udp, UDPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, udp_getstat, "S,udpstat",
    "UDP statistics (struct udpstat, netinet/udp_var.h)");

SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount,
    CTLFLAG_RD | CTLFLAG_LOCKED, &udbinfo.ipi_count, 0,
    "Number of active PCBs");

__private_extern__ int udp_use_randomport = 1;
SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udp_use_randomport, 0,
    "Randomize UDP port numbers");

#if INET6
struct udp_in6 {
    struct sockaddr_in6 uin6_sin;
    u_char uin6_init_done : 1;
};
struct udp_ip6 {
    struct ip6_hdr uip6_ip6;
    u_char uip6_init_done : 1;
};

int udp_abort(struct socket *);
int udp_attach(struct socket *, int, struct proc *);
int udp_bind(struct socket *, struct sockaddr *, struct proc *);
int udp_connect(struct socket *, struct sockaddr *, struct proc *);
int udp_connectx(struct socket *, struct sockaddr *,
    struct sockaddr *, struct proc *, uint32_t, sae_associd_t,
    sae_connid_t *, uint32_t, void *, uint32_t, struct uio *, user_ssize_t *);
int udp_detach(struct socket *);
int udp_disconnect(struct socket *);
int udp_disconnectx(struct socket *, sae_associd_t, sae_connid_t);
int udp_send(struct socket *, int, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
    struct sockaddr_in *, struct udp_in6 *, struct udp_ip6 *, struct ifnet *);
#else /* !INET6 */
static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
    struct sockaddr_in *, struct ifnet *);
#endif /* !INET6 */
static int udp_input_checksum(struct mbuf *, struct udphdr *, int, int);
int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
static void udp_gc(struct inpcbinfo *);

struct pr_usrreqs udp_usrreqs = {
    .pru_abort = udp_abort,
    .pru_attach = udp_attach,
    .pru_bind = udp_bind,
    .pru_connect = udp_connect,
    .pru_connectx = udp_connectx,
    .pru_control = in_control,
    .pru_detach = udp_detach,
    .pru_disconnect = udp_disconnect,
    .pru_disconnectx = udp_disconnectx,
    .pru_peeraddr = in_getpeeraddr,
    .pru_send = udp_send,
    .pru_shutdown = udp_shutdown,
    .pru_sockaddr = in_getsockaddr,
    .pru_sosend = sosend,
    .pru_soreceive = soreceive,
    .pru_soreceive_list = soreceive_list,
};

void
udp_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
    static int udp_initialized = 0;
    vm_size_t str_size;
    struct inpcbinfo *pcbinfo;

    VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

    if (udp_initialized) {
        return;
    }
    udp_initialized = 1;
    uint32_t pool_size = (nmbclusters << MCLSHIFT) >> MBSHIFT;
    if (pool_size >= 96) {
        /* Improves 10GbE UDP performance. */
        udp_recvspace = 786896;
    }
    LIST_INIT(&udb);
    udbinfo.ipi_listhead = &udb;
    udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
        &udbinfo.ipi_hashmask);
    udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
        &udbinfo.ipi_porthashmask);
    str_size = (vm_size_t) sizeof(struct inpcb);
    udbinfo.ipi_zone = zinit(str_size, 80000 * str_size, 8192, "udpcb");

    pcbinfo = &udbinfo;
    /*
     * allocate lock group attribute and group for udp pcb mutexes
     */
    pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
    pcbinfo->ipi_lock_grp = lck_grp_alloc_init("udppcb",
        pcbinfo->ipi_lock_grp_attr);
    pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
    if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
        pcbinfo->ipi_lock_attr)) == NULL) {
        panic("%s: unable to allocate PCB lock\n", __func__);
        /* NOTREACHED */
    }

    udbinfo.ipi_gc = udp_gc;
    in_pcbinfo_attach(&udbinfo);
}

void
udp_input(struct mbuf *m, int iphlen)
{
    struct ip *ip;
    struct udphdr *uh;
    struct inpcb *inp;
    struct mbuf *opts = NULL;
    int len, isbroadcast;
    struct ip save_ip;
    struct sockaddr *append_sa;
    struct inpcbinfo *pcbinfo = &udbinfo;
    struct sockaddr_in udp_in;
    struct ip_moptions *imo = NULL;
    int foundmembership = 0, ret = 0;
#if INET6
    struct udp_in6 udp_in6;
    struct udp_ip6 udp_ip6;
#endif /* INET6 */
    struct ifnet *ifp = m->m_pkthdr.rcvif;
    boolean_t cell = IFNET_IS_CELLULAR(ifp);
    boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
    boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));

    bzero(&udp_in, sizeof(udp_in));
    udp_in.sin_len = sizeof(struct sockaddr_in);
    udp_in.sin_family = AF_INET;
#if INET6
    bzero(&udp_in6, sizeof(udp_in6));
    udp_in6.uin6_sin.sin6_len = sizeof(struct sockaddr_in6);
    udp_in6.uin6_sin.sin6_family = AF_INET6;
#endif /* INET6 */

    udpstat.udps_ipackets++;

    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    /*
     * Strip IP options, if any; should skip this,
     * make available to user, and use on returned packets,
     * but we don't yet have a way to check the checksum
     * with options still present.
     */
    if (iphlen > sizeof(struct ip)) {
        ip_stripoptions(m);
        iphlen = sizeof(struct ip);
    }

    /*
     * Get IP and UDP header together in first mbuf.
     */
    ip = mtod(m, struct ip *);
    if (m->m_len < iphlen + sizeof(struct udphdr)) {
        m = m_pullup(m, iphlen + sizeof(struct udphdr));
        if (m == NULL) {
            udpstat.udps_hdrops++;
            KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                0, 0, 0, 0, 0);
            return;
        }
        ip = mtod(m, struct ip *);
    }
    uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);

    /* destination port of 0 is illegal, based on RFC768. */
    if (uh->uh_dport == 0) {
        IF_UDP_STATINC(ifp, port0);
        goto bad;
    }

    KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
        ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);

    /*
     * Make mbuf data length reflect UDP length.
     * If not enough data to reflect UDP length, drop.
     */
    len = ntohs((u_short)uh->uh_ulen);
    if (ip->ip_len != len) {
        if (len > ip->ip_len || len < sizeof(struct udphdr)) {
            udpstat.udps_badlen++;
            IF_UDP_STATINC(ifp, badlength);
            goto bad;
        }
        m_adj(m, len - ip->ip_len);
        /* ip->ip_len = len; */
    }
    /*
     * Save a copy of the IP header in case we want to restore it
     * for sending an ICMP error message in response.
     */
    save_ip = *ip;

    /*
     * Checksum extended UDP header and data.
     */
    if (udp_input_checksum(m, uh, iphlen, len)) {
        goto bad;
    }

    isbroadcast = in_broadcast(ip->ip_dst, ifp);

    if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || isbroadcast) {
        int reuse_sock = 0, mcast_delivered = 0;

        lck_rw_lock_shared(pcbinfo->ipi_lock);
        /*
         * Deliver a multicast or broadcast datagram to *all* sockets
         * for which the local and remote addresses and ports match
         * those of the incoming datagram. This allows more than
         * one process to receive multi/broadcasts on the same port.
         * (This really ought to be done for unicast datagrams as
         * well, but that would cause problems with existing
         * applications that open both address-specific sockets and
         * a wildcard socket listening to the same port -- they would
         * end up receiving duplicates of every unicast datagram.
         * Those applications open the multiple sockets to overcome an
         * inadequacy of the UDP socket interface, but for backwards
         * compatibility we avoid the problem here rather than
         * fixing the interface. Maybe 4.5BSD will remedy this?)
         */

        /*
         * Construct sockaddr format source address.
         */
        udp_in.sin_port = uh->uh_sport;
        udp_in.sin_addr = ip->ip_src;
        /*
         * Locate pcb(s) for datagram.
         * (Algorithm copied from raw_intr().)
         */
#if INET6
        udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
#endif /* INET6 */
        LIST_FOREACH(inp, &udb, inp_list) {
#if NECP
            /* skipit is used only by the NECP check below */
            int skipit;
#endif /* NECP */

            if (inp->inp_socket == NULL) {
                continue;
            }
            if (inp != sotoinpcb(inp->inp_socket)) {
                panic("%s: bad so back ptr inp=%p\n",
                    __func__, inp);
                /* NOTREACHED */
            }
#if INET6
            if ((inp->inp_vflag & INP_IPV4) == 0) {
                continue;
            }
#endif /* INET6 */
            if (inp_restricted_recv(inp, ifp)) {
                continue;
            }

            if ((inp->inp_moptions == NULL) &&
                (ntohl(ip->ip_dst.s_addr) !=
                INADDR_ALLHOSTS_GROUP) && (isbroadcast == 0)) {
                continue;
            }

            if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
                WNT_STOPUSING) {
                continue;
            }

            udp_lock(inp->inp_socket, 1, 0);

            if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
                WNT_STOPUSING) {
                udp_unlock(inp->inp_socket, 1, 0);
                continue;
            }

            if (inp->inp_lport != uh->uh_dport) {
                udp_unlock(inp->inp_socket, 1, 0);
                continue;
            }
            if (inp->inp_laddr.s_addr != INADDR_ANY) {
                if (inp->inp_laddr.s_addr !=
                    ip->ip_dst.s_addr) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    continue;
                }
            }
            if (inp->inp_faddr.s_addr != INADDR_ANY) {
                if (inp->inp_faddr.s_addr !=
                    ip->ip_src.s_addr ||
                    inp->inp_fport != uh->uh_sport) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    continue;
                }
            }

            if (isbroadcast == 0 && (ntohl(ip->ip_dst.s_addr) !=
                INADDR_ALLHOSTS_GROUP)) {
                struct sockaddr_in group;
                int blocked;

                if ((imo = inp->inp_moptions) == NULL) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    continue;
                }
                IMO_LOCK(imo);

                bzero(&group, sizeof(struct sockaddr_in));
                group.sin_len = sizeof(struct sockaddr_in);
                group.sin_family = AF_INET;
                group.sin_addr = ip->ip_dst;

                blocked = imo_multi_filter(imo, ifp,
                    &group, &udp_in);
                if (blocked == MCAST_PASS) {
                    foundmembership = 1;
                }

                IMO_UNLOCK(imo);
                if (!foundmembership) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    if (blocked == MCAST_NOTSMEMBER ||
                        blocked == MCAST_MUTED) {
                        udpstat.udps_filtermcast++;
                    }
                    continue;
                }
                foundmembership = 0;
            }

            reuse_sock = (inp->inp_socket->so_options &
                (SO_REUSEPORT | SO_REUSEADDR));

#if NECP
            skipit = 0;
            if (!necp_socket_is_allowed_to_send_recv_v4(inp,
                uh->uh_dport, uh->uh_sport, &ip->ip_dst,
                &ip->ip_src, ifp, NULL, NULL, NULL)) {
                /* do not inject data to pcb */
                skipit = 1;
            }
            if (skipit == 0)
#endif /* NECP */
            {
                struct mbuf *n = NULL;

                if (reuse_sock) {
                    n = m_copy(m, 0, M_COPYALL);
                }
#if INET6
                udp_append(inp, ip, m,
                    iphlen + sizeof(struct udphdr),
                    &udp_in, &udp_in6, &udp_ip6, ifp);
#else /* !INET6 */
                udp_append(inp, ip, m,
                    iphlen + sizeof(struct udphdr),
                    &udp_in, ifp);
#endif /* !INET6 */
                mcast_delivered++;

                m = n;
            }
            udp_unlock(inp->inp_socket, 1, 0);

            /*
             * Don't look for additional matches if this one does
             * not have either the SO_REUSEPORT or SO_REUSEADDR
             * socket options set. This heuristic avoids searching
             * through all pcbs in the common case of a non-shared
             * port. It assumes that an application will never
             * clear these options after setting them.
             */
            if (reuse_sock == 0 || m == NULL) {
                break;
            }

            /*
             * Expect 32-bit aligned data pointer on strict-align
             * platforms.
             */
            MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
            /*
             * Recompute IP and UDP header pointers for new mbuf
             */
            ip = mtod(m, struct ip *);
            uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
        }
        lck_rw_done(pcbinfo->ipi_lock);

        if (mcast_delivered == 0) {
            /*
             * No matching pcb found; discard datagram.
             * (No need to send an ICMP Port Unreachable
             * for a broadcast or multicast datagram.)
             */
            udpstat.udps_noportbcast++;
            IF_UDP_STATINC(ifp, port_unreach);
            goto bad;
        }

        /* free the extra mbuf copy, or an mbuf whose delivery was skipped (e.g., by NECP) */
        if (m != NULL) {
            m_freem(m);
        }
        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
        return;
    }

#if IPSEC
    /*
     * UDP to port 4500 with a payload where the first four bytes are
     * not zero is a UDP encapsulated IPsec packet. Packets where
     * the payload is one byte and that byte is 0xFF are NAT keepalive
     * packets. Decapsulate the ESP packet and carry on with IPsec input
     * or discard the NAT keep-alive.
     */
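    /*
     * For reference (per RFC 3948, "UDP Encapsulation of IPsec ESP
     * Packets"): after the UDP header on port 4500, a non-zero first
     * 32-bit word is an ESP SPI (ESP-in-UDP), a zero first word is the
     * non-ESP marker of an IKE message, and a lone 0xFF byte is a
     * NAT-keepalive probe.
     */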
    if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
        (uh->uh_dport == ntohs((u_short)esp_udp_encap_port) ||
        uh->uh_sport == ntohs((u_short)esp_udp_encap_port))) {
        /*
         * Check if ESP or keepalive:
         * 1. If the destination port of the incoming packet is 4500.
         * 2. If the source port of the incoming packet is 4500,
         *    then check the SADB to match IP address and port.
         */
        bool check_esp = true;
        if (uh->uh_dport != ntohs((u_short)esp_udp_encap_port)) {
            check_esp = key_checksa_present(AF_INET, (caddr_t)&ip->ip_dst,
                (caddr_t)&ip->ip_src, uh->uh_dport,
                uh->uh_sport);
        }

        if (check_esp) {
            int payload_len = len - sizeof(struct udphdr) > 4 ? 4 :
                len - sizeof(struct udphdr);

            if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
                if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) +
                    payload_len)) == NULL) {
                    udpstat.udps_hdrops++;
                    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                        0, 0, 0, 0, 0);
                    return;
                }
                /*
                 * Expect 32-bit aligned data pointer on strict-align
                 * platforms.
                 */
                MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

                ip = mtod(m, struct ip *);
                uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
            }
            /* Check for NAT keepalive packet */
            if (payload_len == 1 && *(u_int8_t *)
                ((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
                m_freem(m);
                KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                    0, 0, 0, 0, 0);
                return;
            } else if (payload_len == 4 && *(u_int32_t *)(void *)
                ((caddr_t)uh + sizeof(struct udphdr)) != 0) {
                /* UDP encapsulated IPsec packet to pass through NAT */
                KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                    0, 0, 0, 0, 0);
                /* preserve the udp header */
                esp4_input(m, iphlen + sizeof(struct udphdr));
                return;
            }
        }
    }
#endif /* IPSEC */

    /*
     * Locate pcb for datagram.
     */
    inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
        ip->ip_dst, uh->uh_dport, 1, ifp);
    if (inp == NULL) {
        IF_UDP_STATINC(ifp, port_unreach);

        if (udp_log_in_vain) {
            char buf[MAX_IPv4_STR_LEN];
            char buf2[MAX_IPv4_STR_LEN];

            /* check src and dst address */
            if (udp_log_in_vain < 3) {
                log(LOG_INFO, "Connection attempt to "
                    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
                    &ip->ip_dst, buf, sizeof(buf)),
                    ntohs(uh->uh_dport), inet_ntop(AF_INET,
                    &ip->ip_src, buf2, sizeof(buf2)),
                    ntohs(uh->uh_sport));
            } else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
                ip->ip_dst.s_addr != ip->ip_src.s_addr) {
                log_in_vain_log((LOG_INFO,
                    "Stealth Mode connection attempt to "
                    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
                    &ip->ip_dst, buf, sizeof(buf)),
                    ntohs(uh->uh_dport), inet_ntop(AF_INET,
                    &ip->ip_src, buf2, sizeof(buf2)),
                    ntohs(uh->uh_sport)))
            }
        }
        udpstat.udps_noport++;
        if (m->m_flags & (M_BCAST | M_MCAST)) {
            udpstat.udps_noportbcast++;
            goto bad;
        }
#if ICMP_BANDLIM
        if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) {
            goto bad;
        }
#endif /* ICMP_BANDLIM */
        if (blackhole) {
            if (ifp && ifp->if_type != IFT_LOOP) {
                goto bad;
            }
        }
        *ip = save_ip;
        ip->ip_len += iphlen;
        icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
        return;
    }
    udp_lock(inp->inp_socket, 1, 0);

    if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
        udp_unlock(inp->inp_socket, 1, 0);
        IF_UDP_STATINC(ifp, cleanup);
        goto bad;
    }
#if NECP
    if (!necp_socket_is_allowed_to_send_recv_v4(inp, uh->uh_dport,
        uh->uh_sport, &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
        udp_unlock(inp->inp_socket, 1, 0);
        IF_UDP_STATINC(ifp, badipsec);
        goto bad;
    }
#endif /* NECP */

    /*
     * Construct sockaddr format source address.
     * Stuff source address and datagram in user buffer.
     */
    udp_in.sin_port = uh->uh_sport;
    udp_in.sin_addr = ip->ip_src;
    if ((inp->inp_flags & INP_CONTROLOPTS) != 0 ||
        (inp->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
        (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
        (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
#if INET6
        if (inp->inp_vflag & INP_IPV6) {
            int savedflags;

            ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
            savedflags = inp->inp_flags;
            inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
            ret = ip6_savecontrol(inp, m, &opts);
            inp->inp_flags = savedflags;
        } else
#endif /* INET6 */
        {
            ret = ip_savecontrol(inp, &opts, ip, m);
        }
        if (ret != 0) {
            udp_unlock(inp->inp_socket, 1, 0);
            goto bad;
        }
    }
    m_adj(m, iphlen + sizeof(struct udphdr));

    KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
        save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);

#if INET6
    if (inp->inp_vflag & INP_IPV6) {
        in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
        append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
    } else
#endif /* INET6 */
    {
        append_sa = (struct sockaddr *)&udp_in;
    }
    if (nstat_collect) {
        INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
        INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, m->m_pkthdr.len);
        inp_set_activity_bitmap(inp);
    }
    so_recv_data_stat(inp->inp_socket, m, 0);
    if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa,
        m, opts, NULL) == 0) {
        udpstat.udps_fullsock++;
    } else {
        sorwakeup(inp->inp_socket);
    }
    udp_unlock(inp->inp_socket, 1, 0);
    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return;
bad:
    m_freem(m);
    if (opts) {
        m_freem(opts);
    }
    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

#if INET6
static void
ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
{
    bzero(ip6, sizeof(*ip6));

    ip6->ip6_vfc = IPV6_VERSION;
    ip6->ip6_plen = ip->ip_len;
    ip6->ip6_nxt = ip->ip_p;
    ip6->ip6_hlim = ip->ip_ttl;
    if (ip->ip_src.s_addr) {
        ip6->ip6_src.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
        ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
    }
    if (ip->ip_dst.s_addr) {
        ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
        ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
    }
}
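
/*
 * Note: IPV6_ADDR_INT32_SMP supplies the 0x0000ffff word of the
 * IPv4-mapped prefix, so the addresses built above take the v4-mapped
 * form ::ffff:a.b.c.d (RFC 4291, section 2.5.5.2).
 */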
#endif /* INET6 */

/*
 * subroutine of udp_input(), mainly for source code readability.
 */
static void
#if INET6
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
    struct udp_ip6 *pudp_ip6, struct ifnet *ifp)
#else /* !INET6 */
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *pudp_in, struct ifnet *ifp)
#endif /* !INET6 */
{
    struct sockaddr *append_sa;
    struct mbuf *opts = 0;
    boolean_t cell = IFNET_IS_CELLULAR(ifp);
    boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
    boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
    int ret = 0;

#if CONFIG_MACF_NET
    if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
        m_freem(n);
        return;
    }
#endif /* CONFIG_MACF_NET */
    if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
        (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
        (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
        (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
#if INET6
        if (last->inp_vflag & INP_IPV6) {
            int savedflags;

            if (pudp_ip6->uip6_init_done == 0) {
                ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
                pudp_ip6->uip6_init_done = 1;
            }
            savedflags = last->inp_flags;
            last->inp_flags &= ~INP_UNMAPPABLEOPTS;
            ret = ip6_savecontrol(last, n, &opts);
            if (ret != 0) {
                last->inp_flags = savedflags;
                goto error;
            }
            last->inp_flags = savedflags;
        } else
#endif /* INET6 */
        {
            ret = ip_savecontrol(last, &opts, ip, n);
            if (ret != 0) {
                goto error;
            }
        }
    }
#if INET6
    if (last->inp_vflag & INP_IPV6) {
        if (pudp_in6->uin6_init_done == 0) {
            in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
            pudp_in6->uin6_init_done = 1;
        }
        append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
    } else
#endif /* INET6 */
    append_sa = (struct sockaddr *)pudp_in;
    if (nstat_collect) {
        INP_ADD_STAT(last, cell, wifi, wired, rxpackets, 1);
        INP_ADD_STAT(last, cell, wifi, wired, rxbytes,
            n->m_pkthdr.len);
        inp_set_activity_bitmap(last);
    }
    so_recv_data_stat(last->inp_socket, n, 0);
    m_adj(n, off);
    if (sbappendaddr(&last->inp_socket->so_rcv, append_sa,
        n, opts, NULL) == 0) {
        udpstat.udps_fullsock++;
    } else {
        sorwakeup(last->inp_socket);
    }
    return;
error:
    m_freem(n);
    m_freem(opts);
}

/*
 * Notify a udp user of an asynchronous error;
 * just wake up so that he can collect error status.
 */
void
udp_notify(struct inpcb *inp, int errno)
{
    inp->inp_socket->so_error = errno;
    sorwakeup(inp->inp_socket);
    sowwakeup(inp->inp_socket);
}

void
udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
{
    struct ip *ip = vip;
    void (*notify)(struct inpcb *, int) = udp_notify;
    struct in_addr faddr;
    struct inpcb *inp = NULL;

    faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
    if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
        return;
    }

    if (PRC_IS_REDIRECT(cmd)) {
        ip = 0;
        notify = in_rtchange;
    } else if (cmd == PRC_HOSTDEAD) {
        ip = 0;
    } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
        return;
    }
    if (ip) {
        struct udphdr uh;

        bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof(uh));
        inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport,
            ip->ip_src, uh.uh_sport, 0, NULL);
        if (inp != NULL && inp->inp_socket != NULL) {
            udp_lock(inp->inp_socket, 1, 0);
            if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
                WNT_STOPUSING) {
                udp_unlock(inp->inp_socket, 1, 0);
                return;
            }
            (*notify)(inp, inetctlerrmap[cmd]);
            udp_unlock(inp->inp_socket, 1, 0);
        }
    } else {
        in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
    }
}

int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
    int error = 0, optval = 0;
    struct inpcb *inp;

    /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
    if (sopt->sopt_level != IPPROTO_UDP &&
        !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
        return ip_ctloutput(so, sopt);
    }

    inp = sotoinpcb(so);

    switch (sopt->sopt_dir) {
    case SOPT_SET:
        switch (sopt->sopt_name) {
        case UDP_NOCKSUM:
            /* This option is settable only for UDP over IPv4 */
            if (!(inp->inp_vflag & INP_IPV4)) {
                error = EINVAL;
                break;
            }

            if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval))) != 0) {
                break;
            }

            if (optval != 0) {
                inp->inp_flags |= INP_UDP_NOCKSUM;
            } else {
                inp->inp_flags &= ~INP_UDP_NOCKSUM;
            }
            break;
        case UDP_KEEPALIVE_OFFLOAD:
        {
            struct udp_keepalive_offload ka;
            /*
             * If the socket is not connected, the stack will
             * not know the destination address to put in the
             * keepalive datagram. Return an error now instead
             * of failing later.
             */
            if (!(so->so_state & SS_ISCONNECTED)) {
                error = EINVAL;
                break;
            }
            if (sopt->sopt_valsize != sizeof(ka)) {
                error = EINVAL;
                break;
            }
            if ((error = sooptcopyin(sopt, &ka, sizeof(ka),
                sizeof(ka))) != 0) {
                break;
            }

            /* application should specify the type */
            if (ka.ka_type == 0) {
                return EINVAL;
            }

            if (ka.ka_interval == 0) {
                /*
                 * if interval is 0, disable the offload
                 * mechanism
                 */
                if (inp->inp_keepalive_data != NULL) {
                    FREE(inp->inp_keepalive_data,
                        M_TEMP);
                }
                inp->inp_keepalive_data = NULL;
                inp->inp_keepalive_datalen = 0;
                inp->inp_keepalive_interval = 0;
                inp->inp_keepalive_type = 0;
                inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD;
            } else {
                if (inp->inp_keepalive_data != NULL) {
                    FREE(inp->inp_keepalive_data,
                        M_TEMP);
                    inp->inp_keepalive_data = NULL;
                }

                inp->inp_keepalive_datalen = min(
                    ka.ka_data_len,
                    UDP_KEEPALIVE_OFFLOAD_DATA_SIZE);
                if (inp->inp_keepalive_datalen > 0) {
                    MALLOC(inp->inp_keepalive_data,
                        u_int8_t *,
                        inp->inp_keepalive_datalen,
                        M_TEMP, M_WAITOK);
                    if (inp->inp_keepalive_data == NULL) {
                        inp->inp_keepalive_datalen = 0;
                        error = ENOMEM;
                        break;
                    }
                    bcopy(ka.ka_data,
                        inp->inp_keepalive_data,
                        inp->inp_keepalive_datalen);
                } else {
                    inp->inp_keepalive_datalen = 0;
                }
                inp->inp_keepalive_interval =
                    min(UDP_KEEPALIVE_INTERVAL_MAX_SECONDS,
                    ka.ka_interval);
                inp->inp_keepalive_type = ka.ka_type;
                inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD;
            }
            break;
        }
        case SO_FLUSH:
            if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval))) != 0) {
                break;
            }

            error = inp_flush(inp, optval);
            break;

        default:
            error = ENOPROTOOPT;
            break;
        }
        break;

    case SOPT_GET:
        switch (sopt->sopt_name) {
        case UDP_NOCKSUM:
            optval = inp->inp_flags & INP_UDP_NOCKSUM;
            break;

        default:
            error = ENOPROTOOPT;
            break;
        }
        if (error == 0) {
            error = sooptcopyout(sopt, &optval, sizeof(optval));
        }
        break;
    }
    return error;
}
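
/*
 * Illustrative userland use of the UDP_NOCKSUM option handled above
 * (assumed example, not part of this file):
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_UDP, UDP_NOCKSUM, &on, sizeof(on));
 *
 * disables outbound UDP checksums on an IPv4 datagram socket.
 */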

static int
udp_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error, i, n;
    struct inpcb *inp, **inp_list;
    inp_gen_t gencnt;
    struct xinpgen xig;

    /*
     * The process of preparing the PCB list is too time-consuming and
     * resource-intensive to repeat twice on every request.
     */
    lck_rw_lock_exclusive(udbinfo.ipi_lock);
    if (req->oldptr == USER_ADDR_NULL) {
        n = udbinfo.ipi_count;
        req->oldidx = 2 * (sizeof(xig))
            + (n + n / 8) * sizeof(struct xinpcb);
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    if (req->newptr != USER_ADDR_NULL) {
        lck_rw_done(udbinfo.ipi_lock);
        return EPERM;
    }

    /*
     * OK, now we're committed to doing something.
     */
    gencnt = udbinfo.ipi_gencnt;
    n = udbinfo.ipi_count;

    bzero(&xig, sizeof(xig));
    xig.xig_len = sizeof(xig);
    xig.xig_count = n;
    xig.xig_gen = gencnt;
    xig.xig_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xig, sizeof(xig));
    if (error) {
        lck_rw_done(udbinfo.ipi_lock);
        return error;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
    if (inp_list == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return ENOMEM;
    }

    for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
        inp = LIST_NEXT(inp, inp_list)) {
        if (inp->inp_gencnt <= gencnt &&
            inp->inp_state != INPCB_STATE_DEAD) {
            inp_list[i++] = inp;
        }
    }
    n = i;

    error = 0;
    for (i = 0; i < n; i++) {
        struct xinpcb xi;

        inp = inp_list[i];

        if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
            continue;
        }
        udp_lock(inp->inp_socket, 1, 0);
        if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }
        if (inp->inp_gencnt > gencnt) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }

        bzero(&xi, sizeof(xi));
        xi.xi_len = sizeof(xi);
        /* XXX should avoid extra copy */
        inpcb_to_compat(inp, &xi.xi_inp);
        if (inp->inp_socket) {
            sotoxsocket(inp->inp_socket, &xi.xi_socket);
        }

        udp_unlock(inp->inp_socket, 1, 0);

        error = SYSCTL_OUT(req, &xi, sizeof(xi));
    }
    if (!error) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof(xig);
        xig.xig_gen = udbinfo.ipi_gencnt;
        xig.xig_sogen = so_gencnt;
        xig.xig_count = udbinfo.ipi_count;
        error = SYSCTL_OUT(req, &xig, sizeof(xig));
    }
    FREE(inp_list, M_TEMP);
    lck_rw_done(udbinfo.ipi_lock);
    return error;
}

SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist,
    "S,xinpcb", "List of active UDP sockets");

#if !CONFIG_EMBEDDED

static int
udp_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error, i, n;
    struct inpcb *inp, **inp_list;
    inp_gen_t gencnt;
    struct xinpgen xig;

    /*
     * The process of preparing the PCB list is too time-consuming and
     * resource-intensive to repeat twice on every request.
     */
    lck_rw_lock_shared(udbinfo.ipi_lock);
    if (req->oldptr == USER_ADDR_NULL) {
        n = udbinfo.ipi_count;
        req->oldidx =
            2 * (sizeof(xig)) + (n + n / 8) * sizeof(struct xinpcb64);
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    if (req->newptr != USER_ADDR_NULL) {
        lck_rw_done(udbinfo.ipi_lock);
        return EPERM;
    }

    /*
     * OK, now we're committed to doing something.
     */
    gencnt = udbinfo.ipi_gencnt;
    n = udbinfo.ipi_count;

    bzero(&xig, sizeof(xig));
    xig.xig_len = sizeof(xig);
    xig.xig_count = n;
    xig.xig_gen = gencnt;
    xig.xig_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xig, sizeof(xig));
    if (error) {
        lck_rw_done(udbinfo.ipi_lock);
        return error;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
    if (inp_list == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return ENOMEM;
    }

    for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
        inp = LIST_NEXT(inp, inp_list)) {
        if (inp->inp_gencnt <= gencnt &&
            inp->inp_state != INPCB_STATE_DEAD) {
            inp_list[i++] = inp;
        }
    }
    n = i;

    error = 0;
    for (i = 0; i < n; i++) {
        struct xinpcb64 xi;

        inp = inp_list[i];

        if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
            continue;
        }
        udp_lock(inp->inp_socket, 1, 0);
        if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }
        if (inp->inp_gencnt > gencnt) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }

        bzero(&xi, sizeof(xi));
        xi.xi_len = sizeof(xi);
        inpcb_to_xinpcb64(inp, &xi);
        if (inp->inp_socket) {
            sotoxsocket64(inp->inp_socket, &xi.xi_socket);
        }

        udp_unlock(inp->inp_socket, 1, 0);

        error = SYSCTL_OUT(req, &xi, sizeof(xi));
    }
    if (!error) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof(xig);
        xig.xig_gen = udbinfo.ipi_gencnt;
        xig.xig_sogen = so_gencnt;
        xig.xig_count = udbinfo.ipi_count;
        error = SYSCTL_OUT(req, &xig, sizeof(xig));
    }
    FREE(inp_list, M_TEMP);
    lck_rw_done(udbinfo.ipi_lock);
    return error;
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64,
    "S,xinpcb64", "List of active UDP sockets");

#endif /* !CONFIG_EMBEDDED */

static int
udp_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    return get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n,
    "S,xinpcb_n", "List of active UDP sockets");

__private_extern__ void
udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
    bitstr_t *bitfield)
{
    inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
        &udbinfo);
}

__private_extern__ uint32_t
udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
{
    return inpcb_count_opportunistic(ifindex, &udbinfo, flags);
}

__private_extern__ uint32_t
udp_find_anypcb_byaddr(struct ifaddr *ifa)
{
    return inpcb_find_anypcb_byaddr(ifa, &udbinfo);
}

static int
udp_check_pktinfo(struct mbuf *control, struct ifnet **outif,
    struct in_addr *laddr)
{
    struct cmsghdr *cm = 0;
    struct in_pktinfo *pktinfo;
    struct ifnet *ifp;

    if (outif != NULL) {
        *outif = NULL;
    }

    /*
     * XXX: Currently, we assume all the optional information is stored
     * in a single mbuf.
     */
    if (control->m_next) {
        return EINVAL;
    }

    if (control->m_len < CMSG_LEN(0)) {
        return EINVAL;
    }

    for (cm = M_FIRST_CMSGHDR(control);
        is_cmsg_valid(control, cm);
        cm = M_NXT_CMSGHDR(control, cm)) {
        if (cm->cmsg_level != IPPROTO_IP ||
            cm->cmsg_type != IP_PKTINFO) {
            continue;
        }

        if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) {
            return EINVAL;
        }

        pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm);

        /* Check for a valid ifindex in pktinfo */
        ifnet_head_lock_shared();

        if (pktinfo->ipi_ifindex > if_index) {
            ifnet_head_done();
            return ENXIO;
        }

        /*
         * If ipi_ifindex is specified it takes precedence
         * over ipi_spec_dst.
         */
        if (pktinfo->ipi_ifindex) {
            ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
            if (ifp == NULL) {
                ifnet_head_done();
                return ENXIO;
            }
            if (outif != NULL) {
                ifnet_reference(ifp);
                *outif = ifp;
            }
            ifnet_head_done();
            laddr->s_addr = INADDR_ANY;
            break;
        }

        ifnet_head_done();

        /*
         * Use the provided ipi_spec_dst address for temp
         * source address.
         */
        *laddr = pktinfo->ipi_spec_dst;
        break;
    }
    return 0;
}
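
/*
 * For reference (assumed userland sketch, not part of this file): the
 * IP_PKTINFO ancillary data parsed above is attached by a sender as a
 * cmsg with cmsg_level = IPPROTO_IP and cmsg_type = IP_PKTINFO carrying
 * a struct in_pktinfo; a non-zero ipi_ifindex selects the outgoing
 * interface (and takes precedence), otherwise ipi_spec_dst supplies the
 * temporary source address used by udp_output() below.
 */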

int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct proc *p)
{
    struct udpiphdr *ui;
    int len = m->m_pkthdr.len;
    struct sockaddr_in *sin;
    struct in_addr origladdr, laddr, faddr, pi_laddr;
    u_short lport, fport;
    int error = 0, udp_dodisconnect = 0, pktinfo = 0;
    struct socket *so = inp->inp_socket;
    int soopts = 0;
    struct mbuf *inpopts;
    struct ip_moptions *mopts;
    struct route ro;
    struct ip_out_args ipoa;
#if CONTENT_FILTER
    struct m_tag *cfil_tag = NULL;
    bool cfil_faddr_use = false;
    uint32_t cfil_so_state_change_cnt = 0;
    short cfil_so_options = 0;
    struct sockaddr *cfil_faddr = NULL;
#endif

    bzero(&ipoa, sizeof(ipoa));
    ipoa.ipoa_boundif = IFSCOPE_NONE;
    ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

    struct ifnet *outif = NULL;
    struct flowadv *adv = &ipoa.ipoa_flowadv;
    int sotc = SO_TC_UNSPEC;
    int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
    struct ifnet *origoutifp = NULL;
    int flowadv = 0;
    int tos = IPTOS_UNSPEC;

    /* Enable flow advisory only when connected */
    flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
    pi_laddr.s_addr = INADDR_ANY;

    KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    socket_lock_assert_owned(so);

#if CONTENT_FILTER
    /*
     * If socket is subject to UDP Content Filter and no addr is passed in,
     * retrieve CFIL saved state from mbuf and use it if necessary.
     */
    if (so->so_cfil_db && !addr) {
        cfil_tag = cfil_udp_get_socket_state(m, &cfil_so_state_change_cnt, &cfil_so_options, &cfil_faddr);
        if (cfil_tag) {
            sin = (struct sockaddr_in *)(void *)cfil_faddr;
            if (inp && inp->inp_faddr.s_addr == INADDR_ANY) {
                /*
                 * Socket is unconnected, simply use the saved faddr as 'addr' to go through
                 * the connect/disconnect logic.
                 */
                addr = (struct sockaddr *)cfil_faddr;
            } else if ((so->so_state_change_cnt != cfil_so_state_change_cnt) &&
                (inp->inp_fport != sin->sin_port ||
                inp->inp_faddr.s_addr != sin->sin_addr.s_addr)) {
                /*
                 * Socket is connected but socket state and dest addr/port changed.
                 * We need to use the saved faddr info.
                 */
                cfil_faddr_use = true;
            }
        }
    }
#endif

    if (control != NULL) {
        tos = so_tos_from_control(control);
        sotc = so_tc_from_control(control, &netsvctype);
        VERIFY(outif == NULL);
        error = udp_check_pktinfo(control, &outif, &pi_laddr);
        m_freem(control);
        control = NULL;
        if (error) {
            goto release;
        }
        pktinfo++;
        if (outif != NULL) {
            ipoa.ipoa_boundif = outif->if_index;
        }
    }
    if (sotc == SO_TC_UNSPEC) {
        sotc = so->so_traffic_class;
        netsvctype = so->so_netsvctype;
    }

    KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
        inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
        (htons((u_short)len + sizeof(struct udphdr))));

    if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
        error = EMSGSIZE;
        goto release;
    }

    if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
        /*
         * The socket is flow-controlled, drop the packets
         * until the inp is not flow controlled
         */
        error = ENOBUFS;
        goto release;
    }
    /*
     * If socket was bound to an ifindex, tell ip_output about it.
     * If the ancillary IP_PKTINFO option contains an interface index,
     * it takes precedence over the one specified by IP_BOUND_IF.
     */
    if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
        (inp->inp_flags & INP_BOUND_IF)) {
        VERIFY(inp->inp_boundifp != NULL);
        ifnet_reference(inp->inp_boundifp);     /* for this routine */
        if (outif != NULL) {
            ifnet_release(outif);
        }
        outif = inp->inp_boundifp;
        ipoa.ipoa_boundif = outif->if_index;
    }
    if (INP_NO_CELLULAR(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
    }
    if (INP_NO_EXPENSIVE(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
    }
    if (INP_NO_CONSTRAINED(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
    }
    if (INP_AWDL_UNRESTRICTED(inp)) {
        ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
    }
    ipoa.ipoa_sotc = sotc;
    ipoa.ipoa_netsvctype = netsvctype;
    soopts |= IP_OUTARGS;

    /*
     * If there was a routing change, discard cached route and check
     * that we have a valid source address. Reacquire a new source
     * address if INADDR_ANY was specified.
     *
     * If we are using cfil saved state, go through this cache cleanup
     * so that we can get a new route.
     */
    if (ROUTE_UNUSABLE(&inp->inp_route)
#if CONTENT_FILTER
        || cfil_faddr_use
#endif
        ) {
        struct in_ifaddr *ia = NULL;

        ROUTE_RELEASE(&inp->inp_route);

        /* src address is gone? */
        if (inp->inp_laddr.s_addr != INADDR_ANY &&
            (ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
            if (!(inp->inp_flags & INP_INADDR_ANY) ||
                (so->so_state & SS_ISCONNECTED)) {
                /*
                 * Rdar://5448998
                 * If the source address is gone, return an
                 * error if:
                 * - the source was specified
                 * - the socket was already connected
                 */
                soevent(so, (SO_FILT_HINT_LOCKED |
                    SO_FILT_HINT_NOSRCADDR));
                error = EADDRNOTAVAIL;
                goto release;
            } else {
                /* new src will be set later */
                inp->inp_laddr.s_addr = INADDR_ANY;
                inp->inp_last_outifp = NULL;
            }
        }
        if (ia != NULL) {
            IFA_REMREF(&ia->ia_ifa);
        }
    }

    /*
     * IP_PKTINFO option check. If a temporary scope or src address
     * is provided, use it for this packet only and make sure we forget
     * it after sending this datagram.
     */
    if (pi_laddr.s_addr != INADDR_ANY ||
        (ipoa.ipoa_boundif != IFSCOPE_NONE && pktinfo)) {
        /* temp src address for this datagram only */
        laddr = pi_laddr;
        origladdr.s_addr = INADDR_ANY;
        /* we don't want to keep the laddr or route */
        udp_dodisconnect = 1;
        /* remember we don't care about src addr */
        inp->inp_flags |= INP_INADDR_ANY;
    } else {
        origladdr = laddr = inp->inp_laddr;
    }

    origoutifp = inp->inp_last_outifp;
    faddr = inp->inp_faddr;
    lport = inp->inp_lport;
    fport = inp->inp_fport;

#if CONTENT_FILTER
    if (cfil_faddr_use) {
        faddr = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_addr;
        fport = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_port;
    }
#endif

    if (addr) {
        sin = (struct sockaddr_in *)(void *)addr;
        if (faddr.s_addr != INADDR_ANY) {
            error = EISCONN;
            goto release;
        }
        if (lport == 0) {
            /*
             * In case we don't have a local port set, go through
             * the full connect. We don't have a local port yet
             * (i.e., we can't be looked up), so it's not an issue
             * if the input runs at the same time we do this.
             */
            /* if we have a source address specified, use that */
            if (pi_laddr.s_addr != INADDR_ANY) {
                inp->inp_laddr = pi_laddr;
            }
            /*
             * If a scope is specified, use it. Scope from
             * IP_PKTINFO takes precedence over the scope
             * set via INP_BOUND_IF.
             */
            error = in_pcbconnect(inp, addr, p, ipoa.ipoa_boundif,
                &outif);
            if (error) {
                goto release;
            }

            laddr = inp->inp_laddr;
            lport = inp->inp_lport;
            faddr = inp->inp_faddr;
            fport = inp->inp_fport;
            udp_dodisconnect = 1;

            /* synch up in case in_pcbladdr() overrides */
            if (outif != NULL && ipoa.ipoa_boundif != IFSCOPE_NONE) {
                ipoa.ipoa_boundif = outif->if_index;
            }
        } else {
            /*
             * Fast path case
             *
             * We have a full address and a local port; use that
             * info to build the packet without changing the pcb
             * and interfering with the input path. See 3851370.
             *
             * Scope from IP_PKTINFO takes precedence over the
             * scope set via INP_BOUND_IF.
             */
            if (laddr.s_addr == INADDR_ANY) {
                if ((error = in_pcbladdr(inp, addr, &laddr,
                    ipoa.ipoa_boundif, &outif, 0)) != 0) {
                    goto release;
                }
                /*
                 * from pcbconnect: remember we don't
                 * care about src addr.
                 */
                inp->inp_flags |= INP_INADDR_ANY;

                /* synch up in case in_pcbladdr() overrides */
                if (outif != NULL &&
                    ipoa.ipoa_boundif != IFSCOPE_NONE) {
                    ipoa.ipoa_boundif = outif->if_index;
                }
            }

            faddr = sin->sin_addr;
            fport = sin->sin_port;
        }
    } else {
        if (faddr.s_addr == INADDR_ANY) {
            error = ENOTCONN;
            goto release;
        }
    }

#if CONFIG_MACF_NET
    mac_mbuf_label_associate_inpcb(inp, m);
#endif /* CONFIG_MACF_NET */

    if (inp->inp_flowhash == 0) {
        inp->inp_flowhash = inp_calc_flowhash(inp);
    }

    if (fport == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) {
        so->so_flags1 |= SOF1_DNS_COUNTED;
        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns);
    }

    /*
     * Calculate data length and get a mbuf
     * for UDP and IP headers.
     */
    M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT, 1);
    if (m == 0) {
        error = ENOBUFS;
        goto abort;
    }

    /*
     * Fill in mbuf with extended UDP header
     * and addresses and length put into network format.
     */
    ui = mtod(m, struct udpiphdr *);
    bzero(ui->ui_x1, sizeof(ui->ui_x1));    /* XXX still needed? */
    ui->ui_pr = IPPROTO_UDP;
    ui->ui_src = laddr;
    ui->ui_dst = faddr;
    ui->ui_sport = lport;
    ui->ui_dport = fport;
    ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));

    /*
     * Set up checksum to pseudo header checksum and output datagram.
     *
     * Treat flows to be CLAT46'd as IPv6 flows and compute the checksum
     * no matter what, as IPv6 mandates checksums for UDP.
     *
     * Here we only compute the one's complement sum of the pseudo header.
     * The payload computation and final complement are delayed to much
     * later in IP processing to decide if the remaining computation needs
     * to be done through offload.
     *
     * That is communicated by setting CSUM_UDP in csum_flags.
     * The offset of the checksum from the start of the ULP header is
     * communicated through csum_data.
     *
     * Note that since this already contains the pseudo checksum header,
     * any later operation at the IP layer that modifies the values used
     * here must update the checksum as well (for example NAT etc).
     */
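    /*
     * CSUM_ZERO_INVERT below asks checksum finalization to transmit a
     * computed value of zero as 0xFFFF, since an all-zero checksum field
     * means "no checksum" for UDP over IPv4 (RFC 768) and is invalid for
     * IPv6.
     */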
1809 if ((inp->inp_flags2 & INP2_CLAT46_FLOW) ||
1810 (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM))) {
1811 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1812 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1813 m->m_pkthdr.csum_flags = (CSUM_UDP | CSUM_ZERO_INVERT);
1814 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1815 } else {
1816 ui->ui_sum = 0;
1817 }
1818 ((struct ip *)ui)->ip_len = sizeof(struct udpiphdr) + len;
1819 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1820 if (tos != IPTOS_UNSPEC) {
1821 ((struct ip *)ui)->ip_tos = (uint8_t)(tos & IPTOS_MASK);
1822 } else {
1823 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1824 }
1825 udpstat.udps_opackets++;
1826
1827 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1828 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1829
1830 #if NECP
1831 {
1832 necp_kernel_policy_id policy_id;
1833 necp_kernel_policy_id skip_policy_id;
1834 u_int32_t route_rule_id;
1835
1836 /*
1837 * We need a route to perform NECP route rule checks
1838 */
1839 if (net_qos_policy_restricted != 0 &&
1840 ROUTE_UNUSABLE(&inp->inp_route)) {
1841 struct sockaddr_in to;
1842 struct sockaddr_in from;
1843
1844 ROUTE_RELEASE(&inp->inp_route);
1845
1846 bzero(&from, sizeof(struct sockaddr_in));
1847 from.sin_family = AF_INET;
1848 from.sin_len = sizeof(struct sockaddr_in);
1849 from.sin_addr = laddr;
1850
1851 bzero(&to, sizeof(struct sockaddr_in));
1852 to.sin_family = AF_INET;
1853 to.sin_len = sizeof(struct sockaddr_in);
1854 to.sin_addr = faddr;
1855
1856 inp->inp_route.ro_dst.sa_family = AF_INET;
1857 inp->inp_route.ro_dst.sa_len = sizeof(struct sockaddr_in);
1858 ((struct sockaddr_in *)(void *)&inp->inp_route.ro_dst)->sin_addr =
1859 faddr;
1860
1861 rtalloc_scoped(&inp->inp_route, ipoa.ipoa_boundif);
1862
1863 inp_update_necp_policy(inp, (struct sockaddr *)&from,
1864 (struct sockaddr *)&to, ipoa.ipoa_boundif);
1865 inp->inp_policyresult.results.qos_marking_gencount = 0;
1866 }
1867
1868 if (!necp_socket_is_allowed_to_send_recv_v4(inp, lport, fport,
1869 &laddr, &faddr, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
1870 error = EHOSTUNREACH;
1871 goto abort;
1872 }
1873
1874 necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);
1875
1876 if (net_qos_policy_restricted != 0) {
1877 necp_socket_update_qos_marking(inp,
1878 inp->inp_route.ro_rt, NULL, route_rule_id);
1879 }
1880 }
1881 #endif /* NECP */
1882 if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1883 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1884 }
1885
1886 #if IPSEC
1887 if (inp->inp_sp != NULL && ipsec_setsocket(m, inp->inp_socket) != 0) {
1888 error = ENOBUFS;
1889 goto abort;
1890 }
1891 #endif /* IPSEC */
1892
1893 inpopts = inp->inp_options;
1894 #if CONTENT_FILTER
1895 if (cfil_tag && (inp->inp_socket->so_options != cfil_so_options)) {
1896 soopts |= (cfil_so_options & (SO_DONTROUTE | SO_BROADCAST));
1897 } else
1898 #endif
1899 soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1900
1901 mopts = inp->inp_moptions;
1902 if (mopts != NULL) {
1903 IMO_LOCK(mopts);
1904 IMO_ADDREF_LOCKED(mopts);
1905 if (IN_MULTICAST(ntohl(ui->ui_dst.s_addr)) &&
1906 mopts->imo_multicast_ifp != NULL) {
1907 /* no reference needed */
1908 inp->inp_last_outifp = mopts->imo_multicast_ifp;
1909 }
1910 IMO_UNLOCK(mopts);
1911 }
1912
1913 /* Copy the cached route and take an extra reference */
1914 inp_route_copyout(inp, &ro);
1915
1916 set_packet_service_class(m, so, sotc, 0);
1917 m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1918 m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
1919 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
1920 m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC);
1921 if (flowadv) {
1922 m->m_pkthdr.pkt_flags |= PKTF_FLOW_ADV;
1923 }
1924 m->m_pkthdr.tx_udp_pid = so->last_pid;
1925 if (so->so_flags & SOF_DELEGATED) {
1926 m->m_pkthdr.tx_udp_e_pid = so->e_pid;
1927 } else {
1928 m->m_pkthdr.tx_udp_e_pid = 0;
1929 }
1930
1931 if (ipoa.ipoa_boundif != IFSCOPE_NONE) {
1932 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1933 }
1934
1935 if (laddr.s_addr != INADDR_ANY) {
1936 ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
1937 }
1938
1939 inp->inp_sndinprog_cnt++;
1940
1941 socket_unlock(so, 0);
1942 error = ip_output(m, inpopts, &ro, soopts, mopts, &ipoa);
1943 m = NULL;
1944 socket_lock(so, 0);
1945 if (mopts != NULL) {
1946 IMO_REMREF(mopts);
1947 }
1948
1949 if (error == 0 && nstat_collect) {
1950 boolean_t cell, wifi, wired;
1951
1952 if (ro.ro_rt != NULL) {
1953 cell = IFNET_IS_CELLULAR(ro.ro_rt->rt_ifp);
1954 wifi = (!cell && IFNET_IS_WIFI(ro.ro_rt->rt_ifp));
1955 wired = (!wifi && IFNET_IS_WIRED(ro.ro_rt->rt_ifp));
1956 } else {
1957 cell = wifi = wired = FALSE;
1958 }
1959 INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1);
1960 INP_ADD_STAT(inp, cell, wifi, wired, txbytes, len);
1961 inp_set_activity_bitmap(inp);
1962 }
1963
1964 if (flowadv && (adv->code == FADV_FLOW_CONTROLLED ||
1965 adv->code == FADV_SUSPENDED)) {
1966 /*
1967 * return a hint to the application that
1968 * the packet has been dropped
1969 */
1970 error = ENOBUFS;
1971 inp_set_fc_state(inp, adv->code);
1972 }
1973
1974 VERIFY(inp->inp_sndinprog_cnt > 0);
1975 if (--inp->inp_sndinprog_cnt == 0) {
1976 inp->inp_flags &= ~(INP_FC_FEEDBACK);
1977 if (inp->inp_sndingprog_waiters > 0) {
1978 wakeup(&inp->inp_sndinprog_cnt);
1979 }
1980 }
1981
1982 /* Synchronize PCB cached route */
1983 inp_route_copyin(inp, &ro);
1984
1985 abort:
1986 if (udp_dodisconnect) {
1987 /* Always discard the cached route for an unconnected socket */
1988 ROUTE_RELEASE(&inp->inp_route);
1989 in_pcbdisconnect(inp);
1990 inp->inp_laddr = origladdr; /* XXX rehash? */
1991 /* no reference needed */
1992 inp->inp_last_outifp = origoutifp;
1993 } else if (inp->inp_route.ro_rt != NULL) {
1994 struct rtentry *rt = inp->inp_route.ro_rt;
1995 struct ifnet *outifp;
1996
1997 if (rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) {
1998 rt = NULL; /* unusable */
1999 }
2000 #if CONTENT_FILTER
2001 /*
2002 * Discard temporary route for cfil case
2003 */
2004 if (cfil_faddr_use) {
2005 rt = NULL; /* unusable */
2006 }
2007 #endif
2008
2009 /*
2010 * Always discard the route if it was marked unusable above.
2011 */
2012 if (rt == NULL) {
2013 ROUTE_RELEASE(&inp->inp_route);
2014 }
2015
2016 /*
2017 * If the destination route is unicast, update the cached
2018 * last-output interface with the one IP actually used.
2019 */
2020 if (rt != NULL &&
2021 (outifp = rt->rt_ifp) != inp->inp_last_outifp) {
2022 inp->inp_last_outifp = outifp; /* no reference needed */
2023
2024 so->so_pktheadroom = P2ROUNDUP(
2025 sizeof(struct udphdr) +
2026 sizeof(struct ip) +
2027 ifnet_hdrlen(outifp) +
2028 ifnet_mbuf_packetpreamblelen(outifp),
2029 sizeof(u_int32_t));
2030 }
2031 } else {
2032 ROUTE_RELEASE(&inp->inp_route);
2033 }
2034
2035 /*
2036 * If output interface was cellular/expensive, and this socket is
2037 * denied access to it, generate an event.
2038 */
2039 if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
2040 (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) {
2041 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
2042 }
2043
2044 release:
2045 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
2046
2047 if (m != NULL) {
2048 m_freem(m);
2049 }
2050
2051 if (outif != NULL) {
2052 ifnet_release(outif);
2053 }
2054
2055 #if CONTENT_FILTER
2056 if (cfil_tag) {
2057 m_tag_free(cfil_tag);
2058 }
2059 #endif
2060
2061 return error;
2062 }
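
/*
 * Example (editor's sketch, not part of the original source): the
 * INP_UDP_NOCKSUM test in the checksum block above is driven from
 * userland by the UDP_NOCKSUM socket option; a minimal use, assuming
 * <netinet/udp.h> declares UDP_NOCKSUM on this platform:
 *
 *	int on = 1;
 *	if (setsockopt(s, IPPROTO_UDP, UDP_NOCKSUM, &on, sizeof(on)) == 0)
 *		;	// outbound datagrams now carry uh_sum == 0
 *
 * This only skips the sender-side checksum; udp_input_checksum() below
 * still accepts such datagrams, since uh_sum == 0 means "no checksum"
 * for UDP over IPv4.
 */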
2063
2064 u_int32_t udp_sendspace = 9216; /* really max datagram size */
2065 /* 187 1K datagrams (approx 192 KB) */
2066 u_int32_t udp_recvspace = 187 * (1024 +
2067 #if INET6
2068 sizeof(struct sockaddr_in6)
2069 #else /* !INET6 */
2070 sizeof(struct sockaddr_in)
2071 #endif /* !INET6 */
2072 );
2073
2074 /* Check that the values of udp send and recv space do not exceed sb_max */
2075 static int
2076 sysctl_udp_sospace(struct sysctl_oid *oidp, void *arg1, int arg2,
2077 struct sysctl_req *req)
2078 {
2079 #pragma unused(arg1, arg2)
2080 u_int32_t new_value = 0, *space_p = NULL;
2081 int changed = 0, error = 0;
2082 u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES;
2083
2084 switch (oidp->oid_number) {
2085 case UDPCTL_RECVSPACE:
2086 space_p = &udp_recvspace;
2087 break;
2088 case UDPCTL_MAXDGRAM:
2089 space_p = &udp_sendspace;
2090 break;
2091 default:
2092 return EINVAL;
2093 }
2094 error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
2095 &new_value, &changed);
2096 if (changed) {
2097 if (new_value > 0 && new_value <= sb_effective_max) {
2098 *space_p = new_value;
2099 } else {
2100 error = ERANGE;
2101 }
2102 }
2103 return error;
2104 }
2105
2106 SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace,
2107 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_recvspace, 0,
2108 &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
2109
2110 SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram,
2111 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_sendspace, 0,
2112 &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
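
/*
 * Example (editor's sketch): these handlers back the
 * "net.inet.udp.recvspace" and "net.inet.udp.maxdgram" sysctls; a
 * userland reader/writer, assuming the standard <sys/sysctl.h> API:
 *
 *	uint32_t v;
 *	size_t len = sizeof(v);
 *	sysctlbyname("net.inet.udp.maxdgram", &v, &len, NULL, 0);
 *	v = 16384;	// rejected with ERANGE if above sb_effective_max
 *	sysctlbyname("net.inet.udp.maxdgram", NULL, NULL, &v, sizeof(v));
 */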
2113
2114 int
2115 udp_abort(struct socket *so)
2116 {
2117 struct inpcb *inp;
2118
2119 inp = sotoinpcb(so);
2120 if (inp == NULL) {
2121 panic("%s: so=%p null inp\n", __func__, so);
2122 /* NOTREACHED */
2123 }
2124 soisdisconnected(so);
2125 in_pcbdetach(inp);
2126 return 0;
2127 }
2128
2129 int
2130 udp_attach(struct socket *so, int proto, struct proc *p)
2131 {
2132 #pragma unused(proto)
2133 struct inpcb *inp;
2134 int error;
2135
2136 inp = sotoinpcb(so);
2137 if (inp != NULL) {
2138 panic("%s so=%p inp=%p\n", __func__, so, inp);
2139 /* NOTREACHED */
2140 }
2141 error = in_pcballoc(so, &udbinfo, p);
2142 if (error != 0) {
2143 return error;
2144 }
2145 error = soreserve(so, udp_sendspace, udp_recvspace);
2146 if (error != 0) {
2147 return error;
2148 }
2149 inp = (struct inpcb *)so->so_pcb;
2150 inp->inp_vflag |= INP_IPV4;
2151 inp->inp_ip_ttl = ip_defttl;
2152 if (nstat_collect) {
2153 nstat_udp_new_pcb(inp);
2154 }
2155 return 0;
2156 }
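
/*
 * Example (editor's sketch): udp_attach() runs for every IPv4 datagram
 * socket created from userland, e.g.:
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);	// ends up in udp_attach()
 *
 * and the soreserve() call above is what makes udp_sendspace and
 * udp_recvspace the default buffer limits for such sockets.
 */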
2157
2158 int
2159 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
2160 {
2161 struct inpcb *inp;
2162 int error;
2163
2164 if (nam->sa_family != 0 && nam->sa_family != AF_INET &&
2165 nam->sa_family != AF_INET6) {
2166 return EAFNOSUPPORT;
2167 }
2168
2169 inp = sotoinpcb(so);
2170 if (inp == NULL) {
2171 return EINVAL;
2172 }
2173 error = in_pcbbind(inp, nam, p);
2174
2175 #if NECP
2176 /* Update NECP client with bind result if not in middle of connect */
2177 if (error == 0 &&
2178 !(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
2179 !uuid_is_null(inp->necp_client_uuid)) {
2180 socket_unlock(so, 0);
2181 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2182 socket_lock(so, 0);
2183 }
2184 #endif /* NECP */
2185
2186 return error;
2187 }
2188
2189 int
2190 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
2191 {
2192 struct inpcb *inp;
2193 int error;
2194
2195 inp = sotoinpcb(so);
2196 if (inp == NULL) {
2197 return EINVAL;
2198 }
2199 if (inp->inp_faddr.s_addr != INADDR_ANY) {
2200 return EISCONN;
2201 }
2202
2203 if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
2204 so->so_flags1 |= SOF1_CONNECT_COUNTED;
2205 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
2206 }
2207
2208 #if NECP
2209 #if FLOW_DIVERT
2210 if (necp_socket_should_use_flow_divert(inp)) {
2211 uint32_t fd_ctl_unit =
2212 necp_socket_get_flow_divert_control_unit(inp);
2213 if (fd_ctl_unit > 0) {
2214 error = flow_divert_pcb_init(so, fd_ctl_unit);
2215 if (error == 0) {
2216 error = flow_divert_connect_out(so, nam, p);
2217 }
2218 } else {
2219 error = ENETDOWN;
2220 }
2221 return error;
2222 }
2223 #endif /* FLOW_DIVERT */
2224 #endif /* NECP */
2225
2226 error = in_pcbconnect(inp, nam, p, IFSCOPE_NONE, NULL);
2227 if (error == 0) {
2228 #if NECP
2229 /* Update NECP client with connected five-tuple */
2230 if (!uuid_is_null(inp->necp_client_uuid)) {
2231 socket_unlock(so, 0);
2232 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2233 socket_lock(so, 0);
2234 }
2235 #endif /* NECP */
2236
2237 soisconnected(so);
2238 if (inp->inp_flowhash == 0) {
2239 inp->inp_flowhash = inp_calc_flowhash(inp);
2240 }
2241 }
2242 return error;
2243 }
2244
2245 int
2246 udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sockaddr *dst,
2247 struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
2248 uint32_t flags, void *arg, uint32_t arglen,
2249 struct uio *uio, user_ssize_t *bytes_written)
2250 {
2251 #pragma unused(aid, flags, arg, arglen)
2252 struct inpcb *inp = sotoinpcb(so);
2253 int error = 0;
2254 user_ssize_t datalen = 0;
2255
2256 if (inp == NULL) {
2257 return EINVAL;
2258 }
2259
2260 VERIFY(dst != NULL);
2261
2262 ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
2263 inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;
2264
2265 #if NECP
2266 inp_update_necp_policy(inp, src, dst, ifscope);
2267 #endif /* NECP */
2268
2269 /* bind socket to the specified interface, if requested */
2270 if (ifscope != IFSCOPE_NONE &&
2271 (error = inp_bindif(inp, ifscope, NULL)) != 0) {
2272 goto done;
2273 }
2274
2275 /* if source address and/or port is specified, bind to it */
2276 if (src != NULL) {
2277 error = sobindlock(so, src, 0); /* already locked */
2278 if (error != 0) {
2279 goto done;
2280 }
2281 }
2282
2283 switch (af) {
2284 case AF_INET:
2285 error = udp_connect(so, dst, p);
2286 break;
2287 #if INET6
2288 case AF_INET6:
2289 error = udp6_connect(so, dst, p);
2290 break;
2291 #endif /* INET6 */
2292 default:
2293 VERIFY(0);
2294 /* NOTREACHED */
2295 }
2296
2297 if (error != 0) {
2298 goto done;
2299 }
2300
2301 /*
2302 * If there is data, send it now; the DATA_IDEMPOTENT and
2303 * CONNECT_RESUME_ON_READ_WRITE flags are ignored for UDP.
2304 */
2305 if (uio != NULL) {
2306 socket_unlock(so, 0);
2307
2308 VERIFY(bytes_written != NULL);
2309
2310 datalen = uio_resid(uio);
2311 error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
2312 (uio_t)uio, NULL, NULL, 0);
2313 socket_lock(so, 0);
2314
2315 /* On any error other than EWOULDBLOCK (e.g. EMSGSIZE), disconnect */
2316 if (error == 0 || error == EWOULDBLOCK) {
2317 *bytes_written = datalen - uio_resid(uio);
2318 } else {
2319 (void) so->so_proto->pr_usrreqs->pru_disconnectx(so,
2320 SAE_ASSOCID_ANY, SAE_CONNID_ANY);
2321 }
2322 /*
2323 * mask the EWOULDBLOCK error so that the caller
2324 * knows that at least the connect was successful.
2325 */
2326 if (error == EWOULDBLOCK) {
2327 error = 0;
2328 }
2329 }
2330
2331 if (error == 0 && pcid != NULL) {
2332 *pcid = 1; /* there is only 1 connection for UDP */
2333 }
2334 done:
2335 inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
2336 return error;
2337 }
2338
2339 int
2340 udp_connectx(struct socket *so, struct sockaddr *src,
2341 struct sockaddr *dst, struct proc *p, uint32_t ifscope,
2342 sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
2343 uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
2344 {
2345 return udp_connectx_common(so, AF_INET, src, dst,
2346 p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written);
2347 }
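
/*
 * Example (editor's sketch): udp_connectx() is reached via the Darwin
 * connectx(2) system call; a minimal userland use, assuming the
 * sa_endpoints_t declarations from <sys/socket.h>:
 *
 *	struct sockaddr_in dst;		// filled in elsewhere
 *	sa_endpoints_t sae = { 0 };
 *	sae_connid_t cid;
 *	sae.sae_dstaddr = (struct sockaddr *)&dst;
 *	sae.sae_dstaddrlen = sizeof(dst);
 *	if (connectx(s, &sae, SAE_ASSOCID_ANY, 0, NULL, 0, NULL, &cid) == 0)
 *		assert(cid == 1);	// UDP has one connection per socket
 *
 * Passing an iovec and a length pointer instead of the NULLs exercises
 * the uio path in udp_connectx_common() above (connect-and-send).
 */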
2348
2349 int
2350 udp_detach(struct socket *so)
2351 {
2352 struct inpcb *inp;
2353
2354 inp = sotoinpcb(so);
2355 if (inp == NULL) {
2356 panic("%s: so=%p null inp\n", __func__, so);
2357 /* NOTREACHED */
2358 }
2359
2360 /*
2361 * If this is a socket that does not want to wake the device
2362 * for its traffic, the application might be waiting for
2363 * close to complete before going to sleep. Send a notification
2364 * for this kind of socket.
2365 */
2366 if (so->so_options & SO_NOWAKEFROMSLEEP) {
2367 socket_post_kev_msg_closed(so);
2368 }
2369
2370 in_pcbdetach(inp);
2371 inp->inp_state = INPCB_STATE_DEAD;
2372 return 0;
2373 }
2374
2375 int
2376 udp_disconnect(struct socket *so)
2377 {
2378 struct inpcb *inp;
2379
2380 inp = sotoinpcb(so);
2381 if (inp == NULL
2382 #if NECP
2383 || (necp_socket_should_use_flow_divert(inp))
2384 #endif /* NECP */
2385 ) {
2386 return inp == NULL ? EINVAL : EPROTOTYPE;
2387 }
2388 if (inp->inp_faddr.s_addr == INADDR_ANY) {
2389 return ENOTCONN;
2390 }
2391
2392 in_pcbdisconnect(inp);
2393
2394 /* reset flow controlled state, just in case */
2395 inp_reset_fc_state(inp);
2396
2397 inp->inp_laddr.s_addr = INADDR_ANY;
2398 so->so_state &= ~SS_ISCONNECTED; /* XXX */
2399 inp->inp_last_outifp = NULL;
2400
2401 return 0;
2402 }
2403
2404 int
2405 udp_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
2406 {
2407 #pragma unused(cid)
2408 if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
2409 return EINVAL;
2410 }
2411
2412 return udp_disconnect(so);
2413 }
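
/*
 * Example (editor's sketch): udp_disconnectx() backs the Darwin
 * disconnectx(2) system call, which dissolves the association without
 * closing the socket:
 *
 *	disconnectx(s, SAE_ASSOCID_ANY, SAE_CONNID_ANY);
 *
 * After this returns 0 the socket can be connected again; ENOTCONN
 * comes back if there was no peer, matching udp_disconnect() above.
 */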
2414
2415 int
2416 udp_send(struct socket *so, int flags, struct mbuf *m,
2417 struct sockaddr *addr, struct mbuf *control, struct proc *p)
2418 {
2419 #ifndef FLOW_DIVERT
2420 #pragma unused(flags)
2421 #endif /* !(FLOW_DIVERT) */
2422 struct inpcb *inp;
2423
2424 inp = sotoinpcb(so);
2425 if (inp == NULL) {
2426 if (m != NULL) {
2427 m_freem(m);
2428 }
2429 if (control != NULL) {
2430 m_freem(control);
2431 }
2432 return EINVAL;
2433 }
2434
2435 #if NECP
2436 #if FLOW_DIVERT
2437 if (necp_socket_should_use_flow_divert(inp)) {
2438 /* Implicit connect */
2439 return flow_divert_implicit_data_out(so, flags, m, addr,
2440 control, p);
2441 }
2442 #endif /* FLOW_DIVERT */
2443 #endif /* NECP */
2444
2445 return udp_output(inp, m, addr, control, p);
2446 }
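
/*
 * Example (editor's sketch): udp_send() is the pru_send entry point,
 * reached both by send(2) on a connected socket and by sendto(2) with
 * an explicit destination:
 *
 *	sendto(s, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * Either way the datagram ends up in udp_output() unless NECP diverts
 * the flow (flow_divert_implicit_data_out() above).
 */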
2447
2448 int
2449 udp_shutdown(struct socket *so)
2450 {
2451 struct inpcb *inp;
2452
2453 inp = sotoinpcb(so);
2454 if (inp == NULL) {
2455 return EINVAL;
2456 }
2457 socantsendmore(so);
2458 return 0;
2459 }
2460
2461 int
2462 udp_lock(struct socket *so, int refcount, void *debug)
2463 {
2464 void *lr_saved;
2465
2466 if (debug == NULL) {
2467 lr_saved = __builtin_return_address(0);
2468 } else {
2469 lr_saved = debug;
2470 }
2471
2472 if (so->so_pcb != NULL) {
2473 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2474 LCK_MTX_ASSERT_NOTOWNED);
2475 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2476 } else {
2477 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2478 so, lr_saved, solockhistory_nr(so));
2479 /* NOTREACHED */
2480 }
2481 if (refcount) {
2482 so->so_usecount++;
2483 }
2484
2485 so->lock_lr[so->next_lock_lr] = lr_saved;
2486 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2487 return 0;
2488 }
2489
2490 int
2491 udp_unlock(struct socket *so, int refcount, void *debug)
2492 {
2493 void *lr_saved;
2494
2495 if (debug == NULL) {
2496 lr_saved = __builtin_return_address(0);
2497 } else {
2498 lr_saved = debug;
2499 }
2500
2501 if (refcount) {
2502 VERIFY(so->so_usecount > 0);
2503 so->so_usecount--;
2504 }
2505 if (so->so_pcb == NULL) {
2506 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2507 so, lr_saved, solockhistory_nr(so));
2508 /* NOTREACHED */
2509 } else {
2510 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2511 LCK_MTX_ASSERT_OWNED);
2512 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2513 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2514 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2515 }
2516 return 0;
2517 }
2518
2519 lck_mtx_t *
2520 udp_getlock(struct socket *so, int flags)
2521 {
2522 #pragma unused(flags)
2523 struct inpcb *inp = sotoinpcb(so);
2524
2525 if (so->so_pcb == NULL) {
2526 panic("%s: so=%p NULL so_pcb lrh= %s\n", __func__,
2527 so, solockhistory_nr(so));
2528 /* NOTREACHED */
2529 }
2530 return &inp->inpcb_mtx;
2531 }
2532
2533 /*
2534 * UDP garbage collector callback (inpcb_timer_func_t).
2535 *
2536 * Keeps itself scheduled by bumping ipi_gc_req.intimer_fast while
2537 * collectable PCBs remain.
2537 */
2538 static void
2539 udp_gc(struct inpcbinfo *ipi)
2540 {
2541 struct inpcb *inp, *inpnxt;
2542 struct socket *so;
2543
2544 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2545 if (udp_gc_done == TRUE) {
2546 udp_gc_done = FALSE;
2547 /* couldn't get the lock, must lock next time */
2548 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2549 return;
2550 }
2551 lck_rw_lock_exclusive(ipi->ipi_lock);
2552 }
2553
2554 udp_gc_done = TRUE;
2555
2556 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
2557 inpnxt = inp->inp_list.le_next;
2558
2559 /*
2560 * Skip unless it's STOPUSING; garbage collector will
2561 * be triggered by in_pcb_checkstate() upon setting
2562 * wantcnt to that value. If the PCB is already dead,
2563 * keep gc active to anticipate wantcnt changing.
2564 */
2565 if (inp->inp_wantcnt != WNT_STOPUSING) {
2566 continue;
2567 }
2568
2569 /*
2570 * Skip if busy, no hurry for cleanup. Keep gc active
2571 * and try the lock again during next round.
2572 */
2573 if (!socket_try_lock(inp->inp_socket)) {
2574 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2575 continue;
2576 }
2577
2578 /*
2579 * Keep gc active unless usecount is 0.
2580 */
2581 so = inp->inp_socket;
2582 if (so->so_usecount == 0) {
2583 if (inp->inp_state != INPCB_STATE_DEAD) {
2584 #if INET6
2585 if (SOCK_CHECK_DOM(so, PF_INET6)) {
2586 in6_pcbdetach(inp);
2587 } else
2588 #endif /* INET6 */
2589 in_pcbdetach(inp);
2590 }
2591 in_pcbdispose(inp);
2592 } else {
2593 socket_unlock(so, 0);
2594 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2595 }
2596 }
2597 lck_rw_done(ipi->ipi_lock);
2598 }
2599
2600 static int
2601 udp_getstat SYSCTL_HANDLER_ARGS
2602 {
2603 #pragma unused(oidp, arg1, arg2)
2604 if (req->oldptr == USER_ADDR_NULL) {
2605 req->oldlen = (size_t)sizeof(struct udpstat);
2606 }
2607
2608 return SYSCTL_OUT(req, &udpstat, MIN(sizeof(udpstat), req->oldlen));
2609 }
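
/*
 * Example (editor's sketch): this handler serves "net.inet.udp.stats";
 * a userland reader, assuming struct udpstat from <netinet/udp_var.h>:
 *
 *	struct udpstat us;
 *	size_t len = sizeof(us);
 *	if (sysctlbyname("net.inet.udp.stats", &us, &len, NULL, 0) == 0)
 *		printf("opackets=%u badsum=%u\n",
 *		    us.udps_opackets, us.udps_badsum);
 */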
2610
2611 void
2612 udp_in_cksum_stats(u_int32_t len)
2613 {
2614 udpstat.udps_rcv_swcsum++;
2615 udpstat.udps_rcv_swcsum_bytes += len;
2616 }
2617
2618 void
2619 udp_out_cksum_stats(u_int32_t len)
2620 {
2621 udpstat.udps_snd_swcsum++;
2622 udpstat.udps_snd_swcsum_bytes += len;
2623 }
2624
2625 #if INET6
2626 void
2627 udp_in6_cksum_stats(u_int32_t len)
2628 {
2629 udpstat.udps_rcv6_swcsum++;
2630 udpstat.udps_rcv6_swcsum_bytes += len;
2631 }
2632
2633 void
2634 udp_out6_cksum_stats(u_int32_t len)
2635 {
2636 udpstat.udps_snd6_swcsum++;
2637 udpstat.udps_snd6_swcsum_bytes += len;
2638 }
2639 #endif /* INET6 */
2640
2641 /*
2642 * Checksum extended UDP header and data.
2643 */
2644 static int
2645 udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen)
2646 {
2647 struct ifnet *ifp = m->m_pkthdr.rcvif;
2648 struct ip *ip = mtod(m, struct ip *);
2649 struct ipovly *ipov = (struct ipovly *)ip;
2650
2651 if (uh->uh_sum == 0) {
2652 udpstat.udps_nosum++;
2653 return 0;
2654 }
2655
2656 /* ip_stripoptions() must have been called before we get here */
2657 ASSERT((ip->ip_hl << 2) == sizeof(*ip));
2658
2659 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
2660 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
2661 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
2662 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
2663 uh->uh_sum = m->m_pkthdr.csum_rx_val;
2664 } else {
2665 uint32_t sum = m->m_pkthdr.csum_rx_val;
2666 uint32_t start = m->m_pkthdr.csum_rx_start;
2667 int32_t trailer = (m_pktlen(m) - (off + ulen));
2668
2669 /*
2670 * Perform 1's complement adjustment of octets
2671 * that got included/excluded in the hardware-
2672 * calculated checksum value. Ignore cases
2673 * where the value already includes the entire
2674 * IP header span, as the sum for those octets
2675 * would already be 0 by the time we get here;
2676 * IP has already performed its header checksum
2677 * checks. If we do need to adjust, restore
2678 * the original fields in the IP header when
2679 * computing the adjustment value. Also take
2680 * care of any trailing bytes and subtract out
2681 * their partial sum.
2682 */
2683 ASSERT(trailer >= 0);
2684 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
2685 ((start != 0 && start != off) || trailer != 0)) {
2686 uint32_t swbytes = (uint32_t)trailer;
2687
2688 if (start < off) {
2689 ip->ip_len += sizeof(*ip);
2690 #if BYTE_ORDER != BIG_ENDIAN
2691 HTONS(ip->ip_len);
2692 HTONS(ip->ip_off);
2693 #endif /* BYTE_ORDER != BIG_ENDIAN */
2694 }
2695 /* callee folds in sum */
2696 sum = m_adj_sum16(m, start, off, ulen, sum);
2697 if (off > start) {
2698 swbytes += (off - start);
2699 } else {
2700 swbytes += (start - off);
2701 }
2702
2703 if (start < off) {
2704 #if BYTE_ORDER != BIG_ENDIAN
2705 NTOHS(ip->ip_off);
2706 NTOHS(ip->ip_len);
2707 #endif /* BYTE_ORDER != BIG_ENDIAN */
2708 ip->ip_len -= sizeof(*ip);
2709 }
2710
2711 if (swbytes != 0) {
2712 udp_in_cksum_stats(swbytes);
2713 }
2714 if (trailer != 0) {
2715 m_adj(m, -trailer);
2716 }
2717 }
2718
2719 /* callee folds in sum */
2720 uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
2721 ip->ip_dst.s_addr, sum + htonl(ulen + IPPROTO_UDP));
2722 }
2723 uh->uh_sum ^= 0xffff;
2724 } else {
2725 uint16_t ip_sum;
2726 char b[9];
2727
2728 bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1));
2729 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
2730 ip_sum = ipov->ih_len;
2731 ipov->ih_len = uh->uh_ulen;
2732 uh->uh_sum = in_cksum(m, ulen + sizeof(struct ip));
2733 bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1));
2734 ipov->ih_len = ip_sum;
2735
2736 udp_in_cksum_stats(ulen);
2737 }
2738
2739 if (uh->uh_sum != 0) {
2740 udpstat.udps_badsum++;
2741 IF_UDP_STATINC(ifp, badchksum);
2742 return -1;
2743 }
2744
2745 return 0;
2746 }
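
/*
 * Example (editor's sketch): both branches above reduce to standard
 * 16-bit one's-complement folding; a minimal userland model of the
 * software path (in_cksum() over pseudo-header + UDP header + data):
 *
 *	static uint16_t
 *	cksum16(const void *buf, size_t len, uint32_t sum)
 *	{
 *		const uint16_t *p = buf;
 *
 *		while (len > 1) {
 *			sum += *p++;
 *			len -= 2;
 *		}
 *		if (len != 0)
 *			sum += *(const uint8_t *)p;	// odd trailing byte
 *		while (sum >> 16)			// fold the carries
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 *
 * Seeding sum with the pseudo-header words (src, dst, proto, UDP length)
 * and running cksum16() over the datagram, checksum field included,
 * yields 0 for a valid packet -- the uh_sum != 0 test above.
 */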
2747
2748 void
2749 udp_fill_keepalive_offload_frames(ifnet_t ifp,
2750 struct ifnet_keepalive_offload_frame *frames_array,
2751 u_int32_t frames_array_count, size_t frame_data_offset,
2752 u_int32_t *used_frames_count)
2753 {
2754 struct inpcb *inp;
2755 inp_gen_t gencnt;
2756 u_int32_t frame_index = *used_frames_count;
2757
2758 if (ifp == NULL || frames_array == NULL ||
2759 frames_array_count == 0 ||
2760 frame_index >= frames_array_count ||
2761 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2762 return;
2763 }
2764
2765 lck_rw_lock_shared(udbinfo.ipi_lock);
2766 gencnt = udbinfo.ipi_gencnt;
2767 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
2768 struct socket *so;
2769 u_int8_t *data;
2770 struct ifnet_keepalive_offload_frame *frame;
2771 struct mbuf *m = NULL;
2772
2773 if (frame_index >= frames_array_count) {
2774 break;
2775 }
2776
2777 if (inp->inp_gencnt > gencnt ||
2778 inp->inp_state == INPCB_STATE_DEAD) {
2779 continue;
2780 }
2781
2782 if ((so = inp->inp_socket) == NULL ||
2783 (so->so_state & SS_DEFUNCT)) {
2784 continue;
2785 }
2786 /*
2787 * check for keepalive offload flag without socket
2788 * lock to avoid a deadlock
2789 */
2790 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
2791 continue;
2792 }
2793
2794 udp_lock(so, 1, 0);
2795 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
2796 udp_unlock(so, 1, 0);
2797 continue;
2798 }
2799 if ((inp->inp_vflag & INP_IPV4) &&
2800 (inp->inp_laddr.s_addr == INADDR_ANY ||
2801 inp->inp_faddr.s_addr == INADDR_ANY)) {
2802 udp_unlock(so, 1, 0);
2803 continue;
2804 }
2805 if ((inp->inp_vflag & INP_IPV6) &&
2806 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
2807 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
2808 udp_unlock(so, 1, 0);
2809 continue;
2810 }
2811 if (inp->inp_lport == 0 || inp->inp_fport == 0) {
2812 udp_unlock(so, 1, 0);
2813 continue;
2814 }
2815 if (inp->inp_last_outifp == NULL ||
2816 inp->inp_last_outifp->if_index != ifp->if_index) {
2817 udp_unlock(so, 1, 0);
2818 continue;
2819 }
2820 if ((inp->inp_vflag & INP_IPV4)) {
2821 if ((frame_data_offset + sizeof(struct udpiphdr) +
2822 inp->inp_keepalive_datalen) >
2823 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2824 udp_unlock(so, 1, 0);
2825 continue;
2826 }
2827 if ((sizeof(struct udpiphdr) +
2828 inp->inp_keepalive_datalen) > _MHLEN) {
2829 udp_unlock(so, 1, 0);
2830 continue;
2831 }
2832 } else {
2833 if ((frame_data_offset + sizeof(struct ip6_hdr) +
2834 sizeof(struct udphdr) +
2835 inp->inp_keepalive_datalen) >
2836 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2837 udp_unlock(so, 1, 0);
2838 continue;
2839 }
2840 if ((sizeof(struct ip6_hdr) + sizeof(struct udphdr) +
2841 inp->inp_keepalive_datalen) > _MHLEN) {
2842 udp_unlock(so, 1, 0);
2843 continue;
2844 }
2845 }
2846 MGETHDR(m, M_WAIT, MT_HEADER);
2847 if (m == NULL) {
2848 udp_unlock(so, 1, 0);
2849 continue;
2850 }
2851 /*
2852 * This inp has all the information that is needed to
2853 * generate an offload frame.
2854 */
2855 if (inp->inp_vflag & INP_IPV4) {
2856 struct ip *ip;
2857 struct udphdr *udp;
2858
2859 frame = &frames_array[frame_index];
2860 frame->length = frame_data_offset +
2861 sizeof(struct udpiphdr) +
2862 inp->inp_keepalive_datalen;
2863 frame->ether_type =
2864 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
2865 frame->interval = inp->inp_keepalive_interval;
2866 switch (inp->inp_keepalive_type) {
2867 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2868 frame->type =
2869 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2870 break;
2871 default:
2872 break;
2873 }
2874 data = mtod(m, u_int8_t *);
2875 bzero(data, sizeof(struct udpiphdr));
2876 ip = (__typeof__(ip))(void *)data;
2877 udp = (__typeof__(udp))(void *) (data +
2878 sizeof(struct ip));
2879 m->m_len = sizeof(struct udpiphdr);
2880 data = data + sizeof(struct udpiphdr);
2881 if (inp->inp_keepalive_datalen > 0 &&
2882 inp->inp_keepalive_data != NULL) {
2883 bcopy(inp->inp_keepalive_data, data,
2884 inp->inp_keepalive_datalen);
2885 m->m_len += inp->inp_keepalive_datalen;
2886 }
2887 m->m_pkthdr.len = m->m_len;
2888
2889 ip->ip_v = IPVERSION;
2890 ip->ip_hl = (sizeof(struct ip) >> 2);
2891 ip->ip_p = IPPROTO_UDP;
2892 ip->ip_len = htons(sizeof(struct udpiphdr) +
2893 (u_short)inp->inp_keepalive_datalen);
2894 ip->ip_ttl = inp->inp_ip_ttl;
2895 ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
2896 ip->ip_src = inp->inp_laddr;
2897 ip->ip_dst = inp->inp_faddr;
2898 ip->ip_sum = in_cksum_hdr_opt(ip);
2899
2900 udp->uh_sport = inp->inp_lport;
2901 udp->uh_dport = inp->inp_fport;
2902 udp->uh_ulen = htons(sizeof(struct udphdr) +
2903 (u_short)inp->inp_keepalive_datalen);
2904
2905 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2906 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
2907 ip->ip_dst.s_addr,
2908 htons(sizeof(struct udphdr) +
2909 (u_short)inp->inp_keepalive_datalen +
2910 IPPROTO_UDP));
2911 m->m_pkthdr.csum_flags =
2912 (CSUM_UDP | CSUM_ZERO_INVERT);
2913 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2914 uh_sum);
2915 }
2916 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2917 in_delayed_cksum(m);
2918 bcopy(m->m_data, frame->data + frame_data_offset,
2919 m->m_len);
2920 } else {
2921 struct ip6_hdr *ip6;
2922 struct udphdr *udp6;
2923
2924 VERIFY(inp->inp_vflag & INP_IPV6);
2925 frame = &frames_array[frame_index];
2926 frame->length = frame_data_offset +
2927 sizeof(struct ip6_hdr) +
2928 sizeof(struct udphdr) +
2929 inp->inp_keepalive_datalen;
2930 frame->ether_type =
2931 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
2932 frame->interval = inp->inp_keepalive_interval;
2933 switch (inp->inp_keepalive_type) {
2934 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2935 frame->type =
2936 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2937 break;
2938 default:
2939 break;
2940 }
2941 data = mtod(m, u_int8_t *);
2942 bzero(data, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
2943 ip6 = (__typeof__(ip6))(void *)data;
2944 udp6 = (__typeof__(udp6))(void *)(data +
2945 sizeof(struct ip6_hdr));
2946 m->m_len = sizeof(struct ip6_hdr) +
2947 sizeof(struct udphdr);
2948 data = data + (sizeof(struct ip6_hdr) +
2949 sizeof(struct udphdr));
2950 if (inp->inp_keepalive_datalen > 0 &&
2951 inp->inp_keepalive_data != NULL) {
2952 bcopy(inp->inp_keepalive_data, data,
2953 inp->inp_keepalive_datalen);
2954 m->m_len += inp->inp_keepalive_datalen;
2955 }
2956 m->m_pkthdr.len = m->m_len;
2957 ip6->ip6_flow = inp->inp_flow & IPV6_FLOWINFO_MASK;
2958 ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
2959 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2960 ip6->ip6_vfc |= IPV6_VERSION;
2961 ip6->ip6_nxt = IPPROTO_UDP;
2962 ip6->ip6_hlim = ip6_defhlim;
2963 ip6->ip6_plen = htons(sizeof(struct udphdr) +
2964 (u_short)inp->inp_keepalive_datalen);
2965 ip6->ip6_src = inp->in6p_laddr;
2966 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
2967 ip6->ip6_src.s6_addr16[1] = 0;
2968 }
2969
2970 ip6->ip6_dst = inp->in6p_faddr;
2971 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
2972 ip6->ip6_dst.s6_addr16[1] = 0;
2973 }
2974
2975 udp6->uh_sport = inp->in6p_lport;
2976 udp6->uh_dport = inp->in6p_fport;
2977 udp6->uh_ulen = htons(sizeof(struct udphdr) +
2978 (u_short)inp->inp_keepalive_datalen);
2979 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2980 udp6->uh_sum = in6_pseudo(&ip6->ip6_src,
2981 &ip6->ip6_dst,
2982 htonl(sizeof(struct udphdr) +
2983 (u_short)inp->inp_keepalive_datalen +
2984 IPPROTO_UDP));
2985 m->m_pkthdr.csum_flags =
2986 (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
2987 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2988 uh_sum);
2989 }
2990 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2991 in6_delayed_cksum(m);
2992 bcopy(m->m_data, frame->data + frame_data_offset,
2993 m->m_len);
2994 }
2995 if (m != NULL) {
2996 m_freem(m);
2997 m = NULL;
2998 }
2999 frame_index++;
3000 udp_unlock(so, 1, 0);
3001 }
3002 lck_rw_done(udbinfo.ipi_lock);
3003 *used_frames_count = frame_index;
3004 }
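
/*
 * Example (editor's sketch): the frames assembled above originate from
 * the (private) UDP_KEEPALIVE_OFFLOAD socket option; roughly, assuming
 * the struct udp_keepalive_offload layout from this tree's
 * <netinet/udp.h>:
 *
 *	struct udp_keepalive_offload ka = { 0 };
 *	memcpy(ka.ka_data, payload, payload_len);
 *	ka.ka_data_len = payload_len;
 *	ka.ka_interval = 30;	// seconds between offloaded keepalives
 *	ka.ka_type = UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY;
 *	setsockopt(s, IPPROTO_UDP, UDP_KEEPALIVE_OFFLOAD, &ka, sizeof(ka));
 *
 * The socket must be bound and connected over the offloading interface:
 * the loop above skips any PCB without a full four-tuple or whose
 * last-output interface differs from ifp.
 */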