/* apple/xnu (xnu-7195.101.1) - bsd/netinet/udp_usrreq.c */
/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *      The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)udp_usrreq.c        8.6 (Berkeley) 5/23/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <net/ntstat.h>

#include <kern/zalloc.h>
#include <mach/boolean.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/dlil.h>
#include <net/net_api_stats.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/udp6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/kdebug.h>

#if IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netkey/key.h>
extern int ipsec_bypass;
extern int esp_udp_encap_port;
#endif /* IPSEC */

#if NECP
#include <net/necp.h>
#endif /* NECP */

#if FLOW_DIVERT
#include <netinet/flow_divert.h>
#endif /* FLOW_DIVERT */

#if CONTENT_FILTER
#include <net/content_filter.h>
#endif /* CONTENT_FILTER */

#define DBG_LAYER_IN_BEG   NETDBG_CODE(DBG_NETUDP, 0)
#define DBG_LAYER_IN_END   NETDBG_CODE(DBG_NETUDP, 2)
#define DBG_LAYER_OUT_BEG  NETDBG_CODE(DBG_NETUDP, 1)
#define DBG_LAYER_OUT_END  NETDBG_CODE(DBG_NETUDP, 3)
#define DBG_FNC_UDP_INPUT  NETDBG_CODE(DBG_NETUDP, (5 << 8))
#define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)

/*
 * UDP protocol implementation.
 * Per RFC 768, August, 1980.
 */
#ifndef COMPAT_42
static int udpcksum = 1;
#else
static int udpcksum = 0;        /* XXX */
#endif
SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udpcksum, 0, "");

int udp_log_in_vain = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
    &udp_log_in_vain, 0, "Log all incoming UDP packets");

static int blackhole = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
    &blackhole, 0, "Do not send port unreachables for refused connects");

struct inpcbhead udb;           /* from udp_var.h */
#define udb6 udb                /* for KAME src sync over BSD*'s */
struct inpcbinfo udbinfo;

#ifndef UDBHASHSIZE
#define UDBHASHSIZE 16
#endif

/* Garbage collection performed during most recent udp_gc() run */
static boolean_t udp_gc_done = FALSE;

#define log_in_vain_log(a) { log a; }

static int udp_getstat SYSCTL_HANDLER_ARGS;
struct udpstat udpstat;         /* from udp_var.h */
SYSCTL_PROC(_net_inet_udp, UDPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, udp_getstat, "S,udpstat",
    "UDP statistics (struct udpstat, netinet/udp_var.h)");

SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount,
    CTLFLAG_RD | CTLFLAG_LOCKED, &udbinfo.ipi_count, 0,
    "Number of active PCBs");

__private_extern__ int udp_use_randomport = 1;
SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udp_use_randomport, 0,
    "Randomize UDP port numbers");

struct udp_in6 {
    struct sockaddr_in6 uin6_sin;
    u_char uin6_init_done : 1;
};
struct udp_ip6 {
    struct ip6_hdr uip6_ip6;
    u_char uip6_init_done : 1;
};

int udp_abort(struct socket *);
int udp_attach(struct socket *, int, struct proc *);
int udp_bind(struct socket *, struct sockaddr *, struct proc *);
int udp_connect(struct socket *, struct sockaddr *, struct proc *);
int udp_connectx(struct socket *, struct sockaddr *,
    struct sockaddr *, struct proc *, uint32_t, sae_associd_t,
    sae_connid_t *, uint32_t, void *, uint32_t, struct uio *, user_ssize_t *);
int udp_detach(struct socket *);
int udp_disconnect(struct socket *);
int udp_disconnectx(struct socket *, sae_associd_t, sae_connid_t);
int udp_send(struct socket *, int, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
    struct sockaddr_in *, struct udp_in6 *, struct udp_ip6 *, struct ifnet *);
static int udp_input_checksum(struct mbuf *, struct udphdr *, int, int);
int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
static void udp_gc(struct inpcbinfo *);

struct pr_usrreqs udp_usrreqs = {
    .pru_abort = udp_abort,
    .pru_attach = udp_attach,
    .pru_bind = udp_bind,
    .pru_connect = udp_connect,
    .pru_connectx = udp_connectx,
    .pru_control = in_control,
    .pru_detach = udp_detach,
    .pru_disconnect = udp_disconnect,
    .pru_disconnectx = udp_disconnectx,
    .pru_peeraddr = in_getpeeraddr,
    .pru_send = udp_send,
    .pru_shutdown = udp_shutdown,
    .pru_sockaddr = in_getsockaddr,
    .pru_sosend = sosend,
    .pru_soreceive = soreceive,
    .pru_soreceive_list = soreceive_list,
};

void
udp_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
    static int udp_initialized = 0;
    struct inpcbinfo *pcbinfo;

    VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

    if (udp_initialized) {
        return;
    }
    udp_initialized = 1;
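    /*
     * pool_size below is, roughly, the mbuf cluster pool size expressed
     * in megabytes: nmbclusters clusters of (1 << MCLSHIFT) bytes each,
     * shifted down by MBSHIFT (log2 of 1 MB), so the larger receive
     * space applies on systems with a cluster pool of ~96 MB or more.
     */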
    uint32_t pool_size = (nmbclusters << MCLSHIFT) >> MBSHIFT;
    if (pool_size >= 96) {
        /* Improves 10GbE UDP performance. */
        udp_recvspace = 786896;
    }
    LIST_INIT(&udb);
    udbinfo.ipi_listhead = &udb;
    udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
        &udbinfo.ipi_hashmask);
    udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
        &udbinfo.ipi_porthashmask);
    udbinfo.ipi_zone = zone_create("udpcb", sizeof(struct inpcb), ZC_NONE);

    pcbinfo = &udbinfo;
    /*
     * allocate lock group attribute and group for udp pcb mutexes
     */
    pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
    pcbinfo->ipi_lock_grp = lck_grp_alloc_init("udppcb",
        pcbinfo->ipi_lock_grp_attr);
    pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
    if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
        pcbinfo->ipi_lock_attr)) == NULL) {
        panic("%s: unable to allocate PCB lock\n", __func__);
        /* NOTREACHED */
    }

    udbinfo.ipi_gc = udp_gc;
    in_pcbinfo_attach(&udbinfo);
}

void
udp_input(struct mbuf *m, int iphlen)
{
    struct ip *ip;
    struct udphdr *uh;
    struct inpcb *inp;
    struct mbuf *opts = NULL;
    int len, isbroadcast;
    struct ip save_ip;
    struct sockaddr *append_sa;
    struct inpcbinfo *pcbinfo = &udbinfo;
    struct sockaddr_in udp_in;
    struct ip_moptions *imo = NULL;
    int foundmembership = 0, ret = 0;
    struct udp_in6 udp_in6;
    struct udp_ip6 udp_ip6;
    struct ifnet *ifp = m->m_pkthdr.rcvif;
    boolean_t cell = IFNET_IS_CELLULAR(ifp);
    boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
    boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
    u_int16_t pf_tag = 0;

    bzero(&udp_in, sizeof(udp_in));
    udp_in.sin_len = sizeof(struct sockaddr_in);
    udp_in.sin_family = AF_INET;
    bzero(&udp_in6, sizeof(udp_in6));
    udp_in6.uin6_sin.sin6_len = sizeof(struct sockaddr_in6);
    udp_in6.uin6_sin.sin6_family = AF_INET6;

    if (m->m_flags & M_PKTHDR) {
        pf_tag = m_pftag(m)->pftag_tag;
    }

    udpstat.udps_ipackets++;

    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    /*
     * Strip IP options, if any; should skip this,
     * make available to user, and use on returned packets,
     * but we don't yet have a way to check the checksum
     * with options still present.
     */
    if (iphlen > sizeof(struct ip)) {
        ip_stripoptions(m);
        iphlen = sizeof(struct ip);
    }

    /*
     * Get IP and UDP header together in first mbuf.
     */
    ip = mtod(m, struct ip *);
    if (m->m_len < iphlen + sizeof(struct udphdr)) {
        m = m_pullup(m, iphlen + sizeof(struct udphdr));
        if (m == NULL) {
            udpstat.udps_hdrops++;
            KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                0, 0, 0, 0, 0);
            return;
        }
        ip = mtod(m, struct ip *);
    }
    uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);

    /* destination port of 0 is illegal, based on RFC768. */
    if (uh->uh_dport == 0) {
        IF_UDP_STATINC(ifp, port0);
        goto bad;
    }

    KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
        ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);

    /*
     * Make mbuf data length reflect UDP length.
     * If not enough data to reflect UDP length, drop.
     */
    len = ntohs((u_short)uh->uh_ulen);
    if (ip->ip_len != len) {
        if (len > ip->ip_len || len < sizeof(struct udphdr)) {
            udpstat.udps_badlen++;
            IF_UDP_STATINC(ifp, badlength);
            goto bad;
        }
        m_adj(m, len - ip->ip_len);
        /* ip->ip_len = len; */
    }
    /*
     * Save a copy of the IP header in case we want to restore it
     * for sending an ICMP error message in response.
     */
    save_ip = *ip;

    /*
     * Checksum extended UDP header and data.
     */
    if (udp_input_checksum(m, uh, iphlen, len)) {
        goto bad;
    }

    isbroadcast = in_broadcast(ip->ip_dst, ifp);

    if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || isbroadcast) {
        int reuse_sock = 0, mcast_delivered = 0;

        lck_rw_lock_shared(pcbinfo->ipi_lock);
        /*
         * Deliver a multicast or broadcast datagram to *all* sockets
         * for which the local and remote addresses and ports match
         * those of the incoming datagram. This allows more than
         * one process to receive multi/broadcasts on the same port.
         * (This really ought to be done for unicast datagrams as
         * well, but that would cause problems with existing
         * applications that open both address-specific sockets and
         * a wildcard socket listening to the same port -- they would
         * end up receiving duplicates of every unicast datagram.
         * Those applications open the multiple sockets to overcome an
         * inadequacy of the UDP socket interface, but for backwards
         * compatibility we avoid the problem here rather than
         * fixing the interface. Maybe 4.5BSD will remedy this?)
         */

        /*
         * Construct sockaddr format source address.
         */
        udp_in.sin_port = uh->uh_sport;
        udp_in.sin_addr = ip->ip_src;
        /*
         * Locate pcb(s) for datagram.
         * (Algorithm copied from raw_intr().)
         */
        udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
        LIST_FOREACH(inp, &udb, inp_list) {
#if IPSEC
            int skipit;
#endif /* IPSEC */

            if (inp->inp_socket == NULL) {
                continue;
            }
            if (inp != sotoinpcb(inp->inp_socket)) {
                panic("%s: bad so back ptr inp=%p\n",
                    __func__, inp);
                /* NOTREACHED */
            }
            if ((inp->inp_vflag & INP_IPV4) == 0) {
                continue;
            }
            if (inp_restricted_recv(inp, ifp)) {
                continue;
            }

            if ((inp->inp_moptions == NULL) &&
                (ntohl(ip->ip_dst.s_addr) !=
                INADDR_ALLHOSTS_GROUP) && (isbroadcast == 0)) {
                continue;
            }

            if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
                WNT_STOPUSING) {
                continue;
            }

            udp_lock(inp->inp_socket, 1, 0);

            if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
                WNT_STOPUSING) {
                udp_unlock(inp->inp_socket, 1, 0);
                continue;
            }

            if (inp->inp_lport != uh->uh_dport) {
                udp_unlock(inp->inp_socket, 1, 0);
                continue;
            }
            if (inp->inp_laddr.s_addr != INADDR_ANY) {
                if (inp->inp_laddr.s_addr !=
                    ip->ip_dst.s_addr) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    continue;
                }
            }
            if (inp->inp_faddr.s_addr != INADDR_ANY) {
                if (inp->inp_faddr.s_addr !=
                    ip->ip_src.s_addr ||
                    inp->inp_fport != uh->uh_sport) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    continue;
                }
            }

            if (isbroadcast == 0 && (ntohl(ip->ip_dst.s_addr) !=
                INADDR_ALLHOSTS_GROUP)) {
                struct sockaddr_in group;
                int blocked;

                if ((imo = inp->inp_moptions) == NULL) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    continue;
                }
                IMO_LOCK(imo);

                bzero(&group, sizeof(struct sockaddr_in));
                group.sin_len = sizeof(struct sockaddr_in);
                group.sin_family = AF_INET;
                group.sin_addr = ip->ip_dst;

                blocked = imo_multi_filter(imo, ifp,
                    &group, &udp_in);
                if (blocked == MCAST_PASS) {
                    foundmembership = 1;
                }

                IMO_UNLOCK(imo);
                if (!foundmembership) {
                    udp_unlock(inp->inp_socket, 1, 0);
                    if (blocked == MCAST_NOTSMEMBER ||
                        blocked == MCAST_MUTED) {
                        udpstat.udps_filtermcast++;
                    }
                    continue;
                }
                foundmembership = 0;
            }

            reuse_sock = (inp->inp_socket->so_options &
                (SO_REUSEPORT | SO_REUSEADDR));

#if NECP
            skipit = 0;
            if (!necp_socket_is_allowed_to_send_recv_v4(inp,
                uh->uh_dport, uh->uh_sport, &ip->ip_dst,
                &ip->ip_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) {
                /* do not inject data to pcb */
                skipit = 1;
            }
            if (skipit == 0)
#endif /* NECP */
            {
                struct mbuf *n = NULL;

                if (reuse_sock) {
                    n = m_copy(m, 0, M_COPYALL);
                }
                udp_append(inp, ip, m,
                    iphlen + sizeof(struct udphdr),
                    &udp_in, &udp_in6, &udp_ip6, ifp);
                mcast_delivered++;

                m = n;
            }
            udp_unlock(inp->inp_socket, 1, 0);

            /*
             * Don't look for additional matches if this one does
             * not have either the SO_REUSEPORT or SO_REUSEADDR
             * socket options set. This heuristic avoids searching
             * through all pcbs in the common case of a non-shared
             * port. It assumes that an application will never
             * clear these options after setting them.
             */
            if (reuse_sock == 0 || m == NULL) {
                break;
            }

            /*
             * Expect 32-bit aligned data pointer on strict-align
             * platforms.
             */
            MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
            /*
             * Recompute IP and UDP header pointers for new mbuf
             */
            ip = mtod(m, struct ip *);
            uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
        }
        lck_rw_done(pcbinfo->ipi_lock);

        if (mcast_delivered == 0) {
            /*
             * No matching pcb found; discard datagram.
             * (No need to send an ICMP Port Unreachable
             * for a broadcast or multicast datagram.)
             */
            udpstat.udps_noportbcast++;
            IF_UDP_STATINC(ifp, port_unreach);
            goto bad;
        }

        /* free the extra copy of the mbuf, or the one that IPsec/NECP skipped */
        if (m != NULL) {
            m_freem(m);
        }
567 KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
568 return;
569 }
570
571#if IPSEC
572 /*
573 * UDP to port 4500 with a payload where the first four bytes are
574 * not zero is a UDP encapsulated IPsec packet. Packets where
575 * the payload is one byte and that byte is 0xFF are NAT keepalive
576 * packets. Decapsulate the ESP packet and carry on with IPsec input
577 * or discard the NAT keep-alive.
578 */
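    /*
     * For reference, a sketch of the two encapsulation formats being
     * distinguished below (per RFC 3948):
     *
     *     NAT keepalive:   [IP][UDP][0xFF]               1-byte payload
     *     UDP-encap ESP:   [IP][UDP][SPI][seq][...]      first 4 bytes are
     *                                                    the non-zero SPI
     *
     * A zero value in those first four bytes would be the non-ESP marker
     * of IKE traffic sharing the port, which falls through to normal UDP
     * delivery here.
     */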
    if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
        (uh->uh_dport == ntohs((u_short)esp_udp_encap_port) ||
        uh->uh_sport == ntohs((u_short)esp_udp_encap_port))) {
        /*
         * Check if ESP or keepalive:
         * 1. If the destination port of the incoming packet is 4500.
         * 2. If the source port of the incoming packet is 4500,
         *    then check the SADB to match IP address and port.
         */
        bool check_esp = true;
        if (uh->uh_dport != ntohs((u_short)esp_udp_encap_port)) {
            check_esp = key_checksa_present(AF_INET, (caddr_t)&ip->ip_dst,
                (caddr_t)&ip->ip_src, uh->uh_dport,
                uh->uh_sport);
        }

        if (check_esp) {
            int payload_len = len - sizeof(struct udphdr) > 4 ? 4 :
                len - sizeof(struct udphdr);

            if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
                if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) +
                    payload_len)) == NULL) {
                    udpstat.udps_hdrops++;
                    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                        0, 0, 0, 0, 0);
                    return;
                }
                /*
                 * Expect 32-bit aligned data pointer on strict-align
                 * platforms.
                 */
                MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

                ip = mtod(m, struct ip *);
                uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
            }
            /* Check for NAT keepalive packet */
            if (payload_len == 1 && *(u_int8_t *)
                ((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
                m_freem(m);
                KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                    0, 0, 0, 0, 0);
                return;
            } else if (payload_len == 4 && *(u_int32_t *)(void *)
                ((caddr_t)uh + sizeof(struct udphdr)) != 0) {
                /* UDP encapsulated IPsec packet to pass through NAT */
                KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                    0, 0, 0, 0, 0);
                /* preserve the udp header */
                esp4_input(m, iphlen + sizeof(struct udphdr));
                return;
            }
        }
    }
#endif /* IPSEC */

    /*
     * Locate pcb for datagram.
     */
    inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
        ip->ip_dst, uh->uh_dport, 1, ifp);
    if (inp == NULL) {
        IF_UDP_STATINC(ifp, port_unreach);

        if (udp_log_in_vain) {
            char buf[MAX_IPv4_STR_LEN];
            char buf2[MAX_IPv4_STR_LEN];

            /* check src and dst address */
            if (udp_log_in_vain < 3) {
                log(LOG_INFO, "Connection attempt to "
                    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
                    &ip->ip_dst, buf, sizeof(buf)),
                    ntohs(uh->uh_dport), inet_ntop(AF_INET,
                    &ip->ip_src, buf2, sizeof(buf2)),
                    ntohs(uh->uh_sport));
            } else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
                ip->ip_dst.s_addr != ip->ip_src.s_addr) {
                log_in_vain_log((LOG_INFO,
                    "Stealth Mode connection attempt to "
                    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
                    &ip->ip_dst, buf, sizeof(buf)),
                    ntohs(uh->uh_dport), inet_ntop(AF_INET,
                    &ip->ip_src, buf2, sizeof(buf2)),
                    ntohs(uh->uh_sport)))
            }
        }
        udpstat.udps_noport++;
        if (m->m_flags & (M_BCAST | M_MCAST)) {
            udpstat.udps_noportbcast++;
            goto bad;
        }
#if ICMP_BANDLIM
        if (badport_bandlim(BANDLIM_ICMP_UNREACH)) {
            goto bad;
        }
#endif /* ICMP_BANDLIM */
        if (blackhole) {
            if (ifp && ifp->if_type != IFT_LOOP) {
                goto bad;
            }
        }
        *ip = save_ip;
        ip->ip_len += iphlen;
        icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
        return;
    }
    udp_lock(inp->inp_socket, 1, 0);

    if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
        udp_unlock(inp->inp_socket, 1, 0);
        IF_UDP_STATINC(ifp, cleanup);
        goto bad;
    }
#if NECP
    if (!necp_socket_is_allowed_to_send_recv_v4(inp, uh->uh_dport,
        uh->uh_sport, &ip->ip_dst, &ip->ip_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) {
        udp_unlock(inp->inp_socket, 1, 0);
        IF_UDP_STATINC(ifp, badipsec);
        goto bad;
    }
#endif /* NECP */

    /*
     * Construct sockaddr format source address.
     * Stuff source address and datagram in user buffer.
     */
    udp_in.sin_port = uh->uh_sport;
    udp_in.sin_addr = ip->ip_src;
    if ((inp->inp_flags & INP_CONTROLOPTS) != 0 ||
#if CONTENT_FILTER
        /* Content Filter needs to see local address */
        (inp->inp_socket->so_cfil_db != NULL) ||
#endif
        (inp->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
        (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
        (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
        if (inp->inp_vflag & INP_IPV6) {
            int savedflags;

            ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
            savedflags = inp->inp_flags;
            inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
            ret = ip6_savecontrol(inp, m, &opts);
            inp->inp_flags = savedflags;
        } else {
            ret = ip_savecontrol(inp, &opts, ip, m);
        }
        if (ret != 0) {
            udp_unlock(inp->inp_socket, 1, 0);
            goto bad;
        }
    }
    m_adj(m, iphlen + sizeof(struct udphdr));

    KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
        save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);

    if (inp->inp_vflag & INP_IPV6) {
        in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
        append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
    } else {
        append_sa = (struct sockaddr *)&udp_in;
    }
    if (nstat_collect) {
        INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
        INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, m->m_pkthdr.len);
        inp_set_activity_bitmap(inp);
    }
    so_recv_data_stat(inp->inp_socket, m, 0);
    if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa,
        m, opts, NULL) == 0) {
        udpstat.udps_fullsock++;
    } else {
        sorwakeup(inp->inp_socket);
    }
    udp_unlock(inp->inp_socket, 1, 0);
    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return;
bad:
    m_freem(m);
    if (opts) {
        m_freem(opts);
    }
    KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

static void
ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
{
    bzero(ip6, sizeof(*ip6));

    ip6->ip6_vfc = IPV6_VERSION;
    ip6->ip6_plen = ip->ip_len;
    ip6->ip6_nxt = ip->ip_p;
    ip6->ip6_hlim = ip->ip_ttl;
    if (ip->ip_src.s_addr) {
        ip6->ip6_src.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
        ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
    }
    if (ip->ip_dst.s_addr) {
        ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
        ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
    }
}
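/*
 * The result above is the IPv4-mapped IPv6 form ::ffff:a.b.c.d
 * (RFC 4291, section 2.5.5.2): words 0 and 1 stay zero from the bzero(),
 * word 2 carries the 0000:ffff marker (IPV6_ADDR_INT32_SMP), and word 3
 * carries the IPv4 address verbatim.
 */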

/*
 * subroutine of udp_input(), mainly for source code readability.
 */
static void
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
    struct udp_ip6 *pudp_ip6, struct ifnet *ifp)
{
    struct sockaddr *append_sa;
    struct mbuf *opts = 0;
    boolean_t cell = IFNET_IS_CELLULAR(ifp);
    boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
    boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
    int ret = 0;

    if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
#if CONTENT_FILTER
        /* Content Filter needs to see local address */
        (last->inp_socket->so_cfil_db != NULL) ||
#endif
        (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
        (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
        (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
        if (last->inp_vflag & INP_IPV6) {
            int savedflags;

            if (pudp_ip6->uip6_init_done == 0) {
                ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
                pudp_ip6->uip6_init_done = 1;
            }
            savedflags = last->inp_flags;
            last->inp_flags &= ~INP_UNMAPPABLEOPTS;
            ret = ip6_savecontrol(last, n, &opts);
            if (ret != 0) {
                last->inp_flags = savedflags;
                goto error;
            }
            last->inp_flags = savedflags;
        } else {
            ret = ip_savecontrol(last, &opts, ip, n);
            if (ret != 0) {
                goto error;
            }
        }
    }
    if (last->inp_vflag & INP_IPV6) {
        if (pudp_in6->uin6_init_done == 0) {
            in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
            pudp_in6->uin6_init_done = 1;
        }
        append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
    } else {
        append_sa = (struct sockaddr *)pudp_in;
    }
    if (nstat_collect) {
        INP_ADD_STAT(last, cell, wifi, wired, rxpackets, 1);
        INP_ADD_STAT(last, cell, wifi, wired, rxbytes,
            n->m_pkthdr.len);
        inp_set_activity_bitmap(last);
    }
    so_recv_data_stat(last->inp_socket, n, 0);
    m_adj(n, off);
    if (sbappendaddr(&last->inp_socket->so_rcv, append_sa,
        n, opts, NULL) == 0) {
        udpstat.udps_fullsock++;
    } else {
        sorwakeup(last->inp_socket);
    }
    return;
error:
    m_freem(n);
    m_freem(opts);
}

/*
 * Notify a udp user of an asynchronous error;
 * just wake up so that he can collect error status.
 */
void
udp_notify(struct inpcb *inp, int errno)
{
    inp->inp_socket->so_error = (u_short)errno;
    sorwakeup(inp->inp_socket);
    sowwakeup(inp->inp_socket);
}

void
udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * ifp)
{
    struct ip *ip = vip;
    void (*notify)(struct inpcb *, int) = udp_notify;
    struct in_addr faddr;
    struct inpcb *inp = NULL;

    faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
    if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
        return;
    }

    if (PRC_IS_REDIRECT(cmd)) {
        ip = 0;
        notify = in_rtchange;
    } else if (cmd == PRC_HOSTDEAD) {
        ip = 0;
    } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
        return;
    }
    if (ip) {
        struct udphdr uh;
        struct icmp *icp = NULL;

        bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof(uh));
        inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport,
            ip->ip_src, uh.uh_sport, 0, NULL);
        icp = (struct icmp *)(void *)((caddr_t)ip - offsetof(struct icmp, icmp_ip));

        if (inp != NULL && inp->inp_socket != NULL) {
            udp_lock(inp->inp_socket, 1, 0);
            if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
                WNT_STOPUSING) {
                udp_unlock(inp->inp_socket, 1, 0);
                return;
            }
            if (cmd == PRC_MSGSIZE && !uuid_is_null(inp->necp_client_uuid)) {
                uuid_t null_uuid;
                uuid_clear(null_uuid);
                necp_update_flow_protoctl_event(null_uuid, inp->necp_client_uuid,
                    PRC_MSGSIZE, ntohs(icp->icmp_nextmtu), 0);
            } else {
                (*notify)(inp, inetctlerrmap[cmd]);
            }
            udp_unlock(inp->inp_socket, 1, 0);
        }
    } else {
        in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
    }
}

int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
    int error = 0, optval = 0;
    struct inpcb *inp;

    /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
    if (sopt->sopt_level != IPPROTO_UDP &&
        !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
        return ip_ctloutput(so, sopt);
    }

    inp = sotoinpcb(so);

    switch (sopt->sopt_dir) {
    case SOPT_SET:
        switch (sopt->sopt_name) {
        case UDP_NOCKSUM:
            /* This option is settable only for UDP over IPv4 */
            if (!(inp->inp_vflag & INP_IPV4)) {
                error = EINVAL;
                break;
            }

            if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval))) != 0) {
                break;
            }

            if (optval != 0) {
                inp->inp_flags |= INP_UDP_NOCKSUM;
            } else {
                inp->inp_flags &= ~INP_UDP_NOCKSUM;
            }
            break;
        case UDP_KEEPALIVE_OFFLOAD:
        {
            struct udp_keepalive_offload ka;
            /*
             * If the socket is not connected, the stack will
             * not know the destination address to put in the
             * keepalive datagram. Return an error now instead
             * of failing later.
             */
            if (!(so->so_state & SS_ISCONNECTED)) {
                error = EINVAL;
                break;
            }
            if (sopt->sopt_valsize != sizeof(ka)) {
                error = EINVAL;
                break;
            }
            if ((error = sooptcopyin(sopt, &ka, sizeof(ka),
                sizeof(ka))) != 0) {
                break;
            }

            /* application should specify the type */
            if (ka.ka_type == 0) {
                return EINVAL;
            }

            if (ka.ka_interval == 0) {
                /*
                 * if interval is 0, disable the offload
                 * mechanism
                 */
                if (inp->inp_keepalive_data != NULL) {
                    FREE(inp->inp_keepalive_data,
                        M_TEMP);
                }
                inp->inp_keepalive_data = NULL;
                inp->inp_keepalive_datalen = 0;
                inp->inp_keepalive_interval = 0;
                inp->inp_keepalive_type = 0;
                inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD;
            } else {
                if (inp->inp_keepalive_data != NULL) {
                    FREE(inp->inp_keepalive_data,
                        M_TEMP);
                    inp->inp_keepalive_data = NULL;
                }

                inp->inp_keepalive_datalen = (uint8_t)min(
                    ka.ka_data_len,
                    UDP_KEEPALIVE_OFFLOAD_DATA_SIZE);
                if (inp->inp_keepalive_datalen > 0) {
                    MALLOC(inp->inp_keepalive_data,
                        u_int8_t *,
                        inp->inp_keepalive_datalen,
                        M_TEMP, M_WAITOK);
                    if (inp->inp_keepalive_data == NULL) {
                        inp->inp_keepalive_datalen = 0;
                        error = ENOMEM;
                        break;
                    }
                    bcopy(ka.ka_data,
                        inp->inp_keepalive_data,
                        inp->inp_keepalive_datalen);
                } else {
                    inp->inp_keepalive_datalen = 0;
                }
                inp->inp_keepalive_interval = (uint8_t)
                    min(UDP_KEEPALIVE_INTERVAL_MAX_SECONDS,
                    ka.ka_interval);
                inp->inp_keepalive_type = ka.ka_type;
                inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD;
            }
            break;
        }
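        /*
         * For context, a userland client arms the offload roughly like
         * this (sketch; the type constant below is an example and must
         * be a nonzero value the stack recognizes, see netinet/udp.h):
         *
         *     struct udp_keepalive_offload ka = { 0 };
         *     ka.ka_type = UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY;
         *     ka.ka_interval = 30;    // seconds
         *     setsockopt(s, IPPROTO_UDP, UDP_KEEPALIVE_OFFLOAD,
         *         &ka, sizeof(ka));
         *
         * Passing ka_interval == 0 tears the offload down again, as the
         * code above shows.
         */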
        case SO_FLUSH:
            if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval))) != 0) {
                break;
            }

            error = inp_flush(inp, optval);
            break;

        default:
            error = ENOPROTOOPT;
            break;
        }
        break;

    case SOPT_GET:
        switch (sopt->sopt_name) {
        case UDP_NOCKSUM:
            optval = inp->inp_flags & INP_UDP_NOCKSUM;
            break;

        default:
            error = ENOPROTOOPT;
            break;
        }
        if (error == 0) {
            error = sooptcopyout(sopt, &optval, sizeof(optval));
        }
        break;
    }
    return error;
}

static int
udp_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error, i, n;
    struct inpcb *inp, **inp_list;
    inp_gen_t gencnt;
    struct xinpgen xig;

    /*
     * The process of preparing the pcb list is too time-consuming and
     * resource-intensive to repeat twice on every request.
     */
    lck_rw_lock_exclusive(udbinfo.ipi_lock);
    if (req->oldptr == USER_ADDR_NULL) {
        n = udbinfo.ipi_count;
        req->oldidx = 2 * (sizeof(xig))
            + (n + n / 8) * sizeof(struct xinpcb);
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    if (req->newptr != USER_ADDR_NULL) {
        lck_rw_done(udbinfo.ipi_lock);
        return EPERM;
    }

    /*
     * OK, now we're committed to doing something.
     */
    gencnt = udbinfo.ipi_gencnt;
    n = udbinfo.ipi_count;

    bzero(&xig, sizeof(xig));
    xig.xig_len = sizeof(xig);
    xig.xig_count = n;
    xig.xig_gen = gencnt;
    xig.xig_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xig, sizeof(xig));
    if (error) {
        lck_rw_done(udbinfo.ipi_lock);
        return error;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
    if (inp_list == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return ENOMEM;
    }

    for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
        inp = LIST_NEXT(inp, inp_list)) {
        if (inp->inp_gencnt <= gencnt &&
            inp->inp_state != INPCB_STATE_DEAD) {
            inp_list[i++] = inp;
        }
    }
    n = i;

    error = 0;
    for (i = 0; i < n; i++) {
        struct xinpcb xi;

        inp = inp_list[i];

        if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
            continue;
        }
        udp_lock(inp->inp_socket, 1, 0);
        if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }
        if (inp->inp_gencnt > gencnt) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }

        bzero(&xi, sizeof(xi));
        xi.xi_len = sizeof(xi);
        /* XXX should avoid extra copy */
        inpcb_to_compat(inp, &xi.xi_inp);
        if (inp->inp_socket) {
            sotoxsocket(inp->inp_socket, &xi.xi_socket);
        }

        udp_unlock(inp->inp_socket, 1, 0);

        error = SYSCTL_OUT(req, &xi, sizeof(xi));
    }
    if (!error) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof(xig);
        xig.xig_gen = udbinfo.ipi_gencnt;
        xig.xig_sogen = so_gencnt;
        xig.xig_count = udbinfo.ipi_count;
        error = SYSCTL_OUT(req, &xig, sizeof(xig));
    }
    FREE(inp_list, M_TEMP);
    lck_rw_done(udbinfo.ipi_lock);
    return error;
}

SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist,
    "S,xinpcb", "List of active UDP sockets");

#if XNU_TARGET_OS_OSX

static int
udp_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error, i, n;
    struct inpcb *inp, **inp_list;
    inp_gen_t gencnt;
    struct xinpgen xig;

    /*
     * The process of preparing the pcb list is too time-consuming and
     * resource-intensive to repeat twice on every request.
     */
    lck_rw_lock_shared(udbinfo.ipi_lock);
    if (req->oldptr == USER_ADDR_NULL) {
        n = udbinfo.ipi_count;
        req->oldidx =
            2 * (sizeof(xig)) + (n + n / 8) * sizeof(struct xinpcb64);
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    if (req->newptr != USER_ADDR_NULL) {
        lck_rw_done(udbinfo.ipi_lock);
        return EPERM;
    }

    /*
     * OK, now we're committed to doing something.
     */
    gencnt = udbinfo.ipi_gencnt;
    n = udbinfo.ipi_count;

    bzero(&xig, sizeof(xig));
    xig.xig_len = sizeof(xig);
    xig.xig_count = n;
    xig.xig_gen = gencnt;
    xig.xig_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xig, sizeof(xig));
    if (error) {
        lck_rw_done(udbinfo.ipi_lock);
        return error;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return 0;
    }

    inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
    if (inp_list == 0) {
        lck_rw_done(udbinfo.ipi_lock);
        return ENOMEM;
    }

    for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
        inp = LIST_NEXT(inp, inp_list)) {
        if (inp->inp_gencnt <= gencnt &&
            inp->inp_state != INPCB_STATE_DEAD) {
            inp_list[i++] = inp;
        }
    }
    n = i;

    error = 0;
    for (i = 0; i < n; i++) {
        struct xinpcb64 xi;

        inp = inp_list[i];

        if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
            continue;
        }
        udp_lock(inp->inp_socket, 1, 0);
        if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }
        if (inp->inp_gencnt > gencnt) {
            udp_unlock(inp->inp_socket, 1, 0);
            continue;
        }

        bzero(&xi, sizeof(xi));
        xi.xi_len = sizeof(xi);
        inpcb_to_xinpcb64(inp, &xi);
        if (inp->inp_socket) {
            sotoxsocket64(inp->inp_socket, &xi.xi_socket);
        }

        udp_unlock(inp->inp_socket, 1, 0);

        error = SYSCTL_OUT(req, &xi, sizeof(xi));
    }
    if (!error) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof(xig);
        xig.xig_gen = udbinfo.ipi_gencnt;
        xig.xig_sogen = so_gencnt;
        xig.xig_count = udbinfo.ipi_count;
        error = SYSCTL_OUT(req, &xig, sizeof(xig));
    }
    FREE(inp_list, M_TEMP);
    lck_rw_done(udbinfo.ipi_lock);
    return error;
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64,
    "S,xinpcb64", "List of active UDP sockets");

#endif /* XNU_TARGET_OS_OSX */

static int
udp_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    return get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n,
    "S,xinpcb_n", "List of active UDP sockets");

__private_extern__ void
udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
    bitstr_t *bitfield)
{
    inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
        &udbinfo);
}

__private_extern__ uint32_t
udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
{
    return inpcb_count_opportunistic(ifindex, &udbinfo, flags);
}

__private_extern__ uint32_t
udp_find_anypcb_byaddr(struct ifaddr *ifa)
{
    return inpcb_find_anypcb_byaddr(ifa, &udbinfo);
}

static int
udp_check_pktinfo(struct mbuf *control, struct ifnet **outif,
    struct in_addr *laddr)
{
    struct cmsghdr *cm = 0;
    struct in_pktinfo *pktinfo;
    struct ifnet *ifp;

    if (outif != NULL) {
        *outif = NULL;
    }

    /*
     * XXX: Currently, we assume all the optional information is stored
     * in a single mbuf.
     */
    if (control->m_next) {
        return EINVAL;
    }

    if (control->m_len < CMSG_LEN(0)) {
        return EINVAL;
    }

    for (cm = M_FIRST_CMSGHDR(control);
        is_cmsg_valid(control, cm);
        cm = M_NXT_CMSGHDR(control, cm)) {
        if (cm->cmsg_level != IPPROTO_IP ||
            cm->cmsg_type != IP_PKTINFO) {
            continue;
        }

        if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) {
            return EINVAL;
        }

        pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm);

        /* Check for a valid ifindex in pktinfo */
        ifnet_head_lock_shared();

        if (pktinfo->ipi_ifindex > if_index) {
            ifnet_head_done();
            return ENXIO;
        }

        /*
         * If ipi_ifindex is specified it takes precedence
         * over ipi_spec_dst.
         */
        if (pktinfo->ipi_ifindex) {
            ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
            if (ifp == NULL) {
                ifnet_head_done();
                return ENXIO;
            }
            if (outif != NULL) {
                ifnet_reference(ifp);
                *outif = ifp;
            }
            ifnet_head_done();
            laddr->s_addr = INADDR_ANY;
            break;
        }

        ifnet_head_done();

        /*
         * Use the provided ipi_spec_dst address for temp
         * source address.
         */
        *laddr = pktinfo->ipi_spec_dst;
        break;
    }
    return 0;
}
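
/*
 * For context, a sender reaches this path by attaching IP_PKTINFO
 * ancillary data to sendmsg() (sketch; msg/s are local to the example):
 *
 *     struct in_pktinfo pi = { 0 };
 *     pi.ipi_ifindex = if_nametoindex("en0");  // or set ipi_spec_dst
 *     struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *     cm->cmsg_level = IPPROTO_IP;
 *     cm->cmsg_type = IP_PKTINFO;
 *     cm->cmsg_len = CMSG_LEN(sizeof(pi));
 *     memcpy(CMSG_DATA(cm), &pi, sizeof(pi));
 *     sendmsg(s, &msg, 0);
 *
 * As enforced above, a nonzero ipi_ifindex wins over ipi_spec_dst.
 */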

int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct proc *p)
{
    struct udpiphdr *ui;
    int len = m->m_pkthdr.len;
    struct sockaddr_in *sin;
    struct in_addr origladdr, laddr, faddr, pi_laddr;
    u_short lport, fport;
    int error = 0, udp_dodisconnect = 0, pktinfo = 0;
    struct socket *so = inp->inp_socket;
    int soopts = 0;
    struct mbuf *inpopts;
    struct ip_moptions *mopts;
    struct route ro;
    struct ip_out_args ipoa;
    bool sndinprog_cnt_used = false;
#if CONTENT_FILTER
    struct m_tag *cfil_tag = NULL;
    bool cfil_faddr_use = false;
    uint32_t cfil_so_state_change_cnt = 0;
    uint32_t cfil_so_options = 0;
    struct sockaddr *cfil_faddr = NULL;
#endif
    bool check_qos_marking_again = (so->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE) ? FALSE : TRUE;

    bzero(&ipoa, sizeof(ipoa));
    ipoa.ipoa_boundif = IFSCOPE_NONE;
    ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

    struct ifnet *outif = NULL;
    struct flowadv *adv = &ipoa.ipoa_flowadv;
    int sotc = SO_TC_UNSPEC;
    int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
    struct ifnet *origoutifp = NULL;
    int flowadv = 0;
    int tos = IPTOS_UNSPEC;

    /* Enable flow advisory only when connected */
    flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
    pi_laddr.s_addr = INADDR_ANY;

    KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    socket_lock_assert_owned(so);

#if CONTENT_FILTER
    /*
     * If socket is subject to UDP Content Filter and no addr is passed in,
     * retrieve CFIL saved state from mbuf and use it if necessary.
     */
    if (so->so_cfil_db && !addr) {
        cfil_tag = cfil_dgram_get_socket_state(m, &cfil_so_state_change_cnt, &cfil_so_options, &cfil_faddr, NULL);
        if (cfil_tag) {
            sin = (struct sockaddr_in *)(void *)cfil_faddr;
            if (inp && inp->inp_faddr.s_addr == INADDR_ANY) {
                /*
                 * Socket is unconnected, simply use the saved faddr as 'addr' to go through
                 * the connect/disconnect logic.
                 */
                addr = (struct sockaddr *)cfil_faddr;
            } else if ((so->so_state_change_cnt != cfil_so_state_change_cnt) &&
                (inp->inp_fport != sin->sin_port ||
                inp->inp_faddr.s_addr != sin->sin_addr.s_addr)) {
                /*
                 * Socket is connected but socket state and dest addr/port changed.
                 * We need to use the saved faddr info.
                 */
                cfil_faddr_use = true;
            }
        }
    }
#endif

    if (control != NULL) {
        tos = so_tos_from_control(control);
        sotc = so_tc_from_control(control, &netsvctype);
        VERIFY(outif == NULL);
        error = udp_check_pktinfo(control, &outif, &pi_laddr);
        m_freem(control);
        control = NULL;
        if (error) {
            goto release;
        }
        pktinfo++;
        if (outif != NULL) {
            ipoa.ipoa_boundif = outif->if_index;
        }
    }
    if (sotc == SO_TC_UNSPEC) {
        sotc = so->so_traffic_class;
        netsvctype = so->so_netsvctype;
    }

    KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
        inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
        (htons((u_short)len + sizeof(struct udphdr))));

    if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
        error = EMSGSIZE;
        goto release;
    }

    if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
        /*
         * The socket is flow-controlled; drop packets
         * until the inp is no longer flow controlled.
         */
        error = ENOBUFS;
        goto release;
    }
    /*
     * If socket was bound to an ifindex, tell ip_output about it.
     * If the ancillary IP_PKTINFO option contains an interface index,
     * it takes precedence over the one specified by IP_BOUND_IF.
     */
    if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
        (inp->inp_flags & INP_BOUND_IF)) {
        VERIFY(inp->inp_boundifp != NULL);
        ifnet_reference(inp->inp_boundifp);     /* for this routine */
        if (outif != NULL) {
            ifnet_release(outif);
        }
        outif = inp->inp_boundifp;
        ipoa.ipoa_boundif = outif->if_index;
    }
    if (INP_NO_CELLULAR(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
    }
    if (INP_NO_EXPENSIVE(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
    }
    if (INP_NO_CONSTRAINED(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
    }
    if (INP_AWDL_UNRESTRICTED(inp)) {
        ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
    }
    ipoa.ipoa_sotc = sotc;
    ipoa.ipoa_netsvctype = netsvctype;
    soopts |= IP_OUTARGS;

    /*
     * If there was a routing change, discard cached route and check
     * that we have a valid source address. Reacquire a new source
     * address if INADDR_ANY was specified.
     *
     * If we are using cfil saved state, go through this cache cleanup
     * so that we can get a new route.
     */
    if (ROUTE_UNUSABLE(&inp->inp_route)
#if CONTENT_FILTER
        || cfil_faddr_use
#endif
        ) {
        struct in_ifaddr *ia = NULL;

        ROUTE_RELEASE(&inp->inp_route);

        /* src address is gone? */
        if (inp->inp_laddr.s_addr != INADDR_ANY &&
            (ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
            if (!(inp->inp_flags & INP_INADDR_ANY) ||
                (so->so_state & SS_ISCONNECTED)) {
                /*
                 * Rdar://5448998
                 * If the source address is gone, return an
                 * error if:
                 * - the source was specified
                 * - the socket was already connected
                 */
                soevent(so, (SO_FILT_HINT_LOCKED |
                    SO_FILT_HINT_NOSRCADDR));
                error = EADDRNOTAVAIL;
                goto release;
            } else {
                /* new src will be set later */
                inp->inp_laddr.s_addr = INADDR_ANY;
                inp->inp_last_outifp = NULL;
            }
        }
        if (ia != NULL) {
            IFA_REMREF(&ia->ia_ifa);
        }
    }

    /*
     * IP_PKTINFO option check. If a temporary scope or src address
     * is provided, use it for this packet only and make sure we forget
     * it after sending this datagram.
     */
    if (pi_laddr.s_addr != INADDR_ANY ||
        (ipoa.ipoa_boundif != IFSCOPE_NONE && pktinfo)) {
        /* temp src address for this datagram only */
        laddr = pi_laddr;
        origladdr.s_addr = INADDR_ANY;
        /* we don't want to keep the laddr or route */
        udp_dodisconnect = 1;
        /* remember we don't care about src addr */
        inp->inp_flags |= INP_INADDR_ANY;
    } else {
        origladdr = laddr = inp->inp_laddr;
    }

    origoutifp = inp->inp_last_outifp;
    faddr = inp->inp_faddr;
    lport = inp->inp_lport;
    fport = inp->inp_fport;

#if CONTENT_FILTER
    if (cfil_faddr_use) {
        faddr = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_addr;
        fport = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_port;
    }
#endif
    inp->inp_sndinprog_cnt++;
    sndinprog_cnt_used = true;

    if (addr) {
        sin = (struct sockaddr_in *)(void *)addr;
        if (faddr.s_addr != INADDR_ANY) {
            error = EISCONN;
            goto release;
        }
        if (lport == 0) {
            /*
             * In case we don't have a local port set, go through
             * the full connect. We don't have a local port yet
             * (i.e., we can't be looked up), so it's not an issue
             * if the input runs at the same time we do this.
             */
            /* if we have a source address specified, use that */
            if (pi_laddr.s_addr != INADDR_ANY) {
                inp->inp_laddr = pi_laddr;
            }
            /*
             * If a scope is specified, use it. Scope from
             * IP_PKTINFO takes precedence over the scope
             * set via INP_BOUND_IF.
             */
            error = in_pcbconnect(inp, addr, p, ipoa.ipoa_boundif,
                &outif);
            if (error) {
                goto release;
            }

            laddr = inp->inp_laddr;
            lport = inp->inp_lport;
            faddr = inp->inp_faddr;
            fport = inp->inp_fport;
            udp_dodisconnect = 1;

            /* synch up in case in_pcbladdr() overrides */
            if (outif != NULL && ipoa.ipoa_boundif != IFSCOPE_NONE) {
                ipoa.ipoa_boundif = outif->if_index;
            }
        } else {
            /*
             * Fast path case
             *
             * We have a full address and a local port; use that
             * info to build the packet without changing the pcb
             * and interfering with the input path. See 3851370.
             *
             * Scope from IP_PKTINFO takes precedence over the
             * scope set via INP_BOUND_IF.
             */
            if (laddr.s_addr == INADDR_ANY) {
                if ((error = in_pcbladdr(inp, addr, &laddr,
                    ipoa.ipoa_boundif, &outif, 0)) != 0) {
                    goto release;
                }
                /*
                 * from pcbconnect: remember we don't
                 * care about src addr.
                 */
                inp->inp_flags |= INP_INADDR_ANY;

                /* synch up in case in_pcbladdr() overrides */
                if (outif != NULL &&
                    ipoa.ipoa_boundif != IFSCOPE_NONE) {
                    ipoa.ipoa_boundif = outif->if_index;
                }
            }

            faddr = sin->sin_addr;
            fport = sin->sin_port;
        }
    } else {
        if (faddr.s_addr == INADDR_ANY) {
            error = ENOTCONN;
            goto release;
        }
    }

    if (inp->inp_flowhash == 0) {
        inp->inp_flowhash = inp_calc_flowhash(inp);
    }

    if (fport == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) {
        so->so_flags1 |= SOF1_DNS_COUNTED;
        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns);
    }

    /*
     * Calculate data length and get a mbuf
     * for UDP and IP headers.
     */
    M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT, 1);
    if (m == 0) {
        error = ENOBUFS;
        goto abort;
    }

    /*
     * Fill in mbuf with extended UDP header
     * and addresses and length put into network format.
     */
    ui = mtod(m, struct udpiphdr *);
    bzero(ui->ui_x1, sizeof(ui->ui_x1));    /* XXX still needed? */
    ui->ui_pr = IPPROTO_UDP;
    ui->ui_src = laddr;
    ui->ui_dst = faddr;
    ui->ui_sport = lport;
    ui->ui_dport = fport;
    ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));

    /*
     * Set the Don't Fragment bit in the IP header.
     */
    if (inp->inp_flags2 & INP2_DONTFRAG) {
        struct ip *ip;

        ip = (struct ip *)&ui->ui_i;
        ip->ip_off |= IP_DF;
    }

    /*
     * Set up checksum to pseudo header checksum and output datagram.
     *
     * Treat flows to be CLAT46'd as IPv6 flows and compute the checksum
     * no matter what, as IPv6 mandates checksums for UDP.
     *
     * Here we only compute the one's complement sum of the pseudo header.
     * The payload computation and final complement are delayed until much
     * later in IP processing, to decide whether the remaining computation
     * needs to be done through offload.
     *
     * That is communicated by setting CSUM_UDP in csum_flags.
     * The offset of the checksum from the start of the ULP header is
     * communicated through csum_data.
     *
     * Note that since this already contains the pseudo checksum header,
     * any later operation at the IP layer that modifies the values used
     * here must update the checksum as well (for example NAT etc).
     */
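    /*
     * For reference, the partial sum formed below covers the RFC 768
     * pseudo header:
     *
     *     src addr (4 bytes) + dst addr (4 bytes) +
     *     zero:IPPROTO_UDP (2 bytes) + UDP length (2 bytes)
     *
     * in_pseudo() folds those into a 16-bit one's complement partial sum;
     * the UDP header and payload words are summed later, in software or
     * by checksum offload.
     */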
1777 if ((inp->inp_flags2 & INP2_CLAT46_FLOW) ||
1778 (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM))) {
1779 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1780 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1781 m->m_pkthdr.csum_flags = (CSUM_UDP | CSUM_ZERO_INVERT);
1782 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1783 } else {
1784 ui->ui_sum = 0;
1785 }
1786 ((struct ip *)ui)->ip_len = (uint16_t)(sizeof(struct udpiphdr) + len);
1787 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1788 if (tos != IPTOS_UNSPEC) {
1789 ((struct ip *)ui)->ip_tos = (uint8_t)(tos & IPTOS_MASK);
1790 } else {
1791 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1792 }
1793 udpstat.udps_opackets++;
1794
1795 KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1796 ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1797
1798#if NECP
1799 {
1800 necp_kernel_policy_id policy_id;
1801 necp_kernel_policy_id skip_policy_id;
1802 u_int32_t route_rule_id;
1803 u_int32_t pass_flags;
1804
1805 /*
1806 * We need a route to perform NECP route rule checks
1807 */
1808 if (net_qos_policy_restricted != 0 &&
1809 ROUTE_UNUSABLE(&inp->inp_route)) {
1810 struct sockaddr_in to;
1811 struct sockaddr_in from;
1812
1813 ROUTE_RELEASE(&inp->inp_route);
1814
1815 bzero(&from, sizeof(struct sockaddr_in));
1816 from.sin_family = AF_INET;
1817 from.sin_len = sizeof(struct sockaddr_in);
1818 from.sin_addr = laddr;
1819
1820 bzero(&to, sizeof(struct sockaddr_in));
1821 to.sin_family = AF_INET;
1822 to.sin_len = sizeof(struct sockaddr_in);
1823 to.sin_addr = faddr;
1824
1825 inp->inp_route.ro_dst.sa_family = AF_INET;
1826 inp->inp_route.ro_dst.sa_len = sizeof(struct sockaddr_in);
1827 ((struct sockaddr_in *)(void *)&inp->inp_route.ro_dst)->sin_addr =
1828 faddr;
1829
1830 rtalloc_scoped(&inp->inp_route, ipoa.ipoa_boundif);
1831
1832 inp_update_necp_policy(inp, (struct sockaddr *)&from,
1833 (struct sockaddr *)&to, ipoa.ipoa_boundif);
1834 inp->inp_policyresult.results.qos_marking_gencount = 0;
1835 }
1836
1837 if (!necp_socket_is_allowed_to_send_recv_v4(inp, lport, fport,
1838 &laddr, &faddr, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) {
1839 error = EHOSTUNREACH;
1840 goto abort;
1841 }
1842
1843 necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags);
1844
1845 if (net_qos_policy_restricted != 0) {
1846 necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id);
1847 }
1848 }
1849#endif /* NECP */
1850 if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1851 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1852 }
1853 if (check_qos_marking_again) {
1854 ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY;
1855 }
1856 ipoa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount;
1857
1858#if IPSEC
1859 if (inp->inp_sp != NULL && ipsec_setsocket(m, inp->inp_socket) != 0) {
1860 error = ENOBUFS;
1861 goto abort;
1862 }
1863#endif /* IPSEC */
1864
1865 inpopts = inp->inp_options;
1866#if CONTENT_FILTER
1867 if (cfil_tag && (inp->inp_socket->so_options != cfil_so_options)) {
1868 soopts |= (cfil_so_options & (SO_DONTROUTE | SO_BROADCAST));
1869 } else
1870#endif
1871 soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1872
1873 mopts = inp->inp_moptions;
1874 if (mopts != NULL) {
1875 IMO_LOCK(mopts);
1876 IMO_ADDREF_LOCKED(mopts);
1877 if (IN_MULTICAST(ntohl(ui->ui_dst.s_addr)) &&
1878 mopts->imo_multicast_ifp != NULL) {
1879 /* no reference needed */
1880 inp->inp_last_outifp = mopts->imo_multicast_ifp;
1881 }
1882 IMO_UNLOCK(mopts);
1883 }
1884
1885 /* Copy the cached route and take an extra reference */
1886 inp_route_copyout(inp, &ro);
1887
1888 set_packet_service_class(m, so, sotc, 0);
1889 m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1890 m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
1891 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
1892 m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC);
1893 if (flowadv) {
1894 m->m_pkthdr.pkt_flags |= PKTF_FLOW_ADV;
1895 }
1896 m->m_pkthdr.tx_udp_pid = so->last_pid;
1897 if (so->so_flags & SOF_DELEGATED) {
1898 m->m_pkthdr.tx_udp_e_pid = so->e_pid;
1899 } else {
1900 m->m_pkthdr.tx_udp_e_pid = 0;
1901 }
1902
1903 if (ipoa.ipoa_boundif != IFSCOPE_NONE) {
1904 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1905 }
1906
1907 if (laddr.s_addr != INADDR_ANY) {
1908 ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
1909 }
1910
1911 socket_unlock(so, 0);
1912 error = ip_output(m, inpopts, &ro, soopts, mopts, &ipoa);
1913 m = NULL;
1914 socket_lock(so, 0);
1915 if (mopts != NULL) {
1916 IMO_REMREF(mopts);
1917 }
1918
1919 if (check_qos_marking_again) {
1920 inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount;
1921
1922 if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) {
1923 inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
1924 } else {
1925 inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
1926 }
1927 }
1928
1929 if (error == 0 && nstat_collect) {
1930 boolean_t cell, wifi, wired;
1931
1932 if (ro.ro_rt != NULL) {
1933 cell = IFNET_IS_CELLULAR(ro.ro_rt->rt_ifp);
1934 wifi = (!cell && IFNET_IS_WIFI(ro.ro_rt->rt_ifp));
1935 wired = (!wifi && IFNET_IS_WIRED(ro.ro_rt->rt_ifp));
1936 } else {
1937 cell = wifi = wired = FALSE;
1938 }
1939 INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1);
1940 INP_ADD_STAT(inp, cell, wifi, wired, txbytes, len);
1941 inp_set_activity_bitmap(inp);
1942 }
1943
1944 if (flowadv && (adv->code == FADV_FLOW_CONTROLLED ||
1945 adv->code == FADV_SUSPENDED)) {
1946 /*
1947 * return a hint to the application that
1948 * the packet has been dropped
1949 */
1950 error = ENOBUFS;
1951 inp_set_fc_state(inp, adv->code);
1952 }
1953
1954 /* Synchronize PCB cached route */
1955 inp_route_copyin(inp, &ro);
1956
1957abort:
1958 if (udp_dodisconnect) {
1959 /* Always discard the cached route for an unconnected socket */
1960 ROUTE_RELEASE(&inp->inp_route);
1961 in_pcbdisconnect(inp);
1962 inp->inp_laddr = origladdr; /* XXX rehash? */
1963 /* no reference needed */
1964 inp->inp_last_outifp = origoutifp;
1965 } else if (inp->inp_route.ro_rt != NULL) {
1966 struct rtentry *rt = inp->inp_route.ro_rt;
1967 struct ifnet *outifp;
1968
1969 if (rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) {
1970 rt = NULL; /* unusable */
1971 }
1972#if CONTENT_FILTER
1973 /*
1974 * Discard temporary route for cfil case
1975 */
1976 if (cfil_faddr_use) {
1977 rt = NULL; /* unusable */
1978 }
1979#endif
1980
1981 /*
1982 * Always discard if it is a multicast or broadcast route.
1983 */
1984 if (rt == NULL) {
1985 ROUTE_RELEASE(&inp->inp_route);
1986 }
1987
1988 /*
1989 * If the destination route is unicast, update outifp to
1990 * the interface that IP actually used for this route.
1991 */
1992 if (rt != NULL &&
1993 (outifp = rt->rt_ifp) != inp->inp_last_outifp) {
1994 inp->inp_last_outifp = outifp; /* no reference needed */
1995
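 /*
 * Illustrative arithmetic (assumed values: plain Ethernet with a
 * 14-byte link header and no preamble): 8 (UDP) + 20 (IP) + 14 + 0
 * = 42 bytes, rounded by P2ROUNDUP() to a 4-byte multiple = 44
 * bytes of headroom recorded on the socket so later sends can
 * preallocate header space.
 */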
1996 so->so_pktheadroom = (uint16_t)P2ROUNDUP(
1997 sizeof(struct udphdr) +
1998 sizeof(struct ip) +
1999 ifnet_hdrlen(outifp) +
2000 ifnet_mbuf_packetpreamblelen(outifp),
2001 sizeof(u_int32_t));
2002 }
2003 } else {
2004 ROUTE_RELEASE(&inp->inp_route);
2005 }
2006
2007 /*
2008 * If the output interface was cellular/expensive/constrained,
2009 * and this socket is denied access to it, generate an event.
2010 */
2011 if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
2012 (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) {
2013 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
2014 }
2015
2016release:
2017 KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
2018
2019 if (m != NULL) {
2020 m_freem(m);
2021 }
2022
2023 if (outif != NULL) {
2024 ifnet_release(outif);
2025 }
2026
2027#if CONTENT_FILTER
2028 if (cfil_tag) {
2029 m_tag_free(cfil_tag);
2030 }
2031#endif
2032 if (sndinprog_cnt_used) {
2033 VERIFY(inp->inp_sndinprog_cnt > 0);
2034 if (--inp->inp_sndinprog_cnt == 0) {
2035 inp->inp_flags &= ~(INP_FC_FEEDBACK);
2036 if (inp->inp_sndingprog_waiters > 0) {
2037 wakeup(&inp->inp_sndinprog_cnt);
2038 }
2039 }
2040 sndinprog_cnt_used = false;
2041 }
2042
2043 return error;
2044}
2045
2046u_int32_t udp_sendspace = 9216; /* really max datagram size */
2047/* 187 1K datagrams (approx 192 KB) */
2048u_int32_t udp_recvspace = 187 * (1024 + sizeof(struct sockaddr_in6));
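/*
 * Worked arithmetic (illustrative; assumes the Darwin value
 * sizeof(struct sockaddr_in6) == 28): 187 * (1024 + 28) = 196,724
 * bytes, roughly 192 KB of receive buffer, enough for 187 1K
 * datagrams plus the per-datagram source address stored with each.
 */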
2049
2050/* Check that the values of UDP send and receive space do not exceed sb_max */
2051static int
2052sysctl_udp_sospace(struct sysctl_oid *oidp, void *arg1, int arg2,
2053 struct sysctl_req *req)
2054{
2055#pragma unused(arg1, arg2)
2056 u_int32_t new_value = 0, *space_p = NULL;
2057 int changed = 0, error = 0;
2058 u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES;
2059
2060 switch (oidp->oid_number) {
2061 case UDPCTL_RECVSPACE:
2062 space_p = &udp_recvspace;
2063 break;
2064 case UDPCTL_MAXDGRAM:
2065 space_p = &udp_sendspace;
2066 break;
2067 default:
2068 return EINVAL;
2069 }
2070 error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
2071 &new_value, &changed);
2072 if (changed) {
2073 if (new_value > 0 && new_value <= sb_effective_max) {
2074 *space_p = new_value;
2075 } else {
2076 error = ERANGE;
2077 }
2078 }
2079 return error;
2080}
2081
2082SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace,
2083 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_recvspace, 0,
2084 &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
2085
2086SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram,
2087 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_sendspace, 0,
2088 &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
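/*
 * Usage sketch (illustrative values): both limits are tunable from user
 * space via sysctl(8) and are clamped by sysctl_udp_sospace() above to
 * the effective socket-buffer maximum derived from sb_max:
 *
 *	sysctl -w net.inet.udp.maxdgram=16384
 *	sysctl -w net.inet.udp.recvspace=196724
 */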
2089
2090int
2091udp_abort(struct socket *so)
2092{
2093 struct inpcb *inp;
2094
2095 inp = sotoinpcb(so);
2096 if (inp == NULL) {
2097 panic("%s: so=%p null inp\n", __func__, so);
2098 /* NOTREACHED */
2099 }
2100 soisdisconnected(so);
2101 in_pcbdetach(inp);
2102 return 0;
2103}
2104
2105int
2106udp_attach(struct socket *so, int proto, struct proc *p)
2107{
2108#pragma unused(proto)
2109 struct inpcb *inp;
2110 int error;
2111
2112 inp = sotoinpcb(so);
2113 if (inp != NULL) {
2114 panic("%s so=%p inp=%p\n", __func__, so, inp);
2115 /* NOTREACHED */
2116 }
2117 error = in_pcballoc(so, &udbinfo, p);
2118 if (error != 0) {
2119 return error;
2120 }
2121 error = soreserve(so, udp_sendspace, udp_recvspace);
2122 if (error != 0) {
2123 return error;
2124 }
2125 inp = (struct inpcb *)so->so_pcb;
2126 inp->inp_vflag |= INP_IPV4;
2127 inp->inp_ip_ttl = (uint8_t)ip_defttl;
2128 if (nstat_collect) {
2129 nstat_udp_new_pcb(inp);
2130 }
2131 return 0;
2132}
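/*
 * Userspace view (a sketch, not kernel code): udp_attach() runs as the
 * pru_attach hook behind
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
 *
 * allocating the PCB and reserving udp_sendspace/udp_recvspace before
 * any bind or connect takes place.
 */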
2133
2134int
2135udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
2136{
2137 struct inpcb *inp;
2138 int error;
2139
2140 if (nam->sa_family != 0 && nam->sa_family != AF_INET &&
2141 nam->sa_family != AF_INET6) {
2142 return EAFNOSUPPORT;
2143 }
2144
2145 inp = sotoinpcb(so);
2146 if (inp == NULL) {
2147 return EINVAL;
2148 }
2149 error = in_pcbbind(inp, nam, p);
2150
2151#if NECP
2152 /* Update NECP client with bind result if not in middle of connect */
2153 if (error == 0 &&
2154 !(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
2155 !uuid_is_null(inp->necp_client_uuid)) {
2156 socket_unlock(so, 0);
2157 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2158 socket_lock(so, 0);
2159 }
2160#endif /* NECP */
2161
2162 return error;
2163}
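/*
 * Userspace sketch (illustrative values): bind(2) supplies the local
 * address and port that in_pcbbind() validates above.
 *
 *	struct sockaddr_in sin = { .sin_len = sizeof(sin),
 *	    .sin_family = AF_INET, .sin_port = htons(5353),
 *	    .sin_addr.s_addr = htonl(INADDR_ANY) };
 *	(void) bind(s, (struct sockaddr *)&sin, sizeof(sin));
 */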
2164
2165int
2166udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
2167{
2168 struct inpcb *inp;
2169 int error;
2170
2171 inp = sotoinpcb(so);
2172 if (inp == NULL) {
2173 return EINVAL;
2174 }
2175 if (inp->inp_faddr.s_addr != INADDR_ANY) {
2176 return EISCONN;
2177 }
2178
2179 if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
2180 so->so_flags1 |= SOF1_CONNECT_COUNTED;
2181 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
2182 }
2183
2184#if NECP
2185#if FLOW_DIVERT
2186 if (necp_socket_should_use_flow_divert(inp)) {
2187 error = flow_divert_pcb_init(so);
2188 if (error == 0) {
2189 error = flow_divert_connect_out(so, nam, p);
2190 }
2191 return error;
2192 }
2193#endif /* FLOW_DIVERT */
2194#endif /* NECP */
2195
2196 error = in_pcbconnect(inp, nam, p, IFSCOPE_NONE, NULL);
2197 if (error == 0) {
2198#if NECP
2199 /* Update NECP client with connected five-tuple */
2200 if (!uuid_is_null(inp->necp_client_uuid)) {
2201 socket_unlock(so, 0);
2202 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2203 socket_lock(so, 0);
2204 }
2205#endif /* NECP */
2206
2207 soisconnected(so);
2208 if (inp->inp_flowhash == 0) {
2209 inp->inp_flowhash = inp_calc_flowhash(inp);
2210 }
2211 }
2212 return error;
2213}
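/*
 * Userspace sketch (illustrative values): connect(2) on a datagram
 * socket records the default peer in the PCB, so later send(2) calls
 * need no destination and datagrams from other peers are filtered out.
 *
 *	struct sockaddr_in peer = { .sin_len = sizeof(peer),
 *	    .sin_family = AF_INET, .sin_port = htons(53) };
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	(void) connect(s, (struct sockaddr *)&peer, sizeof(peer));
 */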
2214
2215int
2216udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sockaddr *dst,
2217 struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
2218 uint32_t flags, void *arg, uint32_t arglen,
2219 struct uio *uio, user_ssize_t *bytes_written)
2220{
2221#pragma unused(aid, flags, arg, arglen)
2222 struct inpcb *inp = sotoinpcb(so);
2223 int error = 0;
2224 user_ssize_t datalen = 0;
2225
2226 if (inp == NULL) {
2227 return EINVAL;
2228 }
2229
2230 VERIFY(dst != NULL);
2231
2232 ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
2233 inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;
2234
2235#if NECP
2236 inp_update_necp_policy(inp, src, dst, ifscope);
2237#endif /* NECP */
2238
2239 /* bind socket to the specified interface, if requested */
2240 if (ifscope != IFSCOPE_NONE &&
2241 (error = inp_bindif(inp, ifscope, NULL)) != 0) {
2242 goto done;
2243 }
2244
2245 /* if source address and/or port is specified, bind to it */
2246 if (src != NULL) {
2247 error = sobindlock(so, src, 0); /* already locked */
2248 if (error != 0) {
2249 goto done;
2250 }
2251 }
2252
2253 switch (af) {
2254 case AF_INET:
2255 error = udp_connect(so, dst, p);
2256 break;
2257 case AF_INET6:
2258 error = udp6_connect(so, dst, p);
2259 break;
2260 default:
2261 VERIFY(0);
2262 /* NOTREACHED */
2263 }
2264
2265 if (error != 0) {
2266 goto done;
2267 }
2268
2269 /*
2270 * If there is data, copy it. DATA_IDEMPOTENT is ignored.
2271 * CONNECT_RESUME_ON_READ_WRITE is ignored.
2272 */
2273 if (uio != NULL) {
2274 socket_unlock(so, 0);
2275
2276 VERIFY(bytes_written != NULL);
2277
2278 datalen = uio_resid(uio);
2279 error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
2280 (uio_t)uio, NULL, NULL, 0);
2281 socket_lock(so, 0);
2282
2283 /* If an error such as EMSGSIZE was returned, disconnect */
2284 if (error == 0 || error == EWOULDBLOCK) {
2285 *bytes_written = datalen - uio_resid(uio);
2286 } else {
2287 (void) so->so_proto->pr_usrreqs->pru_disconnectx(so,
2288 SAE_ASSOCID_ANY, SAE_CONNID_ANY);
2289 }
2290 /*
2291 * mask the EWOULDBLOCK error so that the caller
2292 * knows that at least the connect was successful.
2293 */
2294 if (error == EWOULDBLOCK) {
2295 error = 0;
2296 }
2297 }
2298
2299 if (error == 0 && pcid != NULL) {
2300 *pcid = 1; /* there is only 1 connection for UDP */
2301 }
2302done:
2303 inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
2304 return error;
2305}
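/*
 * Userspace sketch (hedged: the connectx(2) signature below is taken
 * from its man page, not from this file): the bind-if-requested /
 * connect / optional-initial-data sequence above backs connectx(2):
 *
 *	sa_endpoints_t eps = { .sae_dstaddr = (struct sockaddr *)&peer,
 *	    .sae_dstaddrlen = sizeof(peer) };
 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *	size_t sent = 0;
 *	sae_connid_t cid;
 *	(void) connectx(s, &eps, SAE_ASSOCID_ANY, 0, &iov, 1, &sent, &cid);
 */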
2306
2307int
2308udp_connectx(struct socket *so, struct sockaddr *src,
2309 struct sockaddr *dst, struct proc *p, uint32_t ifscope,
2310 sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
2311 uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
2312{
2313 return udp_connectx_common(so, AF_INET, src, dst,
2314 p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written);
2315}
2316
2317int
2318udp_detach(struct socket *so)
2319{
2320 struct inpcb *inp;
2321
2322 inp = sotoinpcb(so);
2323 if (inp == NULL) {
2324 panic("%s: so=%p null inp\n", __func__, so);
2325 /* NOTREACHED */
2326 }
2327
2328 /*
2329 * If this is a socket that does not want to wake up the device
2330 * for its traffic, the application might be waiting for
2331 * close to complete before going to sleep. Send a notification
2332 * for such sockets.
2333 */
2334 if (so->so_options & SO_NOWAKEFROMSLEEP) {
2335 socket_post_kev_msg_closed(so);
2336 }
2337
2338 in_pcbdetach(inp);
2339 inp->inp_state = INPCB_STATE_DEAD;
2340 return 0;
2341}
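/*
 * Usage sketch (assumption: SO_NOWAKEFROMSLEEP is the private
 * SOL_SOCKET option checked above): an application that opts its
 * traffic out of waking the device can rely on the close notification
 * posted here.
 *
 *	int on = 1;
 *	(void) setsockopt(s, SOL_SOCKET, SO_NOWAKEFROMSLEEP,
 *	    &on, sizeof(on));
 */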
2342
2343int
2344udp_disconnect(struct socket *so)
2345{
2346 struct inpcb *inp;
2347
2348 inp = sotoinpcb(so);
2349 if (inp == NULL) {
2350 return EINVAL;
2351 }
2352 if (inp->inp_faddr.s_addr == INADDR_ANY) {
2353 return ENOTCONN;
2354 }
2355
2356 in_pcbdisconnect(inp);
2357
2358 /* reset flow controlled state, just in case */
2359 inp_reset_fc_state(inp);
2360
2361 inp->inp_laddr.s_addr = INADDR_ANY;
2362 so->so_state &= ~SS_ISCONNECTED; /* XXX */
2363 inp->inp_last_outifp = NULL;
2364
2365 return 0;
2366}
2367
2368int
2369udp_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
2370{
2371#pragma unused(cid)
2372 if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
2373 return EINVAL;
2374 }
2375
2376 return udp_disconnect(so);
2377}
2378
2379int
2380udp_send(struct socket *so, int flags, struct mbuf *m,
2381 struct sockaddr *addr, struct mbuf *control, struct proc *p)
2382{
2383#ifndef FLOW_DIVERT
2384#pragma unused(flags)
2385#endif /* !(FLOW_DIVERT) */
2386 struct inpcb *inp;
2387
2388 inp = sotoinpcb(so);
2389 if (inp == NULL) {
2390 if (m != NULL) {
2391 m_freem(m);
2392 }
2393 if (control != NULL) {
2394 m_freem(control);
2395 }
2396 return EINVAL;
2397 }
2398
2399#if NECP
2400#if FLOW_DIVERT
2401 if (necp_socket_should_use_flow_divert(inp)) {
2402 /* Implicit connect */
2403 return flow_divert_implicit_data_out(so, flags, m, addr,
2404 control, p);
2405 }
2406#endif /* FLOW_DIVERT */
2407#endif /* NECP */
2408
2409 return udp_output(inp, m, addr, control, p);
2410}
2411
2412int
2413udp_shutdown(struct socket *so)
2414{
2415 struct inpcb *inp;
2416
2417 inp = sotoinpcb(so);
2418 if (inp == NULL) {
2419 return EINVAL;
2420 }
2421 socantsendmore(so);
2422 return 0;
2423}
2424
2425int
2426udp_lock(struct socket *so, int refcount, void *debug)
2427{
2428 void *lr_saved;
2429
2430 if (debug == NULL) {
2431 lr_saved = __builtin_return_address(0);
2432 } else {
2433 lr_saved = debug;
2434 }
2435
2436 if (so->so_pcb != NULL) {
2437 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2438 LCK_MTX_ASSERT_NOTOWNED);
2439 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2440 } else {
2441 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2442 so, lr_saved, solockhistory_nr(so));
2443 /* NOTREACHED */
2444 }
2445 if (refcount) {
2446 so->so_usecount++;
2447 }
2448
2449 so->lock_lr[so->next_lock_lr] = lr_saved;
2450 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2451 return 0;
2452}
2453
2454int
2455udp_unlock(struct socket *so, int refcount, void *debug)
2456{
2457 void *lr_saved;
2458
2459 if (debug == NULL) {
2460 lr_saved = __builtin_return_address(0);
2461 } else {
2462 lr_saved = debug;
2463 }
2464
2465 if (refcount) {
2466 VERIFY(so->so_usecount > 0);
2467 so->so_usecount--;
2468 }
2469 if (so->so_pcb == NULL) {
2470 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2471 so, lr_saved, solockhistory_nr(so));
2472 /* NOTREACHED */
2473 } else {
2474 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2475 LCK_MTX_ASSERT_OWNED);
2476 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2477 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2478 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2479 }
2480 return 0;
2481}
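/*
 * Debugging note (descriptive): lock_lr/unlock_lr are small ring
 * buffers holding the last SO_LCKDBG_MAX caller return addresses;
 * each acquire/release stores lr_saved and advances its index modulo
 * SO_LCKDBG_MAX, so a "NO PCB!" panic can dump the recent history
 * via solockhistory_nr(so).
 */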
2482
2483lck_mtx_t *
2484udp_getlock(struct socket *so, int flags)
2485{
2486#pragma unused(flags)
2487 struct inpcb *inp = sotoinpcb(so);
2488
2489 if (so->so_pcb == NULL) {
2490 panic("%s: so=%p NULL so_pcb lrh= %s\n", __func__,
2491 so, solockhistory_nr(so));
2492 /* NOTREACHED */
2493 }
2494 return &inp->inpcb_mtx;
2495}
2496
2497/*
2498 * UDP garbage collector callback (inpcb_timer_func_t).
2499 *
2500 * Keeps the timer armed by bumping ipi_gc_req.intimer_fast while work remains.
2501 */
2502static void
2503udp_gc(struct inpcbinfo *ipi)
2504{
2505 struct inpcb *inp, *inpnxt;
2506 struct socket *so;
2507
2508 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2509 if (udp_gc_done == TRUE) {
2510 udp_gc_done = FALSE;
2511 /* couldn't get the lock, must lock next time */
2512 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2513 return;
2514 }
2515 lck_rw_lock_exclusive(ipi->ipi_lock);
2516 }
2517
2518 udp_gc_done = TRUE;
2519
2520 for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
2521 inpnxt = inp->inp_list.le_next;
2522
2523 /*
2524 * Skip unless it's STOPUSING; garbage collector will
2525 * be triggered by in_pcb_checkstate() upon setting
2526 * wantcnt to that value. If the PCB is already dead,
2527 * keep gc active to anticipate wantcnt changing.
2528 */
2529 if (inp->inp_wantcnt != WNT_STOPUSING) {
2530 continue;
2531 }
2532
2533 /*
2534 * Skip if busy, no hurry for cleanup. Keep gc active
2535 * and try the lock again during next round.
2536 */
2537 if (!socket_try_lock(inp->inp_socket)) {
2538 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2539 continue;
2540 }
2541
2542 /*
2543 * Keep gc active unless usecount is 0.
2544 */
2545 so = inp->inp_socket;
2546 if (so->so_usecount == 0) {
2547 if (inp->inp_state != INPCB_STATE_DEAD) {
2548 if (SOCK_CHECK_DOM(so, PF_INET6)) {
2549 in6_pcbdetach(inp);
2550 } else {
2551 in_pcbdetach(inp);
2552 }
2553 }
2554 in_pcbdispose(inp);
2555 } else {
2556 socket_unlock(so, 0);
2557 atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2558 }
2559 }
2560 lck_rw_done(ipi->ipi_lock);
2561}
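/*
 * Re-arm contract (a sketch of the mechanism above): udp_gc() returns
 * nothing; it keeps itself scheduled by incrementing
 * ipi_gc_req.intimer_fast whenever work had to be skipped (lock
 * contention or a busy socket), and the inpcb timer invokes the
 * collector again while that counter is non-zero.
 */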
2562
2563static int
2564udp_getstat SYSCTL_HANDLER_ARGS
2565{
2566#pragma unused(oidp, arg1, arg2)
2567 if (req->oldptr == USER_ADDR_NULL) {
2568 req->oldlen = (size_t)sizeof(struct udpstat);
2569 }
2570
2571 return SYSCTL_OUT(req, &udpstat, MIN(sizeof(udpstat), req->oldlen));
2572}
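/*
 * Two-phase sysctl read (descriptive): a caller passing oldptr == NULL
 * is only asking for the required buffer size, so udp_getstat() reports
 * sizeof(struct udpstat); a second call with a buffer of at least that
 * size copies the statistics out, truncated to req->oldlen if smaller.
 */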
2573
2574void
2575udp_in_cksum_stats(u_int32_t len)
2576{
2577 udpstat.udps_rcv_swcsum++;
2578 udpstat.udps_rcv_swcsum_bytes += len;
2579}
2580
2581void
2582udp_out_cksum_stats(u_int32_t len)
2583{
2584 udpstat.udps_snd_swcsum++;
2585 udpstat.udps_snd_swcsum_bytes += len;
2586}
2587
2588void
2589udp_in6_cksum_stats(u_int32_t len)
2590{
2591 udpstat.udps_rcv6_swcsum++;
2592 udpstat.udps_rcv6_swcsum_bytes += len;
2593}
2594
2595void
2596udp_out6_cksum_stats(u_int32_t len)
2597{
2598 udpstat.udps_snd6_swcsum++;
2599 udpstat.udps_snd6_swcsum_bytes += len;
2600}
2601
2602/*
2603 * Checksum extended UDP header and data.
2604 */
2605static int
2606udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen)
2607{
2608 struct ifnet *ifp = m->m_pkthdr.rcvif;
2609 struct ip *ip = mtod(m, struct ip *);
2610 struct ipovly *ipov = (struct ipovly *)ip;
2611
2612 if (uh->uh_sum == 0) {
2613 udpstat.udps_nosum++;
2614 return 0;
2615 }
2616
2617 /* ip_stripoptions() must have been called before we get here */
2618 ASSERT((ip->ip_hl << 2) == sizeof(*ip));
2619
2620 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
2621 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
2622 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
2623 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
2624 uh->uh_sum = m->m_pkthdr.csum_rx_val;
2625 } else {
2626 uint32_t sum = m->m_pkthdr.csum_rx_val;
2627 uint32_t start = m->m_pkthdr.csum_rx_start;
2628 int32_t trailer = (m_pktlen(m) - (off + ulen));
2629
2630 /*
2631 * Perform 1's complement adjustment of octets
2632 * that got included/excluded in the hardware-
2633 * calculated checksum value. Ignore cases
2634 * where the value already includes the entire
2635 * IP header span, as the sum for those octets
2636 * would already be 0 by the time we get here;
2637 * IP has already performed its header checksum
2638 * checks. If we do need to adjust, restore
2639 * the original fields in the IP header when
2640 * computing the adjustment value. Also take
2641 * care of any trailing bytes and subtract out
2642 * their partial sum.
2643 */
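 /*
 * Worked example (illustrative numbers): with a valid header
 * checksum the 20-byte IP header sums to 0xffff, i.e. zero in 1's
 * complement, which is why a sum starting at 0 needs no header
 * adjustment. If the driver instead reports start = 2 while the
 * UDP header sits at off = 20 and the frame carries trailer = 4
 * padding bytes, m_adj_sum16() folds the partial sums of bytes
 * [2, 20) and of the 4 trailing bytes out of `sum`, and those
 * 18 + 4 octets are charged to swbytes as software-checksummed.
 */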
2644 ASSERT(trailer >= 0);
2645 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
2646 ((start != 0 && start != off) || trailer != 0)) {
2647 uint32_t swbytes = (uint32_t)trailer;
2648
2649 if (start < off) {
2650 ip->ip_len += sizeof(*ip);
2651#if BYTE_ORDER != BIG_ENDIAN
2652 HTONS(ip->ip_len);
2653 HTONS(ip->ip_off);
2654#endif /* BYTE_ORDER != BIG_ENDIAN */
2655 }
2656 /* callee folds in sum */
2657 sum = m_adj_sum16(m, start, off, ulen, sum);
2658 if (off > start) {
2659 swbytes += (off - start);
2660 } else {
2661 swbytes += (start - off);
2662 }
2663
2664 if (start < off) {
2665#if BYTE_ORDER != BIG_ENDIAN
2666 NTOHS(ip->ip_off);
2667 NTOHS(ip->ip_len);
2668#endif /* BYTE_ORDER != BIG_ENDIAN */
2669 ip->ip_len -= sizeof(*ip);
2670 }
2671
2672 if (swbytes != 0) {
2673 udp_in_cksum_stats(swbytes);
2674 }
2675 if (trailer != 0) {
2676 m_adj(m, -trailer);
2677 }
2678 }
2679
2680 /* callee folds in sum */
2681 uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
2682 ip->ip_dst.s_addr, sum + htonl(ulen + IPPROTO_UDP));
2683 }
2684 uh->uh_sum ^= 0xffff;
2685 } else {
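 /*
 * Software fallback (descriptive): overlay the IP header with
 * struct ipovly, stash the overlapped ih_x1 bytes and the length
 * field, substitute the UDP length so the overlay becomes the
 * pseudo-header in place, run in_cksum() over pseudo-header plus
 * datagram, then restore the original bytes.
 */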
2686 uint16_t ip_sum;
2687 char b[9];
2688
2689 bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1));
2690 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
2691 ip_sum = ipov->ih_len;
2692 ipov->ih_len = uh->uh_ulen;
2693 uh->uh_sum = in_cksum(m, ulen + sizeof(struct ip));
2694 bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1));
2695 ipov->ih_len = ip_sum;
2696
2697 udp_in_cksum_stats(ulen);
2698 }
2699
2700 if (uh->uh_sum != 0) {
2701 udpstat.udps_badsum++;
2702 IF_UDP_STATINC(ifp, badchksum);
2703 return -1;
2704 }
2705
2706 return 0;
2707}
2708
2709void
2710udp_fill_keepalive_offload_frames(ifnet_t ifp,
2711 struct ifnet_keepalive_offload_frame *frames_array,
2712 u_int32_t frames_array_count, size_t frame_data_offset,
2713 u_int32_t *used_frames_count)
2714{
2715 struct inpcb *inp;
2716 inp_gen_t gencnt;
2717 u_int32_t frame_index = *used_frames_count;
2718
2719 if (ifp == NULL || frames_array == NULL ||
2720 frames_array_count == 0 ||
2721 frame_index >= frames_array_count ||
2722 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2723 return;
2724 }
2725
2726 lck_rw_lock_shared(udbinfo.ipi_lock);
2727 gencnt = udbinfo.ipi_gencnt;
2728 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
2729 struct socket *so;
2730 u_int8_t *data;
2731 struct ifnet_keepalive_offload_frame *frame;
2732 struct mbuf *m = NULL;
2733
2734 if (frame_index >= frames_array_count) {
2735 break;
2736 }
2737
2738 if (inp->inp_gencnt > gencnt ||
2739 inp->inp_state == INPCB_STATE_DEAD) {
2740 continue;
2741 }
2742
2743 if ((so = inp->inp_socket) == NULL ||
2744 (so->so_state & SS_DEFUNCT)) {
2745 continue;
2746 }
2747 /*
2748 * check for the keepalive offload flag without the socket
2749 * lock to avoid a deadlock
2750 */
2751 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
2752 continue;
2753 }
2754
2755 udp_lock(so, 1, 0);
2756 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
2757 udp_unlock(so, 1, 0);
2758 continue;
2759 }
2760 if ((inp->inp_vflag & INP_IPV4) &&
2761 (inp->inp_laddr.s_addr == INADDR_ANY ||
2762 inp->inp_faddr.s_addr == INADDR_ANY)) {
2763 udp_unlock(so, 1, 0);
2764 continue;
2765 }
2766 if ((inp->inp_vflag & INP_IPV6) &&
2767 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
2768 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
2769 udp_unlock(so, 1, 0);
2770 continue;
2771 }
2772 if (inp->inp_lport == 0 || inp->inp_fport == 0) {
2773 udp_unlock(so, 1, 0);
2774 continue;
2775 }
2776 if (inp->inp_last_outifp == NULL ||
2777 inp->inp_last_outifp->if_index != ifp->if_index) {
2778 udp_unlock(so, 1, 0);
2779 continue;
2780 }
2781 if ((inp->inp_vflag & INP_IPV4)) {
2782 if ((frame_data_offset + sizeof(struct udpiphdr) +
2783 inp->inp_keepalive_datalen) >
2784 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2785 udp_unlock(so, 1, 0);
2786 continue;
2787 }
2788 if ((sizeof(struct udpiphdr) +
2789 inp->inp_keepalive_datalen) > _MHLEN) {
2790 udp_unlock(so, 1, 0);
2791 continue;
2792 }
2793 } else {
2794 if ((frame_data_offset + sizeof(struct ip6_hdr) +
2795 sizeof(struct udphdr) +
2796 inp->inp_keepalive_datalen) >
2797 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2798 udp_unlock(so, 1, 0);
2799 continue;
2800 }
2801 if ((sizeof(struct ip6_hdr) + sizeof(struct udphdr) +
2802 inp->inp_keepalive_datalen) > _MHLEN) {
2803 udp_unlock(so, 1, 0);
2804 continue;
2805 }
2806 }
2807 MGETHDR(m, M_WAIT, MT_HEADER);
2808 if (m == NULL) {
2809 udp_unlock(so, 1, 0);
2810 continue;
2811 }
2812 /*
2813 * This inp has all the information that is needed to
2814 * generate an offload frame.
2815 */
2816 if (inp->inp_vflag & INP_IPV4) {
2817 struct ip *ip;
2818 struct udphdr *udp;
2819
2820 frame = &frames_array[frame_index];
2821 frame->length = (uint8_t)(frame_data_offset +
2822 sizeof(struct udpiphdr) +
2823 inp->inp_keepalive_datalen);
2824 frame->ether_type =
2825 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
2826 frame->interval = inp->inp_keepalive_interval;
2827 switch (inp->inp_keepalive_type) {
2828 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2829 frame->type =
2830 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2831 break;
2832 default:
2833 break;
2834 }
2835 data = mtod(m, u_int8_t *);
2836 bzero(data, sizeof(struct udpiphdr));
2837 ip = (__typeof__(ip))(void *)data;
2838 udp = (__typeof__(udp))(void *) (data +
2839 sizeof(struct ip));
2840 m->m_len = sizeof(struct udpiphdr);
2841 data = data + sizeof(struct udpiphdr);
2842 if (inp->inp_keepalive_datalen > 0 &&
2843 inp->inp_keepalive_data != NULL) {
2844 bcopy(inp->inp_keepalive_data, data,
2845 inp->inp_keepalive_datalen);
2846 m->m_len += inp->inp_keepalive_datalen;
2847 }
2848 m->m_pkthdr.len = m->m_len;
2849
2850 ip->ip_v = IPVERSION;
2851 ip->ip_hl = (sizeof(struct ip) >> 2);
2852 ip->ip_p = IPPROTO_UDP;
2853 ip->ip_len = htons(sizeof(struct udpiphdr) +
2854 (u_short)inp->inp_keepalive_datalen);
2855 ip->ip_ttl = inp->inp_ip_ttl;
2856 ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
2857 ip->ip_src = inp->inp_laddr;
2858 ip->ip_dst = inp->inp_faddr;
2859 ip->ip_sum = in_cksum_hdr_opt(ip);
2860
2861 udp->uh_sport = inp->inp_lport;
2862 udp->uh_dport = inp->inp_fport;
2863 udp->uh_ulen = htons(sizeof(struct udphdr) +
2864 (u_short)inp->inp_keepalive_datalen);
2865
2866 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2867 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
2868 ip->ip_dst.s_addr,
2869 htons(sizeof(struct udphdr) +
2870 (u_short)inp->inp_keepalive_datalen +
2871 IPPROTO_UDP));
2872 m->m_pkthdr.csum_flags =
2873 (CSUM_UDP | CSUM_ZERO_INVERT);
2874 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2875 uh_sum);
2876 }
2877 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2878 in_delayed_cksum(m);
2879 bcopy(m->m_data, frame->data + frame_data_offset,
2880 m->m_len);
2881 } else {
2882 struct ip6_hdr *ip6;
2883 struct udphdr *udp6;
2884
2885 VERIFY(inp->inp_vflag & INP_IPV6);
2886 frame = &frames_array[frame_index];
2887 frame->length = (uint8_t)(frame_data_offset +
2888 sizeof(struct ip6_hdr) +
2889 sizeof(struct udphdr) +
2890 inp->inp_keepalive_datalen);
2891 frame->ether_type =
2892 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
2893 frame->interval = inp->inp_keepalive_interval;
2894 switch (inp->inp_keepalive_type) {
2895 case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2896 frame->type =
2897 IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2898 break;
2899 default:
2900 break;
2901 }
2902 data = mtod(m, u_int8_t *);
2903 bzero(data, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
2904 ip6 = (__typeof__(ip6))(void *)data;
2905 udp6 = (__typeof__(udp6))(void *)(data +
2906 sizeof(struct ip6_hdr));
2907 m->m_len = sizeof(struct ip6_hdr) +
2908 sizeof(struct udphdr);
2909 data = data + (sizeof(struct ip6_hdr) +
2910 sizeof(struct udphdr));
2911 if (inp->inp_keepalive_datalen > 0 &&
2912 inp->inp_keepalive_data != NULL) {
2913 bcopy(inp->inp_keepalive_data, data,
2914 inp->inp_keepalive_datalen);
2915 m->m_len += inp->inp_keepalive_datalen;
2916 }
2917 m->m_pkthdr.len = m->m_len;
2918 ip6->ip6_flow = inp->inp_flow & IPV6_FLOWINFO_MASK;
2919 ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
2920 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2921 ip6->ip6_vfc |= IPV6_VERSION;
2922 ip6->ip6_nxt = IPPROTO_UDP;
2923 ip6->ip6_hlim = (uint8_t)ip6_defhlim;
2924 ip6->ip6_plen = htons(sizeof(struct udphdr) +
2925 (u_short)inp->inp_keepalive_datalen);
2926 ip6->ip6_src = inp->in6p_laddr;
2927 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
2928 ip6->ip6_src.s6_addr16[1] = 0;
2929 }
2930
2931 ip6->ip6_dst = inp->in6p_faddr;
2932 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
2933 ip6->ip6_dst.s6_addr16[1] = 0;
2934 }
2935
2936 udp6->uh_sport = inp->in6p_lport;
2937 udp6->uh_dport = inp->in6p_fport;
2938 udp6->uh_ulen = htons(sizeof(struct udphdr) +
2939 (u_short)inp->inp_keepalive_datalen);
2940 if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2941 udp6->uh_sum = in6_pseudo(&ip6->ip6_src,
2942 &ip6->ip6_dst,
2943 htonl(sizeof(struct udphdr) +
2944 (u_short)inp->inp_keepalive_datalen +
2945 IPPROTO_UDP));
2946 m->m_pkthdr.csum_flags =
2947 (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
2948 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2949 uh_sum);
2950 }
2951 m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2952 in6_delayed_cksum(m);
2953 bcopy(m->m_data, frame->data + frame_data_offset,
2954 m->m_len);
2955 }
2956 if (m != NULL) {
2957 m_freem(m);
2958 m = NULL;
2959 }
2960 frame_index++;
2961 udp_unlock(so, 1, 0);
2962 }
2963 lck_rw_done(udbinfo.ipi_lock);
2964 *used_frames_count = frame_index;
2965}
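/*
 * Usage sketch (assumption: the private UDP_KEEPALIVE_OFFLOAD socket
 * option and struct udp_keepalive_offload from <netinet/udp.h>, as
 * used by AirPlay-style clients): a connected UDP socket hands its
 * keepalive payload, interval and type to the kernel, which builds
 * the frames above for interfaces able to transmit while the host
 * sleeps.
 *
 *	struct udp_keepalive_offload ka = { 0 };
 *	memcpy(ka.ka_data, payload, payload_len);
 *	ka.ka_data_len = (u_int8_t)payload_len;
 *	ka.ka_interval = 30;
 *	ka.ka_type = UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY;
 *	(void) setsockopt(s, IPPROTO_UDP, UDP_KEEPALIVE_OFFLOAD,
 *	    &ka, sizeof(ka));
 */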