/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)udp_usrreq.c        8.6 (Berkeley) 5/23/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <net/ntstat.h>

#include <kern/zalloc.h>
#include <mach/boolean.h>

#include <net/if_types.h>
#include <net/route.h>
#include <net/net_api_stats.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/udp6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/kdebug.h>
#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
extern int ipsec_bypass;
extern int esp_udp_encap_port;
#include <net/necp.h>

#if FLOW_DIVERT
#include <netinet/flow_divert.h>
#endif /* FLOW_DIVERT */

#if CONTENT_FILTER
#include <net/content_filter.h>
#endif /* CONTENT_FILTER */
#define DBG_LAYER_IN_BEG        NETDBG_CODE(DBG_NETUDP, 0)
#define DBG_LAYER_IN_END        NETDBG_CODE(DBG_NETUDP, 2)
#define DBG_LAYER_OUT_BEG       NETDBG_CODE(DBG_NETUDP, 1)
#define DBG_LAYER_OUT_END       NETDBG_CODE(DBG_NETUDP, 3)
#define DBG_FNC_UDP_INPUT       NETDBG_CODE(DBG_NETUDP, (5 << 8))
#define DBG_FNC_UDP_OUTPUT      NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1)
/*
 * UDP protocol implementation.
 * Per RFC 768, August, 1980.
 */
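/*
 * For reference, the 8-byte UDP header defined by RFC 768 (carried here as
 * struct udphdr from <netinet/udp.h>) consists of four 16-bit fields: source
 * port, destination port, length (header plus payload), and a checksum
 * computed over a pseudo header plus the datagram.
 */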
#ifndef COMPAT_42
static int udpcksum = 1;
#else
static int udpcksum = 0;                /* XXX */
#endif
SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udpcksum, 0, "");
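/*
 * This knob surfaces as net.inet.udp.checksum; for example, an administrator
 * could disable transmit checksums (not recommended) with something like
 * "sysctl -w net.inet.udp.checksum=0" from userspace.  The name follows from
 * the _net_inet_udp parent node and the "checksum" leaf above.
 */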
int udp_log_in_vain = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
    &udp_log_in_vain, 0, "Log all incoming UDP packets");
static int blackhole = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
    &blackhole, 0, "Do not send port unreachables for refused connects");
struct inpcbhead udb;           /* from udp_var.h */
#define udb6    udb             /* for KAME src sync over BSD*'s */
struct inpcbinfo udbinfo;

#define UDBHASHSIZE 16
/* Garbage collection performed during most recent udp_gc() run */
static boolean_t udp_gc_done = FALSE;
#if IPFIREWALL
extern int fw_verbose;
extern void ipfwsyslog(int level, const char *format, ...);
extern void ipfw_stealth_stats_incr_udp(void);

/* Apple logging, log to ipfw.log */
#define log_in_vain_log(a) {                                            \
        if ((udp_log_in_vain == 3) && (fw_verbose == 2)) {              \
                ipfwsyslog a;                                           \
        } else if ((udp_log_in_vain == 4) && (fw_verbose == 2)) {       \
                ipfw_stealth_stats_incr_udp();                          \
        } else {                                                        \
                log a;                                                  \
        }                                                               \
}
#else /* !IPFIREWALL */
#define log_in_vain_log(a) { log a; }
#endif /* !IPFIREWALL */
static int udp_getstat SYSCTL_HANDLER_ARGS;
struct  udpstat udpstat;        /* from udp_var.h */
SYSCTL_PROC(_net_inet_udp, UDPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, udp_getstat, "S,udpstat",
    "UDP statistics (struct udpstat, netinet/udp_var.h)");
SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount,
    CTLFLAG_RD | CTLFLAG_LOCKED, &udbinfo.ipi_count, 0,
    "Number of active PCBs");
__private_extern__ int udp_use_randomport = 1;
SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports,
    CTLFLAG_RW | CTLFLAG_LOCKED, &udp_use_randomport, 0,
    "Randomize UDP port numbers");
struct udp_in6 {
        struct sockaddr_in6     uin6_sin;
        u_char                  uin6_init_done : 1;
};
struct udp_ip6 {
        struct ip6_hdr          uip6_ip6;
        u_char                  uip6_init_done : 1;
};
int udp_abort(struct socket *);
int udp_attach(struct socket *, int, struct proc *);
int udp_bind(struct socket *, struct sockaddr *, struct proc *);
int udp_connect(struct socket *, struct sockaddr *, struct proc *);
int udp_connectx(struct socket *, struct sockaddr *,
    struct sockaddr *, struct proc *, uint32_t, sae_associd_t,
    sae_connid_t *, uint32_t, void *, uint32_t, struct uio *, user_ssize_t *);
int udp_detach(struct socket *);
int udp_disconnect(struct socket *);
int udp_disconnectx(struct socket *, sae_associd_t, sae_connid_t);
int udp_send(struct socket *, int, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
#if INET6
static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
    struct sockaddr_in *, struct udp_in6 *, struct udp_ip6 *, struct ifnet *);
#else /* !INET6 */
static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int,
    struct sockaddr_in *, struct ifnet *);
#endif /* !INET6 */
static int udp_input_checksum(struct mbuf *, struct udphdr *, int, int);
int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
static void udp_gc(struct inpcbinfo *);
struct pr_usrreqs udp_usrreqs = {
        .pru_abort =            udp_abort,
        .pru_attach =           udp_attach,
        .pru_bind =             udp_bind,
        .pru_connect =          udp_connect,
        .pru_connectx =         udp_connectx,
        .pru_control =          in_control,
        .pru_detach =           udp_detach,
        .pru_disconnect =       udp_disconnect,
        .pru_disconnectx =      udp_disconnectx,
        .pru_peeraddr =         in_getpeeraddr,
        .pru_send =             udp_send,
        .pru_shutdown =         udp_shutdown,
        .pru_sockaddr =         in_getsockaddr,
        .pru_sosend =           sosend,
        .pru_soreceive =        soreceive,
        .pru_soreceive_list =   soreceive_list,
};
void
udp_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
        static int udp_initialized = 0;
        vm_size_t str_size;
        struct inpcbinfo *pcbinfo;

        VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

        if (udp_initialized) {
                return;
        }
        udp_initialized = 1;

        uint32_t pool_size = (nmbclusters << MCLSHIFT) >> MBSHIFT;
        if (pool_size >= 96) {
                /* Improves 10GbE UDP performance. */
                udp_recvspace = 786896;
        }
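        /*
         * Rough reasoning for the threshold above: (nmbclusters << MCLSHIFT)
         * is the size of the mbuf cluster pool in bytes, and shifting right
         * by MBSHIFT converts that to megabytes.  So on systems with roughly
         * 96 MB or more of cluster memory, the default UDP receive buffer is
         * raised so a single socket can absorb 10GbE-rate bursts.
         */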
        udbinfo.ipi_listhead = &udb;
        udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
            &udbinfo.ipi_hashmask);
        udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
            &udbinfo.ipi_porthashmask);
        str_size = (vm_size_t) sizeof(struct inpcb);
        udbinfo.ipi_zone = zinit(str_size, 80000 * str_size, 8192, "udpcb");

        pcbinfo = &udbinfo;
        /*
         * allocate lock group attribute and group for udp pcb mutexes
         */
        pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
        pcbinfo->ipi_lock_grp = lck_grp_alloc_init("udppcb",
            pcbinfo->ipi_lock_grp_attr);
        pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
        if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
            pcbinfo->ipi_lock_attr)) == NULL) {
                panic("%s: unable to allocate PCB lock\n", __func__);
                /* NOTREACHED */
        }

        udbinfo.ipi_gc = udp_gc;
        in_pcbinfo_attach(&udbinfo);
}
void
udp_input(struct mbuf *m, int iphlen)
{
        struct ip *ip;
        struct udphdr *uh;
        struct inpcb *inp;
        struct mbuf *opts = NULL;
        int len, isbroadcast;
        struct ip save_ip;
        struct sockaddr *append_sa;
        struct inpcbinfo *pcbinfo = &udbinfo;
        struct sockaddr_in udp_in;
        struct ip_moptions *imo = NULL;
        int foundmembership = 0, ret = 0;
        struct udp_in6 udp_in6;
        struct udp_ip6 udp_ip6;
        struct ifnet *ifp = m->m_pkthdr.rcvif;
        boolean_t cell = IFNET_IS_CELLULAR(ifp);
        boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
        boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));

        bzero(&udp_in, sizeof(udp_in));
        udp_in.sin_len = sizeof(struct sockaddr_in);
        udp_in.sin_family = AF_INET;
        bzero(&udp_in6, sizeof(udp_in6));
        udp_in6.uin6_sin.sin6_len = sizeof(struct sockaddr_in6);
        udp_in6.uin6_sin.sin6_family = AF_INET6;
        udpstat.udps_ipackets++;

        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

        /* Expect 32-bit aligned data pointer on strict-align platforms */
        MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

        /*
         * Strip IP options, if any; should skip this,
         * make available to user, and use on returned packets,
         * but we don't yet have a way to check the checksum
         * with options still present.
         */
        if (iphlen > sizeof(struct ip)) {
                iphlen = sizeof(struct ip);
        }
        /*
         * Get IP and UDP header together in first mbuf.
         */
        ip = mtod(m, struct ip *);
        if (m->m_len < iphlen + sizeof(struct udphdr)) {
                m = m_pullup(m, iphlen + sizeof(struct udphdr));
                if (m == NULL) {
                        udpstat.udps_hdrops++;
                        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                            0, 0, 0, 0, 0);
                        return;
                }
                ip = mtod(m, struct ip *);
        }
        uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);

        /* destination port of 0 is illegal, based on RFC768. */
        if (uh->uh_dport == 0) {
                IF_UDP_STATINC(ifp, port0);
                goto bad;
        }

        KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
            ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);

        /*
         * Make mbuf data length reflect UDP length.
         * If not enough data to reflect UDP length, drop.
         */
        len = ntohs((u_short)uh->uh_ulen);
        if (ip->ip_len != len) {
                if (len > ip->ip_len || len < sizeof(struct udphdr)) {
                        udpstat.udps_badlen++;
                        IF_UDP_STATINC(ifp, badlength);
                        goto bad;
                }
                m_adj(m, len - ip->ip_len);
                /* ip->ip_len = len; */
        }
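        /*
         * Note on the check above: uh_ulen counts the UDP header itself, so a
         * value below sizeof(struct udphdr) (8 bytes) can never be valid, and
         * a value larger than what IP delivered means the datagram was
         * truncated in transit; either way the packet is dropped.  When the
         * IP payload is longer than uh_ulen (e.g. link-layer padding),
         * m_adj() simply trims the excess off the tail of the mbuf chain.
         */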
        /*
         * Save a copy of the IP header in case we want to restore it
         * for sending an ICMP error message in response.
         */
        save_ip = *ip;

        /*
         * Checksum extended UDP header and data.
         */
        if (udp_input_checksum(m, uh, iphlen, len)) {
                goto bad;
        }

        isbroadcast = in_broadcast(ip->ip_dst, ifp);

        if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || isbroadcast) {
                int reuse_sock = 0, mcast_delivered = 0;

                lck_rw_lock_shared(pcbinfo->ipi_lock);
                /*
                 * Deliver a multicast or broadcast datagram to *all* sockets
                 * for which the local and remote addresses and ports match
                 * those of the incoming datagram.  This allows more than
                 * one process to receive multi/broadcasts on the same port.
                 * (This really ought to be done for unicast datagrams as
                 * well, but that would cause problems with existing
                 * applications that open both address-specific sockets and
                 * a wildcard socket listening to the same port -- they would
                 * end up receiving duplicates of every unicast datagram.
                 * Those applications open the multiple sockets to overcome an
                 * inadequacy of the UDP socket interface, but for backwards
                 * compatibility we avoid the problem here rather than
                 * fixing the interface.  Maybe 4.5BSD will remedy this?)
                 */
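                /*
                 * Loosely speaking, "match" below is what a unicast PCB
                 * lookup would require, except that a wildcard (INADDR_ANY)
                 * local address or an unconnected foreign address still
                 * qualifies; every socket that passes those checks gets its
                 * own copy of the datagram.
                 */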
                /*
                 * Construct sockaddr format source address.
                 */
                udp_in.sin_port = uh->uh_sport;
                udp_in.sin_addr = ip->ip_src;
                /*
                 * Locate pcb(s) for datagram.
                 * (Algorithm copied from raw_intr().)
                 */
                udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
                LIST_FOREACH(inp, &udb, inp_list) {
                        if (inp->inp_socket == NULL) {
                                continue;
                        }
                        if (inp != sotoinpcb(inp->inp_socket)) {
                                panic("%s: bad so back ptr inp=%p\n",
                                    __func__, inp);
                                /* NOTREACHED */
                        }
                        if ((inp->inp_vflag & INP_IPV4) == 0) {
                                continue;
                        }
                        if (inp_restricted_recv(inp, ifp)) {
                                continue;
                        }
                        if ((inp->inp_moptions == NULL) &&
                            (ntohl(ip->ip_dst.s_addr) !=
                            INADDR_ALLHOSTS_GROUP) && (isbroadcast == 0)) {
                                continue;
                        }
                        if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
                            WNT_STOPUSING) {
                                continue;
                        }
                        udp_lock(inp->inp_socket, 1, 0);
                        if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
                            WNT_STOPUSING) {
                                udp_unlock(inp->inp_socket, 1, 0);
                                continue;
                        }
                        if (inp->inp_lport != uh->uh_dport) {
                                udp_unlock(inp->inp_socket, 1, 0);
                                continue;
                        }
                        if (inp->inp_laddr.s_addr != INADDR_ANY) {
                                if (inp->inp_laddr.s_addr !=
                                    ip->ip_dst.s_addr) {
                                        udp_unlock(inp->inp_socket, 1, 0);
                                        continue;
                                }
                        }
                        if (inp->inp_faddr.s_addr != INADDR_ANY) {
                                if (inp->inp_faddr.s_addr !=
                                    ip->ip_src.s_addr ||
                                    inp->inp_fport != uh->uh_sport) {
                                        udp_unlock(inp->inp_socket, 1, 0);
                                        continue;
                                }
                        }
                        if (isbroadcast == 0 && (ntohl(ip->ip_dst.s_addr) !=
                            INADDR_ALLHOSTS_GROUP)) {
                                struct sockaddr_in group;
                                int blocked;

                                if ((imo = inp->inp_moptions) == NULL) {
                                        udp_unlock(inp->inp_socket, 1, 0);
                                        continue;
                                }

                                bzero(&group, sizeof(struct sockaddr_in));
                                group.sin_len = sizeof(struct sockaddr_in);
                                group.sin_family = AF_INET;
                                group.sin_addr = ip->ip_dst;

                                blocked = imo_multi_filter(imo, ifp,
                                    &group, &udp_in);
                                if (blocked == MCAST_PASS) {
                                        foundmembership = 1;
                                }

                                if (!foundmembership) {
                                        udp_unlock(inp->inp_socket, 1, 0);
                                        if (blocked == MCAST_NOTSMEMBER ||
                                            blocked == MCAST_MUTED) {
                                                udpstat.udps_filtermcast++;
                                        }
                                        continue;
                                }
                                foundmembership = 0;
                        }

                        reuse_sock = (inp->inp_socket->so_options &
                            (SO_REUSEPORT | SO_REUSEADDR));

                        if (!necp_socket_is_allowed_to_send_recv_v4(inp,
                            uh->uh_dport, uh->uh_sport, &ip->ip_dst,
                            &ip->ip_src, ifp, NULL, NULL, NULL)) {
                                /* do not inject data to pcb */
                        } else {
                                struct mbuf *n = NULL;

                                if (reuse_sock) {
                                        n = m_copy(m, 0, M_COPYALL);
                                }
#if INET6
                                udp_append(inp, ip, m,
                                    iphlen + sizeof(struct udphdr),
                                    &udp_in, &udp_in6, &udp_ip6, ifp);
#else /* !INET6 */
                                udp_append(inp, ip, m,
                                    iphlen + sizeof(struct udphdr),
                                    &udp_in, ifp);
#endif /* !INET6 */
                                mcast_delivered++;

                                m = n;
                        }
                        udp_unlock(inp->inp_socket, 1, 0);

                        /*
                         * Don't look for additional matches if this one does
                         * not have either the SO_REUSEPORT or SO_REUSEADDR
                         * socket options set.  This heuristic avoids searching
                         * through all pcbs in the common case of a non-shared
                         * port.  It assumes that an application will never
                         * clear these options after setting them.
                         */
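                        /*
                         * For illustration, a userspace receiver opts into
                         * this shared delivery path along these lines
                         * (sketch, error handling omitted):
                         *
                         *      int on = 1;
                         *      int s = socket(AF_INET, SOCK_DGRAM, 0);
                         *      setsockopt(s, SOL_SOCKET, SO_REUSEPORT,
                         *          &on, sizeof(on));
                         *      bind(s, ...);   // same port in each process
                         *
                         * Without SO_REUSEPORT/SO_REUSEADDR the loop stops at
                         * the first match, as the comment above explains.
                         */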
                        if (reuse_sock == 0 || m == NULL) {
                                break;
                        }
                        /*
                         * Expect 32-bit aligned data pointer on strict-align
                         * platforms.
                         */
                        MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
                        /*
                         * Recompute IP and UDP header pointers for new mbuf
                         */
                        ip = mtod(m, struct ip *);
                        uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
                }
                lck_rw_done(pcbinfo->ipi_lock);

                if (mcast_delivered == 0) {
                        /*
                         * No matching pcb found; discard datagram.
                         * (No need to send an ICMP Port Unreachable
                         * for a broadcast or multicast datagram.)
                         */
                        udpstat.udps_noportbcast++;
                        IF_UDP_STATINC(ifp, port_unreach);
                        goto bad;
                }

                /* free the extra copy of mbuf or skipped by IPSec */
                if (m != NULL) {
                        m_freem(m);
                }
                KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
                return;
        }
        /*
         * UDP to port 4500 with a payload where the first four bytes are
         * not zero is a UDP encapsulated IPSec packet. Packets where
         * the payload is one byte and that byte is 0xFF are NAT keepalive
         * packets. Decapsulate the ESP packet and carry on with IPSec input
         * or discard the NAT keep-alive.
         */
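        /*
         * This follows the UDP encapsulation rules of RFC 3948: an IKE packet
         * carries a four-byte non-ESP marker of zero right after the UDP
         * header, an ESP packet starts with a non-zero SPI in that same
         * position, and a lone 0xFF byte is a NAT-keepalive probe that only
         * refreshes the NAT mapping and is dropped here.
         */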
        if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 &&
            uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) {
                int payload_len = len - sizeof(struct udphdr) > 4 ? 4 :
                    len - sizeof(struct udphdr);

                if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) {
                        if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) +
                            payload_len)) == NULL) {
                                udpstat.udps_hdrops++;
                                KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                                    0, 0, 0, 0, 0);
                                return;
                        }
                        /*
                         * Expect 32-bit aligned data pointer on strict-align
                         * platforms.
                         */
                        MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

                        ip = mtod(m, struct ip *);
                        uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
                }
                /* Check for NAT keepalive packet */
                if (payload_len == 1 && *(u_int8_t *)
                    ((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
                        m_freem(m);
                        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                            0, 0, 0, 0, 0);
                        return;
                } else if (payload_len == 4 && *(u_int32_t *)(void *)
                    ((caddr_t)uh + sizeof(struct udphdr)) != 0) {
                        /* UDP encapsulated IPSec packet to pass through NAT */
                        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END,
                            0, 0, 0, 0, 0);
                        /* preserve the udp header */
                        esp4_input(m, iphlen + sizeof(struct udphdr));
                        return;
                }
        }
        /*
         * Locate pcb for datagram.
         */
        inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
            ip->ip_dst, uh->uh_dport, 1, ifp);
        if (inp == NULL) {
                IF_UDP_STATINC(ifp, port_unreach);

                if (udp_log_in_vain) {
                        char buf[MAX_IPv4_STR_LEN];
                        char buf2[MAX_IPv4_STR_LEN];

                        /* check src and dst address */
                        if (udp_log_in_vain < 3) {
                                log(LOG_INFO, "Connection attempt to "
                                    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
                                    &ip->ip_dst, buf, sizeof(buf)),
                                    ntohs(uh->uh_dport), inet_ntop(AF_INET,
                                    &ip->ip_src, buf2, sizeof(buf2)),
                                    ntohs(uh->uh_sport));
                        } else if (!(m->m_flags & (M_BCAST | M_MCAST)) &&
                            ip->ip_dst.s_addr != ip->ip_src.s_addr) {
                                log_in_vain_log((LOG_INFO,
                                    "Stealth Mode connection attempt to "
                                    "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET,
                                    &ip->ip_dst, buf, sizeof(buf)),
                                    ntohs(uh->uh_dport), inet_ntop(AF_INET,
                                    &ip->ip_src, buf2, sizeof(buf2)),
                                    ntohs(uh->uh_sport)))
                        }
                }
                udpstat.udps_noport++;
                if (m->m_flags & (M_BCAST | M_MCAST)) {
                        udpstat.udps_noportbcast++;
                        goto bad;
                }
#if ICMP_BANDLIM
                if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) {
                        goto bad;
                }
#endif /* ICMP_BANDLIM */
                if (blackhole) {
                        if (ifp && ifp->if_type != IFT_LOOP) {
                                goto bad;
                        }
                }
                *ip = save_ip;
                ip->ip_len += iphlen;
                icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
                KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
                return;
        }
        udp_lock(inp->inp_socket, 1, 0);

        if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
                udp_unlock(inp->inp_socket, 1, 0);
                IF_UDP_STATINC(ifp, cleanup);
                goto bad;
        }

        if (!necp_socket_is_allowed_to_send_recv_v4(inp, uh->uh_dport,
            uh->uh_sport, &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
                udp_unlock(inp->inp_socket, 1, 0);
                IF_UDP_STATINC(ifp, badipsec);
                goto bad;
        }
        /*
         * Construct sockaddr format source address.
         * Stuff source address and datagram in user buffer.
         */
        udp_in.sin_port = uh->uh_sport;
        udp_in.sin_addr = ip->ip_src;
        if ((inp->inp_flags & INP_CONTROLOPTS) != 0 ||
            (inp->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
            (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
            (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
                if (inp->inp_vflag & INP_IPV6) {
                        int savedflags;

                        ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
                        savedflags = inp->inp_flags;
                        inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
                        ret = ip6_savecontrol(inp, m, &opts);
                        inp->inp_flags = savedflags;
                } else {
                        ret = ip_savecontrol(inp, &opts, ip, m);
                }
                if (ret != 0) {
                        udp_unlock(inp->inp_socket, 1, 0);
                        goto bad;
                }
        }
        m_adj(m, iphlen + sizeof(struct udphdr));
        KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport,
            save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen);

        if (inp->inp_vflag & INP_IPV6) {
                in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
                append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
        } else {
                append_sa = (struct sockaddr *)&udp_in;
        }
        if (nstat_collect) {
                INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
                INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, m->m_pkthdr.len);
                inp_set_activity_bitmap(inp);
        }
        so_recv_data_stat(inp->inp_socket, m, 0);
        if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa,
            m, opts, NULL) == 0) {
                udpstat.udps_fullsock++;
        } else {
                sorwakeup(inp->inp_socket);
        }
        udp_unlock(inp->inp_socket, 1, 0);
        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
        return;
bad:
        m_freem(m);
        if (opts) {
                m_freem(opts);
        }
        KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
static void
ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
{
        bzero(ip6, sizeof(*ip6));

        ip6->ip6_vfc = IPV6_VERSION;
        ip6->ip6_plen = ip->ip_len;
        ip6->ip6_nxt = ip->ip_p;
        ip6->ip6_hlim = ip->ip_ttl;
        if (ip->ip_src.s_addr) {
                ip6->ip6_src.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
                ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
        }
        if (ip->ip_dst.s_addr) {
                ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
                ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
        }
}
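/*
 * The header synthesized above carries IPv4-mapped IPv6 addresses: the
 * IPV6_ADDR_INT32_SMP word supplies the ::ffff:0:0/96 mapped-address prefix
 * and the low 32 bits hold the original IPv4 address, i.e. ::ffff:a.b.c.d.
 * That is the form ip6_savecontrol() expects when control-message options
 * are generated for an AF_INET6 socket receiving IPv4 traffic.
 */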
/*
 * subroutine of udp_input(), mainly for source code readability.
 */
static void
#if INET6
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
    struct udp_ip6 *pudp_ip6, struct ifnet *ifp)
#else /* !INET6 */
udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *pudp_in, struct ifnet *ifp)
#endif /* !INET6 */
{
        struct sockaddr *append_sa;
        struct mbuf *opts = 0;
        boolean_t cell = IFNET_IS_CELLULAR(ifp);
        boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
        boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
        int ret = 0;

#if CONFIG_MACF_NET
        if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
                goto error;
        }
#endif /* CONFIG_MACF_NET */
        if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
            (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
            (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
            (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
                if (last->inp_vflag & INP_IPV6) {
                        int savedflags;

                        if (pudp_ip6->uip6_init_done == 0) {
                                ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
                                pudp_ip6->uip6_init_done = 1;
                        }
                        savedflags = last->inp_flags;
                        last->inp_flags &= ~INP_UNMAPPABLEOPTS;
                        ret = ip6_savecontrol(last, n, &opts);
                        if (ret != 0) {
                                last->inp_flags = savedflags;
                                goto error;
                        }
                        last->inp_flags = savedflags;
                } else {
                        ret = ip_savecontrol(last, &opts, ip, n);
                        if (ret != 0) {
                                goto error;
                        }
                }
        }
        if (last->inp_vflag & INP_IPV6) {
                if (pudp_in6->uin6_init_done == 0) {
                        in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
                        pudp_in6->uin6_init_done = 1;
                }
                append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
        } else {
                append_sa = (struct sockaddr *)pudp_in;
        }
        if (nstat_collect) {
                INP_ADD_STAT(last, cell, wifi, wired, rxpackets, 1);
                INP_ADD_STAT(last, cell, wifi, wired, rxbytes,
                    n->m_pkthdr.len);
                inp_set_activity_bitmap(last);
        }
        so_recv_data_stat(last->inp_socket, n, 0);
        m_adj(n, off);
        if (sbappendaddr(&last->inp_socket->so_rcv, append_sa,
            n, opts, NULL) == 0) {
                udpstat.udps_fullsock++;
        } else {
                sorwakeup(last->inp_socket);
        }
        return;
error:
        m_freem(n);
        m_freem(opts);
}
/*
 * Notify a udp user of an asynchronous error;
 * just wake up so that he can collect error status.
 */
void
udp_notify(struct inpcb *inp, int errno)
{
        inp->inp_socket->so_error = errno;
        sorwakeup(inp->inp_socket);
        sowwakeup(inp->inp_socket);
}
void
udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * ifp)
{
        struct ip *ip = vip;
        void (*notify)(struct inpcb *, int) = udp_notify;
        struct in_addr faddr;
        struct inpcb *inp = NULL;

        faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
        if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
                return;
        }

        if (PRC_IS_REDIRECT(cmd)) {
                ip = 0;
                notify = in_rtchange;
        } else if (cmd == PRC_HOSTDEAD) {
                ip = 0;
        } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
                return;
        }
        if (ip) {
                struct udphdr uh;

                bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof(uh));
                inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport,
                    ip->ip_src, uh.uh_sport, 0, NULL);
                if (inp != NULL && inp->inp_socket != NULL) {
                        udp_lock(inp->inp_socket, 1, 0);
                        if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
                            WNT_STOPUSING) {
                                udp_unlock(inp->inp_socket, 1, 0);
                                return;
                        }
                        (*notify)(inp, inetctlerrmap[cmd]);
                        udp_unlock(inp->inp_socket, 1, 0);
                }
        } else {
                in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
        }
}
int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
        int     error = 0, optval = 0;
        struct  inpcb *inp;

        /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
        if (sopt->sopt_level != IPPROTO_UDP &&
            !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
                return ip_ctloutput(so, sopt);
        }

        inp = sotoinpcb(so);

        switch (sopt->sopt_dir) {
        case SOPT_SET:
                switch (sopt->sopt_name) {
                case UDP_NOCKSUM:
                        /* This option is settable only for UDP over IPv4 */
                        if (!(inp->inp_vflag & INP_IPV4)) {
                                error = EINVAL;
                                break;
                        }

                        if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
                            sizeof(optval))) != 0) {
                                break;
                        }

                        if (optval != 0) {
                                inp->inp_flags |= INP_UDP_NOCKSUM;
                        } else {
                                inp->inp_flags &= ~INP_UDP_NOCKSUM;
                        }
                        break;
                case UDP_KEEPALIVE_OFFLOAD:
                {
                        struct udp_keepalive_offload ka;
                        /*
                         * If the socket is not connected, the stack will
                         * not know the destination address to put in the
                         * keepalive datagram. Return an error now instead
                         * of failing later.
                         */
                        if (!(so->so_state & SS_ISCONNECTED)) {
                                error = EINVAL;
                                break;
                        }
                        if (sopt->sopt_valsize != sizeof(ka)) {
                                error = EINVAL;
                                break;
                        }
                        if ((error = sooptcopyin(sopt, &ka, sizeof(ka),
                            sizeof(ka))) != 0) {
                                break;
                        }

                        /* application should specify the type */
                        if (ka.ka_type == 0) {
                                return EINVAL;
                        }

                        if (ka.ka_interval == 0) {
                                /*
                                 * if interval is 0, disable the offload
                                 * mechanism
                                 */
                                if (inp->inp_keepalive_data != NULL) {
                                        FREE(inp->inp_keepalive_data,
                                            M_TEMP);
                                }
                                inp->inp_keepalive_data = NULL;
                                inp->inp_keepalive_datalen = 0;
                                inp->inp_keepalive_interval = 0;
                                inp->inp_keepalive_type = 0;
                                inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD;
                        } else {
                                if (inp->inp_keepalive_data != NULL) {
                                        FREE(inp->inp_keepalive_data,
                                            M_TEMP);
                                        inp->inp_keepalive_data = NULL;
                                }

                                inp->inp_keepalive_datalen = min(
                                    ka.ka_data_len,
                                    UDP_KEEPALIVE_OFFLOAD_DATA_SIZE);
                                if (inp->inp_keepalive_datalen > 0) {
                                        MALLOC(inp->inp_keepalive_data,
                                            u_int8_t *,
                                            inp->inp_keepalive_datalen,
                                            M_TEMP, M_WAITOK);
                                        if (inp->inp_keepalive_data == NULL) {
                                                inp->inp_keepalive_datalen = 0;
                                                error = ENOMEM;
                                                break;
                                        }
                                        bcopy(ka.ka_data,
                                            inp->inp_keepalive_data,
                                            inp->inp_keepalive_datalen);
                                } else {
                                        inp->inp_keepalive_datalen = 0;
                                }
                                inp->inp_keepalive_interval =
                                    min(UDP_KEEPALIVE_INTERVAL_MAX_SECONDS,
                                    ka.ka_interval);
                                inp->inp_keepalive_type = ka.ka_type;
                                inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD;
                        }
                        break;
                }
                case SO_FLUSH:
                        if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
                            sizeof(optval))) != 0) {
                                break;
                        }

                        error = inp_flush(inp, optval);
                        break;

                default:
                        error = ENOPROTOOPT;
                        break;
                }
                break;

        case SOPT_GET:
                switch (sopt->sopt_name) {
                case UDP_NOCKSUM:
                        optval = inp->inp_flags & INP_UDP_NOCKSUM;
                        break;

                default:
                        error = ENOPROTOOPT;
                        break;
                }
                if (error == 0) {
                        error = sooptcopyout(sopt, &optval, sizeof(optval));
                }
                break;
        }
        return error;
}
static int
udp_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
        int error, i, n;
        struct inpcb *inp, **inp_list;
        inp_gen_t gencnt;
        struct xinpgen xig;

        /*
         * The process of preparing the TCB list is too time-consuming and
         * resource-intensive to repeat twice on every request.
         */
        lck_rw_lock_exclusive(udbinfo.ipi_lock);
        if (req->oldptr == USER_ADDR_NULL) {
                n = udbinfo.ipi_count;
                req->oldidx = 2 * (sizeof(xig))
                    + (n + n / 8) * sizeof(struct xinpcb);
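                /*
                 * The size estimate above deliberately over-reports: two
                 * copies of the xinpgen header bracket the records, and the
                 * extra n/8 entries leave roughly 12% headroom for PCBs that
                 * are created between this sizing pass and the actual copy.
                 */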
                lck_rw_done(udbinfo.ipi_lock);
                return 0;
        }

        if (req->newptr != USER_ADDR_NULL) {
                lck_rw_done(udbinfo.ipi_lock);
                return EPERM;
        }

        /*
         * OK, now we're committed to doing something.
         */
        gencnt = udbinfo.ipi_gencnt;
        n = udbinfo.ipi_count;

        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof(xig);
        xig.xig_count = n;
        xig.xig_gen = gencnt;
        xig.xig_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xig, sizeof(xig));
        if (error) {
                lck_rw_done(udbinfo.ipi_lock);
                return error;
        }
        /*
         * We are done if there is no pcb
         */
        if (n == 0) {
                lck_rw_done(udbinfo.ipi_lock);
                return 0;
        }

        inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
        if (inp_list == 0) {
                lck_rw_done(udbinfo.ipi_lock);
                return ENOMEM;
        }

        for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
            inp = LIST_NEXT(inp, inp_list)) {
                if (inp->inp_gencnt <= gencnt &&
                    inp->inp_state != INPCB_STATE_DEAD) {
                        inp_list[i++] = inp;
                }
        }
        n = i;

        error = 0;
        for (i = 0; i < n; i++) {
                struct xinpcb xi;

                inp = inp_list[i];

                if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
                        continue;
                }
                udp_lock(inp->inp_socket, 1, 0);
                if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
                        udp_unlock(inp->inp_socket, 1, 0);
                        continue;
                }
                if (inp->inp_gencnt > gencnt) {
                        udp_unlock(inp->inp_socket, 1, 0);
                        continue;
                }

                bzero(&xi, sizeof(xi));
                xi.xi_len = sizeof(xi);
                /* XXX should avoid extra copy */
                inpcb_to_compat(inp, &xi.xi_inp);
                if (inp->inp_socket) {
                        sotoxsocket(inp->inp_socket, &xi.xi_socket);
                }
                udp_unlock(inp->inp_socket, 1, 0);

                error = SYSCTL_OUT(req, &xi, sizeof(xi));
        }
        if (!error) {
                /*
                 * Give the user an updated idea of our state.
                 * If the generation differs from what we told
                 * her before, she knows that something happened
                 * while we were processing this request, and it
                 * might be necessary to retry.
                 */
                bzero(&xig, sizeof(xig));
                xig.xig_len = sizeof(xig);
                xig.xig_gen = udbinfo.ipi_gencnt;
                xig.xig_sogen = so_gencnt;
                xig.xig_count = udbinfo.ipi_count;
                error = SYSCTL_OUT(req, &xig, sizeof(xig));
        }
        FREE(inp_list, M_TEMP);
        lck_rw_done(udbinfo.ipi_lock);
        return error;
}
SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist,
    "S,xinpcb", "List of active UDP sockets");
#if !CONFIG_EMBEDDED

static int
udp_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
        int error, i, n;
        struct inpcb *inp, **inp_list;
        inp_gen_t gencnt;
        struct xinpgen xig;

        /*
         * The process of preparing the TCB list is too time-consuming and
         * resource-intensive to repeat twice on every request.
         */
        lck_rw_lock_shared(udbinfo.ipi_lock);
        if (req->oldptr == USER_ADDR_NULL) {
                n = udbinfo.ipi_count;
                req->oldidx =
                    2 * (sizeof(xig)) + (n + n / 8) * sizeof(struct xinpcb64);
                lck_rw_done(udbinfo.ipi_lock);
                return 0;
        }

        if (req->newptr != USER_ADDR_NULL) {
                lck_rw_done(udbinfo.ipi_lock);
                return EPERM;
        }

        /*
         * OK, now we're committed to doing something.
         */
        gencnt = udbinfo.ipi_gencnt;
        n = udbinfo.ipi_count;

        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof(xig);
        xig.xig_count = n;
        xig.xig_gen = gencnt;
        xig.xig_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xig, sizeof(xig));
        if (error) {
                lck_rw_done(udbinfo.ipi_lock);
                return error;
        }
        /*
         * We are done if there is no pcb
         */
        if (n == 0) {
                lck_rw_done(udbinfo.ipi_lock);
                return 0;
        }

        inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
        if (inp_list == 0) {
                lck_rw_done(udbinfo.ipi_lock);
                return ENOMEM;
        }

        for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
            inp = LIST_NEXT(inp, inp_list)) {
                if (inp->inp_gencnt <= gencnt &&
                    inp->inp_state != INPCB_STATE_DEAD) {
                        inp_list[i++] = inp;
                }
        }
        n = i;

        error = 0;
        for (i = 0; i < n; i++) {
                struct xinpcb64 xi;

                inp = inp_list[i];

                if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
                        continue;
                }
                udp_lock(inp->inp_socket, 1, 0);
                if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
                        udp_unlock(inp->inp_socket, 1, 0);
                        continue;
                }
                if (inp->inp_gencnt > gencnt) {
                        udp_unlock(inp->inp_socket, 1, 0);
                        continue;
                }

                bzero(&xi, sizeof(xi));
                xi.xi_len = sizeof(xi);
                inpcb_to_xinpcb64(inp, &xi);
                if (inp->inp_socket) {
                        sotoxsocket64(inp->inp_socket, &xi.xi_socket);
                }
                udp_unlock(inp->inp_socket, 1, 0);

                error = SYSCTL_OUT(req, &xi, sizeof(xi));
        }
        if (!error) {
                /*
                 * Give the user an updated idea of our state.
                 * If the generation differs from what we told
                 * her before, she knows that something happened
                 * while we were processing this request, and it
                 * might be necessary to retry.
                 */
                bzero(&xig, sizeof(xig));
                xig.xig_len = sizeof(xig);
                xig.xig_gen = udbinfo.ipi_gencnt;
                xig.xig_sogen = so_gencnt;
                xig.xig_count = udbinfo.ipi_count;
                error = SYSCTL_OUT(req, &xig, sizeof(xig));
        }
        FREE(inp_list, M_TEMP);
        lck_rw_done(udbinfo.ipi_lock);
        return error;
}
SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64,
    "S,xinpcb64", "List of active UDP sockets");

#endif /* !CONFIG_EMBEDDED */
static int
udp_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
        return get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n,
    "S,xinpcb_n", "List of active UDP sockets");
__private_extern__ void
udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
    bitstr_t *bitfield)
{
        inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
            &udbinfo);
}

__private_extern__ uint32_t
udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
{
        return inpcb_count_opportunistic(ifindex, &udbinfo, flags);
}

__private_extern__ uint32_t
udp_find_anypcb_byaddr(struct ifaddr *ifa)
{
        return inpcb_find_anypcb_byaddr(ifa, &udbinfo);
}
static int
udp_check_pktinfo(struct mbuf *control, struct ifnet **outif,
    struct in_addr *laddr)
{
        struct cmsghdr *cm = 0;
        struct in_pktinfo *pktinfo;
        struct ifnet *ifp;

        if (outif != NULL) {
                *outif = NULL;
        }

        /*
         * XXX: Currently, we assume all the optional information is stored
         * in a single mbuf.
         */
        if (control->m_next) {
                return EINVAL;
        }

        if (control->m_len < CMSG_LEN(0)) {
                return EINVAL;
        }

        for (cm = M_FIRST_CMSGHDR(control);
            is_cmsg_valid(control, cm);
            cm = M_NXT_CMSGHDR(control, cm)) {
                if (cm->cmsg_level != IPPROTO_IP ||
                    cm->cmsg_type != IP_PKTINFO) {
                        continue;
                }

                if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) {
                        return EINVAL;
                }

                pktinfo =  (struct in_pktinfo *)(void *)CMSG_DATA(cm);

                /* Check for a valid ifindex in pktinfo */
                ifnet_head_lock_shared();

                if (pktinfo->ipi_ifindex > if_index) {
                        ifnet_head_done();
                        return ENXIO;
                }

                /*
                 * If ipi_ifindex is specified it takes precedence
                 * over ipi_spec_dst.
                 */
                if (pktinfo->ipi_ifindex) {
                        ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
                        if (ifp == NULL) {
                                ifnet_head_done();
                                return ENXIO;
                        }
                        if (outif != NULL) {
                                ifnet_reference(ifp);
                                *outif = ifp;
                        }
                        ifnet_head_done();
                        laddr->s_addr = INADDR_ANY;
                        break;
                }

                ifnet_head_done();

                /*
                 * Use the provided ipi_spec_dst address for temp
                 * source address.
                 */
                *laddr = pktinfo->ipi_spec_dst;
                break;
        }
        return 0;
}
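/*
 * For reference, a sender exercises this path by attaching an IP_PKTINFO
 * control message to sendmsg(2); roughly (userspace sketch, error handling
 * omitted):
 *
 *      struct in_pktinfo pi = { 0 };
 *      pi.ipi_ifindex = if_nametoindex("en0");   // or set ipi_spec_dst
 *      char cbuf[CMSG_SPACE(sizeof(pi))];
 *      struct cmsghdr *cm = (struct cmsghdr *)cbuf;
 *      cm->cmsg_level = IPPROTO_IP;
 *      cm->cmsg_type = IP_PKTINFO;
 *      cm->cmsg_len = CMSG_LEN(sizeof(pi));
 *      memcpy(CMSG_DATA(cm), &pi, sizeof(pi));
 *      // attach cbuf as msg_control of the msghdr passed to sendmsg()
 *
 * The per-datagram interface or source address then overrides the socket's
 * bound values for that one send, as udp_output() below arranges.
 */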
int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct proc *p)
{
        struct udpiphdr *ui;
        int len = m->m_pkthdr.len;
        struct sockaddr_in *sin;
        struct in_addr origladdr, laddr, faddr, pi_laddr;
        u_short lport, fport;
        int error = 0, udp_dodisconnect = 0, pktinfo = 0;
        struct socket *so = inp->inp_socket;
        int soopts = 0;
        struct mbuf *inpopts;
        struct ip_moptions *mopts;
        int flowadv = 0;
        struct ip_out_args ipoa;
        struct m_tag *cfil_tag = NULL;
        bool cfil_faddr_use = false;
        uint32_t cfil_so_state_change_cnt = 0;
        short cfil_so_options = 0;
        struct sockaddr *cfil_faddr = NULL;

        bzero(&ipoa, sizeof(ipoa));
        ipoa.ipoa_boundif = IFSCOPE_NONE;
        ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

        struct ifnet *outif = NULL;
        struct flowadv *adv = &ipoa.ipoa_flowadv;
        int sotc = SO_TC_UNSPEC;
        int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
        struct ifnet *origoutifp = NULL;
        /* Enable flow advisory only when connected */
        flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
        pi_laddr.s_addr = INADDR_ANY;

        KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

        socket_lock_assert_owned(so);

        /*
         * If socket is subject to UDP Content Filter and no addr is passed in,
         * retrieve CFIL saved state from mbuf and use it if necessary.
         */
        if (so->so_cfil_db && !addr) {
                cfil_tag = cfil_udp_get_socket_state(m, &cfil_so_state_change_cnt, &cfil_so_options, &cfil_faddr);
                if (cfil_tag) {
                        sin = (struct sockaddr_in *)(void *)cfil_faddr;
                        if (inp && inp->inp_faddr.s_addr == INADDR_ANY) {
                                /*
                                 * Socket is unconnected, simply use the saved faddr as 'addr' to go through
                                 * the connect/disconnect logic.
                                 */
                                addr = (struct sockaddr *)cfil_faddr;
                        } else if ((so->so_state_change_cnt != cfil_so_state_change_cnt) &&
                            (inp->inp_fport != sin->sin_port ||
                            inp->inp_faddr.s_addr != sin->sin_addr.s_addr)) {
                                /*
                                 * Socket is connected but socket state and dest addr/port changed.
                                 * We need to use the saved faddr info.
                                 */
                                cfil_faddr_use = true;
                        }
                }
        }
        if (control != NULL) {
                sotc = so_tc_from_control(control, &netsvctype);
                VERIFY(outif == NULL);
                error = udp_check_pktinfo(control, &outif, &pi_laddr);
                m_freem(control);
                control = NULL;
                if (error) {
                        goto release;
                }
                pktinfo++;
                if (outif != NULL) {
                        ipoa.ipoa_boundif = outif->if_index;
                }
        }
        if (sotc == SO_TC_UNSPEC) {
                sotc = so->so_traffic_class;
                netsvctype = so->so_netsvctype;
        }

        KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
            inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
            (htons((u_short)len + sizeof(struct udphdr))));
        if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
                error = EMSGSIZE;
                goto release;
        }

        if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
                /*
                 * The socket is flow-controlled, drop the packets
                 * until the inp is not flow controlled
                 */
                error = ENOBUFS;
                goto release;
        }
        /*
         * If socket was bound to an ifindex, tell ip_output about it.
         * If the ancillary IP_PKTINFO option contains an interface index,
         * it takes precedence over the one specified by IP_BOUND_IF.
         */
        if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
            (inp->inp_flags & INP_BOUND_IF)) {
                VERIFY(inp->inp_boundifp != NULL);
                ifnet_reference(inp->inp_boundifp);     /* for this routine */
                if (outif != NULL) {
                        ifnet_release(outif);
                }
                outif = inp->inp_boundifp;
                ipoa.ipoa_boundif = outif->if_index;
        }
        if (INP_NO_CELLULAR(inp)) {
                ipoa.ipoa_flags |=  IPOAF_NO_CELLULAR;
        }
        if (INP_NO_EXPENSIVE(inp)) {
                ipoa.ipoa_flags |=  IPOAF_NO_EXPENSIVE;
        }
        if (INP_AWDL_UNRESTRICTED(inp)) {
                ipoa.ipoa_flags |=  IPOAF_AWDL_UNRESTRICTED;
        }
        ipoa.ipoa_sotc = sotc;
        ipoa.ipoa_netsvctype = netsvctype;
        soopts |= IP_OUTARGS;
        /*
         * If there was a routing change, discard cached route and check
         * that we have a valid source address.  Reacquire a new source
         * address if INADDR_ANY was specified.
         *
         * If we are using cfil saved state, go through this cache cleanup
         * so that we can get a new route.
         */
        if (ROUTE_UNUSABLE(&inp->inp_route) || cfil_faddr_use) {
                struct in_ifaddr *ia = NULL;

                ROUTE_RELEASE(&inp->inp_route);

                /* src address is gone? */
                if (inp->inp_laddr.s_addr != INADDR_ANY &&
                    (ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
                        if (!(inp->inp_flags & INP_INADDR_ANY) ||
                            (so->so_state & SS_ISCONNECTED)) {
                                /*
                                 * If the source address is gone, return an
                                 * error if:
                                 * - the source was specified
                                 * - the socket was already connected
                                 */
                                soevent(so, (SO_FILT_HINT_LOCKED |
                                    SO_FILT_HINT_NOSRCADDR));
                                error = EADDRNOTAVAIL;
                                goto release;
                        } else {
                                /* new src will be set later */
                                inp->inp_laddr.s_addr = INADDR_ANY;
                                inp->inp_last_outifp = NULL;
                        }
                }
                if (ia != NULL) {
                        IFA_REMREF(&ia->ia_ifa);
                }
        }
        /*
         * IP_PKTINFO option check.  If a temporary scope or src address
         * is provided, use it for this packet only and make sure we forget
         * it after sending this datagram.
         */
        if (pi_laddr.s_addr != INADDR_ANY ||
            (ipoa.ipoa_boundif != IFSCOPE_NONE && pktinfo)) {
                /* temp src address for this datagram only */
                laddr = pi_laddr;
                origladdr.s_addr = INADDR_ANY;
                /* we don't want to keep the laddr or route */
                udp_dodisconnect = 1;
                /* remember we don't care about src addr */
                inp->inp_flags |= INP_INADDR_ANY;
        } else {
                origladdr = laddr = inp->inp_laddr;
        }

        origoutifp = inp->inp_last_outifp;
        faddr = inp->inp_faddr;
        lport = inp->inp_lport;
        fport = inp->inp_fport;

        if (cfil_faddr_use) {
                faddr = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_addr;
                fport = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_port;
        }
        if (addr) {
                sin = (struct sockaddr_in *)(void *)addr;
                if (faddr.s_addr != INADDR_ANY) {
                        error = EISCONN;
                        goto release;
                }
                if (lport == 0) {
                        /*
                         * In case we don't have a local port set, go through
                         * the full connect.  We don't have a local port yet
                         * (i.e., we can't be looked up), so it's not an issue
                         * if the input runs at the same time we do this.
                         */
                        /* if we have a source address specified, use that */
                        if (pi_laddr.s_addr != INADDR_ANY) {
                                inp->inp_laddr = pi_laddr;
                        }
                        /*
                         * If a scope is specified, use it.  Scope from
                         * IP_PKTINFO takes precedence over the scope
                         * set via INP_BOUND_IF.
                         */
                        error = in_pcbconnect(inp, addr, p, ipoa.ipoa_boundif,
                            &outif);
                        if (error) {
                                goto release;
                        }

                        laddr = inp->inp_laddr;
                        lport = inp->inp_lport;
                        faddr = inp->inp_faddr;
                        fport = inp->inp_fport;
                        udp_dodisconnect = 1;

                        /* synch up in case in_pcbladdr() overrides */
                        if (outif != NULL && ipoa.ipoa_boundif != IFSCOPE_NONE) {
                                ipoa.ipoa_boundif = outif->if_index;
                        }
                } else {
                        /*
                         * We have a full address and a local port; use those
                         * info to build the packet without changing the pcb
                         * and interfering with the input path. See 3851370.
                         *
                         * Scope from IP_PKTINFO takes precedence over the
                         * scope set via INP_BOUND_IF.
                         */
                        if (laddr.s_addr == INADDR_ANY) {
                                if ((error = in_pcbladdr(inp, addr, &laddr,
                                    ipoa.ipoa_boundif, &outif, 0)) != 0) {
                                        goto release;
                                }
                                /*
                                 * from pcbconnect: remember we don't
                                 * care about src addr.
                                 */
                                inp->inp_flags |= INP_INADDR_ANY;

                                /* synch up in case in_pcbladdr() overrides */
                                if (outif != NULL &&
                                    ipoa.ipoa_boundif != IFSCOPE_NONE) {
                                        ipoa.ipoa_boundif = outif->if_index;
                                }
                        }
                        faddr = sin->sin_addr;
                        fport = sin->sin_port;
                }
        } else {
                if (faddr.s_addr == INADDR_ANY) {
                        error = ENOTCONN;
                        goto release;
                }
        }
1733         mac_mbuf_label_associate_inpcb(inp, m);
1734 #endif /* CONFIG_MACF_NET */
1736         if (inp->inp_flowhash == 0) {
1737                 inp->inp_flowhash = inp_calc_flowhash(inp);
1740         if (fport == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) {
1741                 so->so_flags1 |= SOF1_DNS_COUNTED;
1742                 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns);
1746          * Calculate data length and get a mbuf
1747          * for UDP and IP headers.
1749         M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT, 1);
1756          * Fill in mbuf with extended UDP header
1757          * and addresses and length put into network format.
1759         ui = mtod(m, struct udpiphdr *);
1760         bzero(ui->ui_x1, sizeof(ui->ui_x1));    /* XXX still needed? */
1761         ui->ui_pr = IPPROTO_UDP;
1764         ui->ui_sport = lport;
1765         ui->ui_dport = fport;
1766         ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1769          * Set up checksum to pseudo header checksum and output datagram.
1771          * Treat flows to be CLAT46'd as IPv6 flow and compute checksum
1772          * no matter what, as IPv6 mandates checksum for UDP.
1774          * Here we only compute the one's complement sum of the pseudo header.
1775          * The payload computation and final complement are delayed to much later
1776          * in IP processing to decide if remaining computation needs to be done
1779          * That is communicated by setting CSUM_UDP in csum_flags.
1780          * The offset of the checksum from the start of the ULP header is communicated
1781          * through csum_data.
1783          * Note since this already contains the pseudo-header checksum, any
1784          * later operation at the IP layer that modifies the values used here must
1785          * update the checksum as well (for example NAT, etc).
1787         if ((inp->inp_flags2 & INP2_CLAT46_FLOW) ||
1788             (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM))) {
1789                 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
1790                     htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1791                 m->m_pkthdr.csum_flags = (CSUM_UDP | CSUM_ZERO_INVERT);
1792                 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
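/*
 * Editorial note (not part of the original source): the block above only
 * seeds uh_sum with the one's complement sum of the IPv4 pseudo header; the
 * payload sum and the final inversion are folded in later by ip_output() or
 * by the NIC, as the comment describes.  The user-space sketch below shows
 * the same pseudo-header sum in isolation.  It is purely illustrative and
 * the helper names sum16()/pseudo_hdr_sum() are hypothetical.
 */
#if 0   /* illustrative sketch only; never compiled into the kernel */
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

/* RFC 1071 style one's complement sum over big-endian 16-bit words. */
static uint32_t
sum16(const uint8_t *p, size_t len, uint32_t sum)
{
        for (; len > 1; p += 2, len -= 2)
                sum += ((uint32_t)p[0] << 8) | p[1];
        if (len == 1)
                sum += (uint32_t)p[0] << 8;     /* pad the odd byte with zero */
        return sum;
}

/* One's complement sum of the IPv4 pseudo header (RFC 768). */
static uint16_t
pseudo_hdr_sum(struct in_addr src, struct in_addr dst, uint16_t udp_len)
{
        uint8_t ph[12];
        uint32_t sum;

        memcpy(&ph[0], &src, 4);                /* source address (network order) */
        memcpy(&ph[4], &dst, 4);                /* destination address */
        ph[8] = 0;                              /* zero */
        ph[9] = IPPROTO_UDP;                    /* protocol */
        ph[10] = (uint8_t)(udp_len >> 8);       /* UDP length (header + data) */
        ph[11] = (uint8_t)(udp_len & 0xff);
        sum = sum16(ph, sizeof(ph), 0);
        while (sum > 0xffff)
                sum = (sum >> 16) + (sum & 0xffff);     /* fold carries */
        return (uint16_t)sum;
}
#endif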
1796         ((struct ip *)ui)->ip_len = sizeof(struct udpiphdr) + len;
1797         ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;    /* XXX */
1798         ((struct ip *)ui)->ip_tos = inp->inp_ip_tos;    /* XXX */
1799         udpstat.udps_opackets++;
1801         KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport,
1802             ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen);
1806                 necp_kernel_policy_id policy_id;
1807                 necp_kernel_policy_id skip_policy_id;
1808                 u_int32_t route_rule_id;
1811                  * We need a route to perform NECP route rule checks
1813                 if (net_qos_policy_restricted != 0 &&
1814                     ROUTE_UNUSABLE(&inp->inp_route)) {
1815                         struct sockaddr_in to;
1816                         struct sockaddr_in from;
1818                         ROUTE_RELEASE(&inp->inp_route);
1820                         bzero(&from, sizeof(struct sockaddr_in));
1821                         from.sin_family = AF_INET;
1822                         from.sin_len = sizeof(struct sockaddr_in);
1823                         from.sin_addr = laddr;
1825                         bzero(&to, sizeof(struct sockaddr_in));
1826                         to.sin_family = AF_INET;
1827                         to.sin_len = sizeof(struct sockaddr_in);
1828                         to.sin_addr = faddr;
1830                         inp->inp_route.ro_dst.sa_family = AF_INET;
1831                         inp->inp_route.ro_dst.sa_len = sizeof(struct sockaddr_in);
1832                         ((struct sockaddr_in *)(void *)&inp->inp_route.ro_dst)->sin_addr =
1835                         rtalloc_scoped(&inp->inp_route, ipoa.ipoa_boundif);
1837                         inp_update_necp_policy(inp, (struct sockaddr *)&from,
1838                             (struct sockaddr *)&to, ipoa.ipoa_boundif);
1839                         inp->inp_policyresult.results.qos_marking_gencount = 0;
1842                 if (!necp_socket_is_allowed_to_send_recv_v4(inp, lport, fport,
1843                     &laddr, &faddr, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
1844                         error = EHOSTUNREACH;
1848                 necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);
1850                 if (net_qos_policy_restricted != 0) {
1851                         necp_socket_update_qos_marking(inp,
1852                             inp->inp_route.ro_rt, NULL, route_rule_id);
1856         if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1857                 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1861         if (inp->inp_sp != NULL && ipsec_setsocket(m, inp->inp_socket) != 0) {
1867         inpopts = inp->inp_options;
1869         if (cfil_tag && (inp->inp_socket->so_options != cfil_so_options)) {
1870                 soopts |= (cfil_so_options & (SO_DONTROUTE | SO_BROADCAST));
1873         soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
1875         mopts = inp->inp_moptions;
1876         if (mopts != NULL) {
1878                 IMO_ADDREF_LOCKED(mopts);
1879                 if (IN_MULTICAST(ntohl(ui->ui_dst.s_addr)) &&
1880                     mopts->imo_multicast_ifp != NULL) {
1881                         /* no reference needed */
1882                         inp->inp_last_outifp = mopts->imo_multicast_ifp;
1887         /* Copy the cached route and take an extra reference */
1888         inp_route_copyout(inp, &ro);
1890         set_packet_service_class(m, so, sotc, 0);
1891         m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
1892         m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
1893         m->m_pkthdr.pkt_proto = IPPROTO_UDP;
1894         m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC);
1896                 m->m_pkthdr.pkt_flags |= PKTF_FLOW_ADV;
1898         m->m_pkthdr.tx_udp_pid = so->last_pid;
1899         if (so->so_flags & SOF_DELEGATED) {
1900                 m->m_pkthdr.tx_udp_e_pid = so->e_pid;
1902                 m->m_pkthdr.tx_udp_e_pid = 0;
1905         if (ipoa.ipoa_boundif != IFSCOPE_NONE) {
1906                 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1909         if (laddr.s_addr != INADDR_ANY) {
1910                 ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
1913         inp->inp_sndinprog_cnt++;
1915         socket_unlock(so, 0);
1916         error = ip_output(m, inpopts, &ro, soopts, mopts, &ipoa);
1919         if (mopts != NULL) {
1923         if (error == 0 && nstat_collect) {
1924                 boolean_t cell, wifi, wired;
1926                 if (ro.ro_rt != NULL) {
1927                         cell = IFNET_IS_CELLULAR(ro.ro_rt->rt_ifp);
1928                         wifi = (!cell && IFNET_IS_WIFI(ro.ro_rt->rt_ifp));
1929                         wired = (!wifi && IFNET_IS_WIRED(ro.ro_rt->rt_ifp));
1931                         cell = wifi = wired = FALSE;
1933                 INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1);
1934                 INP_ADD_STAT(inp, cell, wifi, wired, txbytes, len);
1935                 inp_set_activity_bitmap(inp);
1938         if (flowadv && (adv->code == FADV_FLOW_CONTROLLED ||
1939             adv->code == FADV_SUSPENDED)) {
1941                  * return a hint to the application that
1942                  * the packet has been dropped
1945                 inp_set_fc_state(inp, adv->code);
1948         VERIFY(inp->inp_sndinprog_cnt > 0);
1949         if (--inp->inp_sndinprog_cnt == 0) {
1950                 inp->inp_flags &= ~(INP_FC_FEEDBACK);
1953         /* Synchronize PCB cached route */
1954         inp_route_copyin(inp, &ro);
1957         if (udp_dodisconnect) {
1958                 /* Always discard the cached route for unconnected socket */
1959                 ROUTE_RELEASE(&inp->inp_route);
1960                 in_pcbdisconnect(inp);
1961                 inp->inp_laddr = origladdr;     /* XXX rehash? */
1962                 /* no reference needed */
1963                 inp->inp_last_outifp = origoutifp;
1964         } else if (inp->inp_route.ro_rt != NULL) {
1965                 struct rtentry *rt = inp->inp_route.ro_rt;
1966                 struct ifnet *outifp;
1968                 if (rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) {
1969                         rt = NULL;      /* unusable */
1973                  * Discard temporary route for cfil case
1975                 if (cfil_faddr_use) {
1976                         rt = NULL;      /* unusable */
1981                  * Always discard if it is a multicast or broadcast route.
1984                         ROUTE_RELEASE(&inp->inp_route);
1988                  * If the destination route is unicast, update outifp with
1989                  * that of the route interface used by IP.
1992                     (outifp = rt->rt_ifp) != inp->inp_last_outifp) {
1993                         inp->inp_last_outifp = outifp; /* no reference needed */
1995                         so->so_pktheadroom = P2ROUNDUP(
1996                                 sizeof(struct udphdr) +
1998                                 ifnet_hdrlen(outifp) +
1999                                 ifnet_mbuf_packetpreamblelen(outifp),
2003                 ROUTE_RELEASE(&inp->inp_route);
2007          * If output interface was cellular/expensive, and this socket is
2008          * denied access to it, generate an event.
2010         if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
2011             (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp))) {
2012                 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
2016         KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
2022         if (outif != NULL) {
2023                 ifnet_release(outif);
2028                 m_tag_free(cfil_tag);
2035 u_int32_t       udp_sendspace = 9216;           /* really max datagram size */
2036 /* 187 1K datagrams (approx 192 KB) */
2037 u_int32_t       udp_recvspace = 187 * (1024 +
2039     sizeof(struct sockaddr_in6)
2041     sizeof(struct sockaddr_in)
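/*
 * Editorial note (not in the original source): with INET6 built in, the
 * default works out to 187 * (1024 + sizeof(struct sockaddr_in6)) =
 * 187 * (1024 + 28) = 196,724 bytes, i.e. just over 192 KiB (196,608 bytes),
 * which is what the "approx 192 KB" comment above refers to.
 */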
2045 /* Check that the values of udp send and recv space do not exceed sb_max */
2047 sysctl_udp_sospace(struct sysctl_oid *oidp, void *arg1, int arg2,
2048     struct sysctl_req *req)
2050 #pragma unused(arg1, arg2)
2051         u_int32_t new_value = 0, *space_p = NULL;
2052         int changed = 0, error = 0;
2053         u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES;
2055         switch (oidp->oid_number) {
2056         case UDPCTL_RECVSPACE:
2057                 space_p = &udp_recvspace;
2059         case UDPCTL_MAXDGRAM:
2060                 space_p = &udp_sendspace;
2065         error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
2066             &new_value, &changed);
2068                 if (new_value > 0 && new_value <= sb_effective_max) {
2069                         *space_p = new_value;
2077 SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace,
2078     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_recvspace, 0,
2079     &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
2081 SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram,
2082     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_sendspace, 0,
2083     &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
2086 udp_abort(struct socket *so)
2090         inp = sotoinpcb(so);
2092                 panic("%s: so=%p null inp\n", __func__, so);
2095         soisdisconnected(so);
2101 udp_attach(struct socket *so, int proto, struct proc *p)
2103 #pragma unused(proto)
2107         inp = sotoinpcb(so);
2109                 panic("%s so=%p inp=%p\n", __func__, so, inp);
2112         error = in_pcballoc(so, &udbinfo, p);
2116         error = soreserve(so, udp_sendspace, udp_recvspace);
2120         inp = (struct inpcb *)so->so_pcb;
2121         inp->inp_vflag |= INP_IPV4;
2122         inp->inp_ip_ttl = ip_defttl;
2123         if (nstat_collect) {
2124                 nstat_udp_new_pcb(inp);
2130 udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
2135         if (nam->sa_family != 0 && nam->sa_family != AF_INET &&
2136             nam->sa_family != AF_INET6) {
2137                 return EAFNOSUPPORT;
2140         inp = sotoinpcb(so);
2144         error = in_pcbbind(inp, nam, p);
2147         /* Update NECP client with bind result if not in middle of connect */
2149             (inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
2150             !uuid_is_null(inp->necp_client_uuid)) {
2151                 socket_unlock(so, 0);
2152                 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2161 udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
2166         inp = sotoinpcb(so);
2170         if (inp->inp_faddr.s_addr != INADDR_ANY) {
2174         if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
2175                 so->so_flags1 |= SOF1_CONNECT_COUNTED;
2176                 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
2181         if (necp_socket_should_use_flow_divert(inp)) {
2182                 uint32_t fd_ctl_unit =
2183                     necp_socket_get_flow_divert_control_unit(inp);
2184                 if (fd_ctl_unit > 0) {
2185                         error = flow_divert_pcb_init(so, fd_ctl_unit);
2187                                 error = flow_divert_connect_out(so, nam, p);
2194 #endif /* FLOW_DIVERT */
2197         error = in_pcbconnect(inp, nam, p, IFSCOPE_NONE, NULL);
2200                 /* Update NECP client with connected five-tuple */
2201                 if (!uuid_is_null(inp->necp_client_uuid)) {
2202                         socket_unlock(so, 0);
2203                         necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2209                 if (inp->inp_flowhash == 0) {
2210                         inp->inp_flowhash = inp_calc_flowhash(inp);
2217 udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sockaddr *dst,
2218     struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
2219     uint32_t flags, void *arg, uint32_t arglen,
2220     struct uio *uio, user_ssize_t *bytes_written)
2222 #pragma unused(aid, flags, arg, arglen)
2223         struct inpcb *inp = sotoinpcb(so);
2225         user_ssize_t datalen = 0;
2231         VERIFY(dst != NULL);
2233         ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
2234         inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;
2237         inp_update_necp_policy(inp, src, dst, ifscope);
2240         /* bind socket to the specified interface, if requested */
2241         if (ifscope != IFSCOPE_NONE &&
2242             (error = inp_bindif(inp, ifscope, NULL)) != 0) {
2246         /* if source address and/or port is specified, bind to it */
2248                 error = sobindlock(so, src, 0); /* already locked */
2256                 error = udp_connect(so, dst, p);
2260                 error = udp6_connect(so, dst, p);
2273          * If there is data, copy it. DATA_IDEMPOTENT is ignored.
2274          * CONNECT_RESUME_ON_READ_WRITE is ignored.
2277                 socket_unlock(so, 0);
2279                 VERIFY(bytes_written != NULL);
2281                 datalen = uio_resid(uio);
2282                 error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
2283                     (uio_t)uio, NULL, NULL, 0);
2286                 /* If error returned is EMSGSIZE, for example, disconnect */
2287                 if (error == 0 || error == EWOULDBLOCK) {
2288                         *bytes_written = datalen - uio_resid(uio);
2290                         (void) so->so_proto->pr_usrreqs->pru_disconnectx(so,
2291                             SAE_ASSOCID_ANY, SAE_CONNID_ANY);
2294                  * mask the EWOULDBLOCK error so that the caller
2295                  * knows that at least the connect was successful.
2297                 if (error == EWOULDBLOCK) {
2302         if (error == 0 && pcid != NULL) {
2303                 *pcid = 1;      /* there is only 1 connection for UDP */
2306         inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
2311 udp_connectx(struct socket *so, struct sockaddr *src,
2312     struct sockaddr *dst, struct proc *p, uint32_t ifscope,
2313     sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
2314     uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
2316         return udp_connectx_common(so, AF_INET, src, dst,
2317                    p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written);
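/*
 * Editorial note (not in the original source): udp_connectx() is reached
 * from the connectx(2) system call.  The sketch below shows roughly how a
 * user-space client might drive this path for UDP, connecting and sending
 * initial data in one call.  It assumes the Darwin sa_endpoints_t layout and
 * the connectx() prototype from <sys/socket.h>; treat it as an illustration,
 * not a reference.
 */
#if 0   /* illustrative user-space sketch only */
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

int
send_hello(const char *ip, unsigned short port)
{
        struct sockaddr_in dst;
        sa_endpoints_t eps;
        struct iovec iov;
        size_t sent = 0;
        int s, err;

        memset(&dst, 0, sizeof(dst));
        dst.sin_len = sizeof(dst);
        dst.sin_family = AF_INET;
        dst.sin_port = htons(port);
        inet_pton(AF_INET, ip, &dst.sin_addr);

        memset(&eps, 0, sizeof(eps));
        eps.sae_dstaddr = (struct sockaddr *)&dst;
        eps.sae_dstaddrlen = sizeof(dst);

        iov.iov_base = (void *)"hello";
        iov.iov_len = 5;

        s = socket(AF_INET, SOCK_DGRAM, 0);
        if (s < 0)
                return -1;
        /* Connect the datagram socket and hand over the first payload. */
        err = connectx(s, &eps, SAE_ASSOCID_ANY, 0, &iov, 1, &sent, NULL);
        close(s);
        return err;
}
#endif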
2321 udp_detach(struct socket *so)
2325         inp = sotoinpcb(so);
2327                 panic("%s: so=%p null inp\n", __func__, so);
2332          * If this is a socket that does not want to wakeup the device
2333          * for its traffic, the application might be waiting for
2334          * close to complete before going to sleep. Send a notification
2335          * for this kind of socket.
2337         if (so->so_options & SO_NOWAKEFROMSLEEP) {
2338                 socket_post_kev_msg_closed(so);
2342         inp->inp_state = INPCB_STATE_DEAD;
2347 udp_disconnect(struct socket *so)
2351         inp = sotoinpcb(so);
2354             || (necp_socket_should_use_flow_divert(inp))
2357                 return inp == NULL ? EINVAL : EPROTOTYPE;
2359         if (inp->inp_faddr.s_addr == INADDR_ANY) {
2363         in_pcbdisconnect(inp);
2365         /* reset flow controlled state, just in case */
2366         inp_reset_fc_state(inp);
2368         inp->inp_laddr.s_addr = INADDR_ANY;
2369         so->so_state &= ~SS_ISCONNECTED;                /* XXX */
2370         inp->inp_last_outifp = NULL;
2376 udp_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
2379         if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
2383         return udp_disconnect(so);
2387 udp_send(struct socket *so, int flags, struct mbuf *m,
2388     struct sockaddr *addr, struct mbuf *control, struct proc *p)
2391 #pragma unused(flags)
2392 #endif /* !(FLOW_DIVERT) */
2395         inp = sotoinpcb(so);
2400                 if (control != NULL) {
2408         if (necp_socket_should_use_flow_divert(inp)) {
2409                 /* Implicit connect */
2410                 return flow_divert_implicit_data_out(so, flags, m, addr,
2413 #endif /* FLOW_DIVERT */
2416         return udp_output(inp, m, addr, control, p);
2420 udp_shutdown(struct socket *so)
2424         inp = sotoinpcb(so);
2433 udp_lock(struct socket *so, int refcount, void *debug)
2437         if (debug == NULL) {
2438                 lr_saved = __builtin_return_address(0);
2443         if (so->so_pcb != NULL) {
2444                 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2445                     LCK_MTX_ASSERT_NOTOWNED);
2446                 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2448                 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2449                     so, lr_saved, solockhistory_nr(so));
2456         so->lock_lr[so->next_lock_lr] = lr_saved;
2457         so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2462 udp_unlock(struct socket *so, int refcount, void *debug)
2466         if (debug == NULL) {
2467                 lr_saved = __builtin_return_address(0);
2473                 VERIFY(so->so_usecount > 0);
2476         if (so->so_pcb == NULL) {
2477                 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
2478                     so, lr_saved, solockhistory_nr(so));
2481                 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
2482                     LCK_MTX_ASSERT_OWNED);
2483                 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2484                 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2485                 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
2491 udp_getlock(struct socket *so, int flags)
2493 #pragma unused(flags)
2494         struct inpcb *inp = sotoinpcb(so);
2496         if (so->so_pcb == NULL) {
2497                 panic("%s: so=%p NULL so_pcb lrh= %s\n", __func__,
2498                     so, solockhistory_nr(so));
2501         return &inp->inpcb_mtx;
2505  * UDP garbage collector callback (inpcb_timer_func_t).
2507  * Returns > 0 to keep timer active.
2510 udp_gc(struct inpcbinfo *ipi)
2512         struct inpcb *inp, *inpnxt;
2515         if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2516                 if (udp_gc_done == TRUE) {
2517                         udp_gc_done = FALSE;
2518                         /* couldn't get the lock, must lock next time */
2519                         atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2522                 lck_rw_lock_exclusive(ipi->ipi_lock);
2527         for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
2528                 inpnxt = inp->inp_list.le_next;
2531                  * Skip unless it's STOPUSING; garbage collector will
2532                  * be triggered by in_pcb_checkstate() upon setting
2533                  * wantcnt to that value.  If the PCB is already dead,
2534                  * keep gc active to anticipate wantcnt changing.
2536                 if (inp->inp_wantcnt != WNT_STOPUSING) {
2541                  * Skip if busy, no hurry for cleanup.  Keep gc active
2542                  * and try the lock again during next round.
2544                 if (!socket_try_lock(inp->inp_socket)) {
2545                         atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2550                  * Keep gc active unless usecount is 0.
2552                 so = inp->inp_socket;
2553                 if (so->so_usecount == 0) {
2554                         if (inp->inp_state != INPCB_STATE_DEAD) {
2556                                 if (SOCK_CHECK_DOM(so, PF_INET6)) {
2564                         socket_unlock(so, 0);
2565                         atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
2568         lck_rw_done(ipi->ipi_lock);
2572 udp_getstat SYSCTL_HANDLER_ARGS
2574 #pragma unused(oidp, arg1, arg2)
2575         if (req->oldptr == USER_ADDR_NULL) {
2576                 req->oldlen = (size_t)sizeof(struct udpstat);
2579         return SYSCTL_OUT(req, &udpstat, MIN(sizeof(udpstat), req->oldlen));
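/*
 * Editorial note (not in the original source): this handler backs the
 * read-only net.inet.udp.stats sysctl, which exports the kernel's struct
 * udpstat (see <netinet/udp_var.h>).  A minimal user-space sketch of reading
 * it follows; it assumes the structure layout matches the running kernel.
 */
#if 0   /* illustrative user-space sketch only */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <stdio.h>

int
main(void)
{
        struct udpstat st;
        size_t len = sizeof(st);

        if (sysctlbyname("net.inet.udp.stats", &st, &len, NULL, 0) != 0) {
                perror("sysctlbyname");
                return 1;
        }
        printf("output packets: %u, bad checksums: %u\n",
            st.udps_opackets, st.udps_badsum);
        return 0;
}
#endif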
2583 udp_in_cksum_stats(u_int32_t len)
2585         udpstat.udps_rcv_swcsum++;
2586         udpstat.udps_rcv_swcsum_bytes += len;
2590 udp_out_cksum_stats(u_int32_t len)
2592         udpstat.udps_snd_swcsum++;
2593         udpstat.udps_snd_swcsum_bytes += len;
2598 udp_in6_cksum_stats(u_int32_t len)
2600         udpstat.udps_rcv6_swcsum++;
2601         udpstat.udps_rcv6_swcsum_bytes += len;
2605 udp_out6_cksum_stats(u_int32_t len)
2607         udpstat.udps_snd6_swcsum++;
2608         udpstat.udps_snd6_swcsum_bytes += len;
2613  * Checksum extended UDP header and data.
2616 udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen)
2618         struct ifnet *ifp = m->m_pkthdr.rcvif;
2619         struct ip *ip = mtod(m, struct ip *);
2620         struct ipovly *ipov = (struct ipovly *)ip;
2622         if (uh->uh_sum == 0) {
2623                 udpstat.udps_nosum++;
2627         /* ip_stripoptions() must have been called before we get here */
2628         ASSERT((ip->ip_hl << 2) == sizeof(*ip));
2630         if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
2631             (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
2632             (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
2633                 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
2634                         uh->uh_sum = m->m_pkthdr.csum_rx_val;
2636                         uint32_t sum = m->m_pkthdr.csum_rx_val;
2637                         uint32_t start = m->m_pkthdr.csum_rx_start;
2638                         int32_t trailer = (m_pktlen(m) - (off + ulen));
2641                          * Perform 1's complement adjustment of octets
2642                          * that got included/excluded in the hardware-
2643                          * calculated checksum value.  Ignore cases
2644                          * where the value already includes the entire
2645                          * IP header span, as the sum for those octets
2646                          * would already be 0 by the time we get here;
2647                          * IP has already performed its header checksum
2648                          * checks.  If we do need to adjust, restore
2649                          * the original fields in the IP header when
2650                          * computing the adjustment value.  Also take
2651                          * care of any trailing bytes and subtract out
2652                          * their partial sum.
2654                         ASSERT(trailer >= 0);
2655                         if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
2656                             ((start != 0 && start != off) || trailer != 0)) {
2657                                 uint32_t swbytes = (uint32_t)trailer;
2660                                         ip->ip_len += sizeof(*ip);
2661 #if BYTE_ORDER != BIG_ENDIAN
2664 #endif /* BYTE_ORDER != BIG_ENDIAN */
2666                                 /* callee folds in sum */
2667                                 sum = m_adj_sum16(m, start, off, ulen, sum);
2669                                         swbytes += (off - start);
2671                                         swbytes += (start - off);
2675 #if BYTE_ORDER != BIG_ENDIAN
2678 #endif /* BYTE_ORDER != BIG_ENDIAN */
2679                                         ip->ip_len -= sizeof(*ip);
2683                                         udp_in_cksum_stats(swbytes);
2690                         /* callee folds in sum */
2691                         uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
2692                             ip->ip_dst.s_addr, sum + htonl(ulen + IPPROTO_UDP));
2694                 uh->uh_sum ^= 0xffff;
2699                 bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1));
2700                 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
2701                 ip_sum = ipov->ih_len;
2702                 ipov->ih_len = uh->uh_ulen;
2703                 uh->uh_sum = in_cksum(m, ulen + sizeof(struct ip));
2704                 bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1));
2705                 ipov->ih_len = ip_sum;
2707                 udp_in_cksum_stats(ulen);
2710         if (uh->uh_sum != 0) {
2711                 udpstat.udps_badsum++;
2712                 IF_UDP_STATINC(ifp, badchksum);
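/*
 * Editorial note (not in the original source): the receive-side check above
 * relies on the defining property of the Internet checksum (RFC 768 /
 * RFC 1071): summing the pseudo header, the UDP header (including the
 * transmitted checksum) and the payload as one's complement 16-bit words,
 * then folding, yields 0xffff for an undamaged datagram, so the final value
 * of uh_sum is 0 exactly when the checksum verifies.  A user-space sketch of
 * that verification follows; ocsum16() and verify_udp_cksum() are
 * hypothetical helpers.
 */
#if 0   /* illustrative user-space sketch only */
#include <stdint.h>
#include <stddef.h>

/* One's complement sum over big-endian 16-bit words, starting from "sum". */
static uint32_t
ocsum16(const uint8_t *p, size_t len, uint32_t sum)
{
        for (; len > 1; p += 2, len -= 2)
                sum += ((uint32_t)p[0] << 8) | p[1];
        if (len == 1)
                sum += (uint32_t)p[0] << 8;     /* pad trailing odd byte */
        return sum;
}

/* Returns 1 if a UDP datagram's checksum verifies, 0 otherwise. */
static int
verify_udp_cksum(const uint8_t pseudo_hdr[12],
    const uint8_t *udp_hdr_and_payload, size_t udp_len)
{
        uint32_t sum;

        sum = ocsum16(pseudo_hdr, 12, 0);
        sum = ocsum16(udp_hdr_and_payload, udp_len, sum);
        while (sum > 0xffff)
                sum = (sum >> 16) + (sum & 0xffff);     /* fold carries */
        return sum == 0xffff;
}
#endif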
2720 udp_fill_keepalive_offload_frames(ifnet_t ifp,
2721     struct ifnet_keepalive_offload_frame *frames_array,
2722     u_int32_t frames_array_count, size_t frame_data_offset,
2723     u_int32_t *used_frames_count)
2727         u_int32_t frame_index = *used_frames_count;
2729         if (ifp == NULL || frames_array == NULL ||
2730             frames_array_count == 0 ||
2731             frame_index >= frames_array_count ||
2732             frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2736         lck_rw_lock_shared(udbinfo.ipi_lock);
2737         gencnt = udbinfo.ipi_gencnt;
2738         LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
2741                 struct ifnet_keepalive_offload_frame *frame;
2742                 struct mbuf *m = NULL;
2744                 if (frame_index >= frames_array_count) {
2748                 if (inp->inp_gencnt > gencnt ||
2749                     inp->inp_state == INPCB_STATE_DEAD) {
2753                 if ((so = inp->inp_socket) == NULL ||
2754                     (so->so_state & SS_DEFUNCT)) {
2758                  * check for keepalive offload flag without socket
2759                  * lock to avoid a deadlock
2761                 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
2766                 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
2767                         udp_unlock(so, 1, 0);
2770                 if ((inp->inp_vflag & INP_IPV4) &&
2771                     (inp->inp_laddr.s_addr == INADDR_ANY ||
2772                     inp->inp_faddr.s_addr == INADDR_ANY)) {
2773                         udp_unlock(so, 1, 0);
2776                 if ((inp->inp_vflag & INP_IPV6) &&
2777                     (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
2778                     IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
2779                         udp_unlock(so, 1, 0);
2782                 if (inp->inp_lport == 0 || inp->inp_fport == 0) {
2783                         udp_unlock(so, 1, 0);
2786                 if (inp->inp_last_outifp == NULL ||
2787                     inp->inp_last_outifp->if_index != ifp->if_index) {
2788                         udp_unlock(so, 1, 0);
2791                 if ((inp->inp_vflag & INP_IPV4)) {
2792                         if ((frame_data_offset + sizeof(struct udpiphdr) +
2793                             inp->inp_keepalive_datalen) >
2794                             IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2795                                 udp_unlock(so, 1, 0);
2798                         if ((sizeof(struct udpiphdr) +
2799                             inp->inp_keepalive_datalen) > _MHLEN) {
2800                                 udp_unlock(so, 1, 0);
2804                         if ((frame_data_offset + sizeof(struct ip6_hdr) +
2805                             sizeof(struct udphdr) +
2806                             inp->inp_keepalive_datalen) >
2807                             IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
2808                                 udp_unlock(so, 1, 0);
2811                         if ((sizeof(struct ip6_hdr) + sizeof(struct udphdr) +
2812                             inp->inp_keepalive_datalen) > _MHLEN) {
2813                                 udp_unlock(so, 1, 0);
2817                 MGETHDR(m, M_WAIT, MT_HEADER);
2819                         udp_unlock(so, 1, 0);
2823                  * This inp has all the information that is needed to
2824                  * generate an offload frame.
2826                 if (inp->inp_vflag & INP_IPV4) {
2830                         frame = &frames_array[frame_index];
2831                         frame->length = frame_data_offset +
2832                             sizeof(struct udpiphdr) +
2833                             inp->inp_keepalive_datalen;
2835                             IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
2836                         frame->interval = inp->inp_keepalive_interval;
2837                         switch (inp->inp_keepalive_type) {
2838                         case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2840                                     IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2845                         data = mtod(m, u_int8_t *);
2846                         bzero(data, sizeof(struct udpiphdr));
2847                         ip = (__typeof__(ip))(void *)data;
2848                         udp = (__typeof__(udp))(void *) (data +
2850                         m->m_len = sizeof(struct udpiphdr);
2851                         data = data + sizeof(struct udpiphdr);
2852                         if (inp->inp_keepalive_datalen > 0 &&
2853                             inp->inp_keepalive_data != NULL) {
2854                                 bcopy(inp->inp_keepalive_data, data,
2855                                     inp->inp_keepalive_datalen);
2856                                 m->m_len += inp->inp_keepalive_datalen;
2858                         m->m_pkthdr.len = m->m_len;
2860                         ip->ip_v = IPVERSION;
2861                         ip->ip_hl = (sizeof(struct ip) >> 2);
2862                         ip->ip_p = IPPROTO_UDP;
2863                         ip->ip_len = htons(sizeof(struct udpiphdr) +
2864                             (u_short)inp->inp_keepalive_datalen);
2865                         ip->ip_ttl = inp->inp_ip_ttl;
2866                         ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
2867                         ip->ip_src = inp->inp_laddr;
2868                         ip->ip_dst = inp->inp_faddr;
2869                         ip->ip_sum = in_cksum_hdr_opt(ip);
2871                         udp->uh_sport = inp->inp_lport;
2872                         udp->uh_dport = inp->inp_fport;
2873                         udp->uh_ulen = htons(sizeof(struct udphdr) +
2874                             (u_short)inp->inp_keepalive_datalen);
2876                         if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2877                                 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
2879                                     htons(sizeof(struct udphdr) +
2880                                     (u_short)inp->inp_keepalive_datalen +
2882                                 m->m_pkthdr.csum_flags =
2883                                     (CSUM_UDP | CSUM_ZERO_INVERT);
2884                                 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2887                         m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2888                         in_delayed_cksum(m);
2889                         bcopy(m->m_data, frame->data + frame_data_offset,
2892                         struct ip6_hdr *ip6;
2893                         struct udphdr *udp6;
2895                         VERIFY(inp->inp_vflag & INP_IPV6);
2896                         frame = &frames_array[frame_index];
2897                         frame->length = frame_data_offset +
2898                             sizeof(struct ip6_hdr) +
2899                             sizeof(struct udphdr) +
2900                             inp->inp_keepalive_datalen;
2902                             IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
2903                         frame->interval = inp->inp_keepalive_interval;
2904                         switch (inp->inp_keepalive_type) {
2905                         case UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY:
2907                                     IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY;
2912                         data = mtod(m, u_int8_t *);
2913                         bzero(data, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
2914                         ip6 = (__typeof__(ip6))(void *)data;
2915                         udp6 = (__typeof__(udp6))(void *)(data +
2916                             sizeof(struct ip6_hdr));
2917                         m->m_len = sizeof(struct ip6_hdr) +
2918                             sizeof(struct udphdr);
2919                         data = data + (sizeof(struct ip6_hdr) +
2920                             sizeof(struct udphdr));
2921                         if (inp->inp_keepalive_datalen > 0 &&
2922                             inp->inp_keepalive_data != NULL) {
2923                                 bcopy(inp->inp_keepalive_data, data,
2924                                     inp->inp_keepalive_datalen);
2925                                 m->m_len += inp->inp_keepalive_datalen;
2927                         m->m_pkthdr.len = m->m_len;
2928                         ip6->ip6_flow = inp->inp_flow & IPV6_FLOWINFO_MASK;
2929                         ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
2930                         ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2931                         ip6->ip6_vfc |= IPV6_VERSION;
2932                         ip6->ip6_nxt = IPPROTO_UDP;
2933                         ip6->ip6_hlim = ip6_defhlim;
2934                         ip6->ip6_plen = htons(sizeof(struct udphdr) +
2935                             (u_short)inp->inp_keepalive_datalen);
2936                         ip6->ip6_src = inp->in6p_laddr;
2937                         if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
2938                                 ip6->ip6_src.s6_addr16[1] = 0;
2941                         ip6->ip6_dst = inp->in6p_faddr;
2942                         if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
2943                                 ip6->ip6_dst.s6_addr16[1] = 0;
2946                         udp6->uh_sport = inp->in6p_lport;
2947                         udp6->uh_dport = inp->in6p_fport;
2948                         udp6->uh_ulen = htons(sizeof(struct udphdr) +
2949                             (u_short)inp->inp_keepalive_datalen);
2950                         if (!(inp->inp_flags & INP_UDP_NOCKSUM)) {
2951                                 udp6->uh_sum = in6_pseudo(&ip6->ip6_src,
2953                                     htonl(sizeof(struct udphdr) +
2954                                     (u_short)inp->inp_keepalive_datalen +
2956                                 m->m_pkthdr.csum_flags =
2957                                     (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
2958                                 m->m_pkthdr.csum_data = offsetof(struct udphdr,
2961                         m->m_pkthdr.pkt_proto = IPPROTO_UDP;
2962                         in6_delayed_cksum(m);
2963                         bcopy(m->m_data, frame->data + frame_data_offset,
2971                 udp_unlock(so, 1, 0);
2973         lck_rw_done(udbinfo.ipi_lock);
2974         *used_frames_count = frame_index;