+ for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
+ inp = LIST_NEXT(inp, inp_list)) {
+ if (inp->inp_gencnt <= gencnt &&
+ inp->inp_state != INPCB_STATE_DEAD)
+ inp_list[i++] = inp;
+ }
+ n = i;
+
+ error = 0;
+ for (i = 0; i < n; i++) {
+ struct xinpcb64 xi;
+
+ inp = inp_list[i];
+
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+ continue;
+ udp_lock(inp->inp_socket, 1, 0);
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ udp_unlock(inp->inp_socket, 1, 0);
+ continue;
+ }
+ if (inp->inp_gencnt > gencnt) {
+ udp_unlock(inp->inp_socket, 1, 0);
+ continue;
+ }
+
+ bzero(&xi, sizeof (xi));
+ xi.xi_len = sizeof (xi);
+ inpcb_to_xinpcb64(inp, &xi);
+ if (inp->inp_socket)
+ sotoxsocket64(inp->inp_socket, &xi.xi_socket);
+
+ udp_unlock(inp->inp_socket, 1, 0);
+
+ error = SYSCTL_OUT(req, &xi, sizeof (xi));
+ }
+ if (!error) {
+ /*
+ * Give the user an updated idea of our state.
+ * If the generation differs from what we told
+ * her before, she knows that something happened
+ * while we were processing this request, and it
+ * might be necessary to retry.
+ */
+ bzero(&xig, sizeof (xig));
+ xig.xig_len = sizeof (xig);
+ xig.xig_gen = udbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = udbinfo.ipi_count;
+ error = SYSCTL_OUT(req, &xig, sizeof (xig));
+ }
+ FREE(inp_list, M_TEMP);
+ lck_rw_done(udbinfo.ipi_lock);
+ return (error);
+}
+
+/*
+ * sysctl net.inet.udp.pcblist64: read-only dump of the active UDP
+ * PCB list as a stream of xinpcb64 records (handler: udp_pcblist64).
+ */
+SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64,
+ "S,xinpcb64", "List of active UDP sockets");
+
+
+/*
+ * sysctl handler for net.inet.udp.pcblist_n: export the active UDP
+ * PCB list in the "xinpcb_n" format by delegating to the shared
+ * get_pcblist_n() helper with the UDP pcbinfo.
+ */
+static int
+udp_pcblist_n SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+	int error;
+
+	error = get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
+	return (error);
+}
+
+/*
+ * sysctl net.inet.udp.pcblist_n: read-only dump of the active UDP
+ * PCB list as a stream of xinpcb_n records (handler: udp_pcblist_n).
+ */
+SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n,
+ "S,xinpcb_n", "List of active UDP sockets");
+
+/*
+ * Mark, in the caller-supplied bitfield, the local UDP ports in use
+ * on the interface identified by ifindex, filtered by protocol and
+ * flags.  Thin forwarding wrapper binding the global UDP pcbinfo
+ * (udbinfo) to the generic inpcb_get_ports_used() helper.
+ */
+__private_extern__ void
+udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
+ bitstr_t *bitfield)
+{
+ inpcb_get_ports_used(ifindex, protocol, flags, bitfield, &udbinfo);
+}
+
+/*
+ * Count the opportunistic UDP sockets associated with the interface
+ * identified by ifindex, subject to flags, by consulting the global
+ * UDP pcbinfo (udbinfo).
+ */
+__private_extern__ uint32_t
+udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
+{
+	uint32_t count;
+
+	count = inpcb_count_opportunistic(ifindex, &udbinfo, flags);
+	return (count);
+}
+
+/*
+ * Look for any UDP PCB bound to the address carried by ifa, scanning
+ * the global UDP pcbinfo (udbinfo) via inpcb_find_anypcb_byaddr().
+ */
+__private_extern__ uint32_t
+udp_find_anypcb_byaddr(struct ifaddr *ifa)
+{
+	uint32_t result;
+
+	result = inpcb_find_anypcb_byaddr(ifa, &udbinfo);
+	return (result);
+}
+
+/*
+ * Parse IP_PKTINFO ancillary data attached to an outgoing UDP datagram.
+ *
+ * control: single mbuf holding the cmsg chain (multi-mbuf control is
+ *          rejected with EINVAL).
+ * outif:   optional; on success with a valid ipi_ifindex, receives a
+ *          referenced ifnet pointer the CALLER must release with
+ *          ifnet_release().
+ * laddr:   on success, the temporary source address to use: INADDR_ANY
+ *          when an interface index was given (source selection then
+ *          follows the interface), else the supplied ipi_spec_dst.
+ *
+ * Returns 0 on success (including when no IP_PKTINFO cmsg is present,
+ * in which case *laddr is left untouched), EINVAL for malformed
+ * control data, or ENXIO for an out-of-range or dead interface index.
+ */
+static int
+udp_check_pktinfo(struct mbuf *control, struct ifnet **outif,
+ struct in_addr *laddr)
+{
+ struct cmsghdr *cm = 0;
+ struct in_pktinfo *pktinfo;
+ struct ifnet *ifp;
+
+ if (outif != NULL)
+ *outif = NULL;
+
+ /*
+ * XXX: Currently, we assume all the optional information is stored
+ * in a single mbuf.
+ */
+ if (control->m_next)
+ return (EINVAL);
+
+ if (control->m_len < CMSG_LEN(0))
+ return (EINVAL);
+
+ for (cm = M_FIRST_CMSGHDR(control); cm;
+ cm = M_NXT_CMSGHDR(control, cm)) {
+ /* Reject a cmsg whose claimed length can't fit its own header
+ * or overruns the control mbuf. */
+ if (cm->cmsg_len < sizeof (struct cmsghdr) ||
+ cm->cmsg_len > control->m_len)
+ return (EINVAL);
+
+ if (cm->cmsg_level != IPPROTO_IP || cm->cmsg_type != IP_PKTINFO)
+ continue;
+
+ /* IP_PKTINFO payload must be exactly a struct in_pktinfo */
+ if (cm->cmsg_len != CMSG_LEN(sizeof (struct in_pktinfo)))
+ return (EINVAL);
+
+ pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm);
+
+ /* Check for a valid ifindex in pktinfo */
+ ifnet_head_lock_shared();
+
+ if (pktinfo->ipi_ifindex > if_index) {
+ ifnet_head_done();
+ return (ENXIO);
+ }
+
+ /*
+ * If ipi_ifindex is specified it takes precedence
+ * over ipi_spec_dst.
+ */
+ if (pktinfo->ipi_ifindex) {
+ ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
+ if (ifp == NULL) {
+ ifnet_head_done();
+ return (ENXIO);
+ }
+ if (outif != NULL) {
+ /* Reference handed to the caller; caller releases. */
+ ifnet_reference(ifp);
+ *outif = ifp;
+ }
+ /* ifnet head lock only protects the if_index /
+ * ifindex2ifnet lookup above; drop it before returning. */
+ ifnet_head_done();
+ laddr->s_addr = INADDR_ANY;
+ break;
+ }
+
+ ifnet_head_done();
+
+ /*
+ * Use the provided ipi_spec_dst address for temp
+ * source address.
+ */
+ *laddr = pktinfo->ipi_spec_dst;
+ break;
+ }
+ return (0);
+}
+
+static int
+udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct proc *p)
+{
+ struct udpiphdr *ui;
+ int len = m->m_pkthdr.len;
+ struct sockaddr_in *sin;
+ struct in_addr origladdr, laddr, faddr, pi_laddr;
+ u_short lport, fport;
+ int error = 0, udp_dodisconnect = 0, pktinfo = 0;
+ struct socket *so = inp->inp_socket;
+ int soopts = 0;
+ struct mbuf *inpopts;
+ struct ip_moptions *mopts;
+ struct route ro;
+ struct ip_out_args ipoa =
+ { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0, 0, 0 };
+ struct ifnet *outif = NULL;
+ struct flowadv *adv = &ipoa.ipoa_flowadv;
+ int sotc = SO_TC_UNSPEC;
+ int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
+ struct ifnet *origoutifp = NULL;
+ int flowadv = 0;
+
+ /* Enable flow advisory only when connected */
+ flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
+ pi_laddr.s_addr = INADDR_ANY;
+
+ KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+ if (control != NULL) {
+ sotc = so_tc_from_control(control, &netsvctype);
+ VERIFY(outif == NULL);
+ error = udp_check_pktinfo(control, &outif, &pi_laddr);
+ m_freem(control);
+ control = NULL;
+ if (error)
+ goto release;
+ pktinfo++;
+ if (outif != NULL)
+ ipoa.ipoa_boundif = outif->if_index;
+ }
+ if (sotc == SO_TC_UNSPEC) {
+ sotc = so->so_traffic_class;
+ netsvctype = so->so_netsvctype;
+ }
+
+ KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
+ inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
+ (htons((u_short)len + sizeof (struct udphdr))));
+
+ if (len + sizeof (struct udpiphdr) > IP_MAXPACKET) {
+ error = EMSGSIZE;
+ goto release;
+ }
+
+ if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
+ /*
+ * The socket is flow-controlled, drop the packets
+ * until the inp is not flow controlled
+ */
+ error = ENOBUFS;
+ goto release;
+ }
+ /*
+ * If socket was bound to an ifindex, tell ip_output about it.
+ * If the ancillary IP_PKTINFO option contains an interface index,
+ * it takes precedence over the one specified by IP_BOUND_IF.
+ */
+ if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
+ (inp->inp_flags & INP_BOUND_IF)) {
+ VERIFY(inp->inp_boundifp != NULL);
+ ifnet_reference(inp->inp_boundifp); /* for this routine */
+ if (outif != NULL)
+ ifnet_release(outif);
+ outif = inp->inp_boundifp;
+ ipoa.ipoa_boundif = outif->if_index;
+ }
+ if (INP_NO_CELLULAR(inp))
+ ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
+ if (INP_NO_EXPENSIVE(inp))
+ ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
+ if (INP_AWDL_UNRESTRICTED(inp))
+ ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
+ ipoa.ipoa_sotc = sotc;
+ ipoa.ipoa_netsvctype = netsvctype;
+ soopts |= IP_OUTARGS;
+
+ /*
+ * If there was a routing change, discard cached route and check
+ * that we have a valid source address. Reacquire a new source
+ * address if INADDR_ANY was specified.
+ */
+ if (ROUTE_UNUSABLE(&inp->inp_route)) {
+ struct in_ifaddr *ia = NULL;
+
+ ROUTE_RELEASE(&inp->inp_route);
+
+ /* src address is gone? */
+ if (inp->inp_laddr.s_addr != INADDR_ANY &&
+ (ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
+ if (!(inp->inp_flags & INP_INADDR_ANY) ||
+ (so->so_state & SS_ISCONNECTED)) {
+ /*
+ * Rdar://5448998
+ * If the source address is gone, return an
+ * error if:
+ * - the source was specified
+ * - the socket was already connected
+ */
+ soevent(so, (SO_FILT_HINT_LOCKED |
+ SO_FILT_HINT_NOSRCADDR));