+
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ return (error);
+}
+
+/*
+ * Caller passes in the ioctl data pointer directly via "ifr", with the
+ * expectation that this routine always uses bcopy() or other byte-aligned
+ * memory accesses.
+ */
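+/*
+ * For illustration (not part of the build): a direct structure access
+ * such as
+ *
+ *	ifr->ifr_broadaddr = *(struct sockaddr *)&ia->ia_broadaddr;
+ *
+ * would assume that the caller-supplied buffer is suitably aligned;
+ * the bcopy() calls below make no such assumption.
+ */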
+static __attribute__((noinline)) int
+inctl_ifbrdaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd,
+ struct ifreq *ifr)
+{
+ struct kev_in_data in_event_data;
+ struct kev_msg ev_msg;
+ int error = 0;
+
+ VERIFY(ifp != NULL);
+
+ if (ia == NULL)
+ return (EADDRNOTAVAIL);
+
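+ /* Broadcast addresses are only meaningful on broadcast-capable interfaces */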
+ if (!(ifp->if_flags & IFF_BROADCAST))
+ return (EINVAL);
+
+ bzero(&in_event_data, sizeof (struct kev_in_data));
+ bzero(&ev_msg, sizeof (struct kev_msg));
+
+ switch (cmd) {
+ case SIOCGIFBRDADDR: /* struct ifreq */
+ IFA_LOCK(&ia->ia_ifa);
+ bcopy(&ia->ia_broadaddr, &ifr->ifr_broadaddr,
+ sizeof (struct sockaddr_in));
+ IFA_UNLOCK(&ia->ia_ifa);
+ break;
+
+ case SIOCSIFBRDADDR: /* struct ifreq */
+ IFA_LOCK(&ia->ia_ifa);
+ bcopy(&ifr->ifr_broadaddr, &ia->ia_broadaddr,
+ sizeof (struct sockaddr_in));
+
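+ /*
+ * Post a kernel event on the inet subclass of the network event
+ * class so that listeners learn of the broadcast address change.
+ */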
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_NETWORK_CLASS;
+ ev_msg.kev_subclass = KEV_INET_SUBCLASS;
+
+ ev_msg.event_code = KEV_INET_SIFBRDADDR;
+
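+ /*
+ * Snapshot the address state while the address lock is held;
+ * report INADDR_ANY if no destination address is configured.
+ */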
+ if (ia->ia_ifa.ifa_dstaddr) {
+ in_event_data.ia_dstaddr = ((struct sockaddr_in *)
+ (void *)ia->ia_ifa.ifa_dstaddr)->sin_addr;
+ } else {
+ in_event_data.ia_dstaddr.s_addr = INADDR_ANY;
+ }
+ in_event_data.ia_addr = ia->ia_addr.sin_addr;
+ in_event_data.ia_net = ia->ia_net;
+ in_event_data.ia_netmask = ia->ia_netmask;
+ in_event_data.ia_subnet = ia->ia_subnet;
+ in_event_data.ia_subnetmask = ia->ia_subnetmask;
+ in_event_data.ia_netbroadcast = ia->ia_netbroadcast;
+ IFA_UNLOCK(&ia->ia_ifa);
+ (void) strlcpy(&in_event_data.link_data.if_name[0],
+ ifp->if_name, IFNAMSIZ);
+ in_event_data.link_data.if_family = ifp->if_family;
+ in_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit;
+
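+ /* Attach the snapshot as the event payload */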
+ ev_msg.dv[0].data_ptr = &in_event_data;
+ ev_msg.dv[0].data_length = sizeof (struct kev_in_data);
+ ev_msg.dv[1].data_length = 0;
+
+ kev_post_msg(&ev_msg);
+ break;
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ return (error);
+}
+
+/*
+ * Caller passes in the ioctl data pointer directly via "ifr", with the
+ * expectation that this routine always uses bcopy() or other byte-aligned
+ * memory accesses.
+ */
+static __attribute__((noinline)) int
+inctl_ifnetmask(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd,
+ struct ifreq *ifr)
+{
+ struct kev_in_data in_event_data;
+ struct kev_msg ev_msg;
+ struct sockaddr_in mask;
+ int error = 0;
+
+ VERIFY(ifp != NULL);
+
+ bzero(&in_event_data, sizeof (struct kev_in_data));
+ bzero(&ev_msg, sizeof (struct kev_msg));
+
+ switch (cmd) {
+ case SIOCGIFNETMASK: /* struct ifreq */
+ if (ia == NULL) {
+ error = EADDRNOTAVAIL;
+ break;
+ }
+ IFA_LOCK(&ia->ia_ifa);
+ bcopy(&ia->ia_sockmask, &ifr->ifr_addr, sizeof (mask));
+ IFA_UNLOCK(&ia->ia_ifa);
+ break;
+
+ case SIOCSIFNETMASK: { /* struct ifreq */
+ in_addr_t i;
+
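+ /*
+ * Copy the caller's sockaddr into an aligned local before
+ * extracting the 32-bit mask from it (see the byte-aligned
+ * access note above).
+ */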
+ bcopy(&ifr->ifr_addr, &mask, sizeof (mask));
+ i = mask.sin_addr.s_addr;
+
+ VERIFY(ia != NULL);
+ IFA_LOCK(&ia->ia_ifa);
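+ /*
+ * Record the new mask: network byte order in ia_sockmask,
+ * host byte order in ia_subnetmask.
+ */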
+ ia->ia_subnetmask = ntohl(ia->ia_sockmask.sin_addr.s_addr = i);
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_NETWORK_CLASS;
+ ev_msg.kev_subclass = KEV_INET_SUBCLASS;
+
+ ev_msg.event_code = KEV_INET_SIFNETMASK;
+
+ if (ia->ia_ifa.ifa_dstaddr) {
+ in_event_data.ia_dstaddr = ((struct sockaddr_in *)
+ (void *)ia->ia_ifa.ifa_dstaddr)->sin_addr;
+ } else {
+ in_event_data.ia_dstaddr.s_addr = INADDR_ANY;
+ }
+ in_event_data.ia_addr = ia->ia_addr.sin_addr;
+ in_event_data.ia_net = ia->ia_net;
+ in_event_data.ia_netmask = ia->ia_netmask;
+ in_event_data.ia_subnet = ia->ia_subnet;
+ in_event_data.ia_subnetmask = ia->ia_subnetmask;
+ in_event_data.ia_netbroadcast = ia->ia_netbroadcast;
+ IFA_UNLOCK(&ia->ia_ifa);
+ (void) strlcpy(&in_event_data.link_data.if_name[0],
+ ifp->if_name, IFNAMSIZ);
+ in_event_data.link_data.if_family = ifp->if_family;
+ in_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit;
+
+ ev_msg.dv[0].data_ptr = &in_event_data;
+ ev_msg.dv[0].data_length = sizeof (struct kev_in_data);
+ ev_msg.dv[1].data_length = 0;
+
+ kev_post_msg(&ev_msg);
+ break;
+ }
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ return (error);
+}
+
+/*
+ * Generic INET control operations (ioctl's).
+ *
+ * ifp is NULL if not an interface-specific ioctl.
+ *
+ * Most of the routines called to handle the ioctls would otherwise be
+ * inlined into this routine, which unfortunately would cause it to
+ * consume too much stack space; this is the reason for the "noinline"
+ * attribute used on those routines.
+ *
+ * If called directly from within the networking stack (as opposed to via
+ * pru_control), the socket parameter may be NULL.
+ */
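+/*
+ * Illustrative user-space path (a sketch; the interface name and ioctl
+ * shown are examples only):
+ *
+ *	int s = socket(AF_INET, SOCK_DGRAM, 0);
+ *	struct ifreq ifr;
+ *	bzero(&ifr, sizeof (ifr));
+ *	strlcpy(ifr.ifr_name, "en0", sizeof (ifr.ifr_name));
+ *	ioctl(s, SIOCGIFNETMASK, &ifr);
+ *
+ * Such a request is dispatched through the socket and interface ioctl
+ * layers and reaches this routine with "data" pointing at a kernel
+ * copy of the caller's struct ifreq.
+ */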
+int
+in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
+ struct proc *p)
+{
+ struct ifreq *ifr = (struct ifreq *)(void *)data;
+ struct sockaddr_in addr, dstaddr;
+ struct sockaddr_in sin, *sa = NULL;
+ boolean_t privileged = (proc_suser(p) == 0);
+ boolean_t so_unlocked = FALSE;
+ struct in_ifaddr *ia = NULL;
+ struct ifaddr *ifa;
+ int error = 0;
+
+ /* A NULL socket is allowed only for requests coming from the kernel */
+ VERIFY(so != NULL || p == kernproc);
+
+ /*
+ * ioctls which don't require ifp, but do require a socket.
+ */
+ switch (cmd) {
+ case SIOCGASSOCIDS32: /* struct so_aidreq32 */
+ case SIOCGASSOCIDS64: /* struct so_aidreq64 */
+ return (inctl_associd(so, cmd, data));
+ /* NOTREACHED */
+
+ case SIOCGCONNIDS32: /* struct so_cidreq32 */
+ case SIOCGCONNIDS64: /* struct so_cidreq64 */
+ return (inctl_connid(so, cmd, data));
+ /* NOTREACHED */
+
+ case SIOCGCONNINFO32: /* struct so_cinforeq32 */
+ case SIOCGCONNINFO64: /* struct so_cinforeq64 */
+ return (inctl_conninfo(so, cmd, data));
+ /* NOTREACHED */
+ }
+
+ /*
+ * The remaining ioctls require ifp; reject the request if we don't
+ * have one, and return ENXIO to be consistent with ifioctl().
+ */
+ if (ifp == NULL)
+ return (ENXIO);
+
+ /*
+ * ioctls which require ifp but not an interface address.
+ */
+ switch (cmd) {
+ case SIOCAUTOADDR: /* struct ifreq */
+ if (!privileged)
+ return (EPERM);
+ return (inctl_autoaddr(ifp, ifr));
+ /* NOTREACHED */
+
+ case SIOCARPIPLL: /* struct ifreq */
+ if (!privileged)
+ return (EPERM);
+ return (inctl_arpipll(ifp, ifr));
+ /* NOTREACHED */
+
+ case SIOCSETROUTERMODE: /* struct ifreq */
+ if (!privileged)
+ return (EPERM);
+ return (inctl_setrouter(ifp, ifr));
+ /* NOTREACHED */
+
+ case SIOCPROTOATTACH: /* struct ifreq */
+ if (!privileged)
+ return (EPERM);
+ return (in_domifattach(ifp));
+ /* NOTREACHED */
+
+ case SIOCPROTODETACH: /* struct ifreq */
+ if (!privileged)
+ return (EPERM);
+
+ /*
+ * If an IPv4 address is still present, refuse to detach.
+ */
+ ifnet_lock_shared(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ IFA_LOCK(ifa);
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ IFA_UNLOCK(ifa);