/*
- * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <net/route.h>
#include <net/flowhash.h>
#include <net/flowadv.h>
+#include <net/nat464_utils.h>
#include <net/ntstat.h>
+#include <net/restricted_in_port.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
+
#if INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <sys/ubc.h>
#include <sys/vnode.h>
-static lck_grp_t *inpcb_lock_grp;
-static lck_attr_t *inpcb_lock_attr;
-static lck_grp_attr_t *inpcb_lock_grp_attr;
-decl_lck_mtx_data(static, inpcb_lock); /* global INPCB lock */
+#include <os/log.h>
+
+extern const char *proc_name_address(struct proc *);
+
+static lck_grp_t *inpcb_lock_grp;
+static lck_attr_t *inpcb_lock_attr;
+static lck_grp_attr_t *inpcb_lock_grp_attr;
+decl_lck_mtx_data(static, inpcb_lock); /* global INPCB lock */
decl_lck_mtx_data(static, inpcb_timeout_lock);
static TAILQ_HEAD(, inpcbinfo) inpcb_head = TAILQ_HEAD_INITIALIZER(inpcb_head);
-static u_int16_t inpcb_timeout_run = 0; /* INPCB timer is scheduled to run */
+static u_int16_t inpcb_timeout_run = 0; /* INPCB timer is scheduled to run */
static boolean_t inpcb_garbage_collecting = FALSE; /* gc timer is scheduled */
-static boolean_t inpcb_ticking = FALSE; /* "slow" timer is scheduled */
+static boolean_t inpcb_ticking = FALSE; /* "slow" timer is scheduled */
static boolean_t inpcb_fast_timer_on = FALSE;
-static boolean_t intcoproc_unrestricted = FALSE;
-extern char *proc_best_name(proc_t);
-
-/*
- * If the total number of gc reqs is above a threshold, schedule
- * garbage collect timer sooner
- */
-static boolean_t inpcb_toomany_gcreq = FALSE;
-
-#define INPCB_GCREQ_THRESHOLD 50000
+#define INPCB_GCREQ_THRESHOLD 50000
static thread_call_t inpcb_thread_call, inpcb_fast_thread_call;
static void inpcb_sched_timeout(void);
static void inpcb_sched_lazy_timeout(void);
static void _inpcb_sched_timeout(unsigned int);
static void inpcb_timeout(void *, void *);
-const int inpcb_timeout_lazy = 10; /* 10 seconds leeway for lazy timers */
+const int inpcb_timeout_lazy = 10; /* 10 seconds leeway for lazy timers */
extern int tvtohz(struct timeval *);
#if CONFIG_PROC_UUID_POLICY
#endif /* NECP */
#endif /* !CONFIG_PROC_UUID_POLICY */
-#define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8))
-#define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))
+#define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8))
+#define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))
/*
* These configure the range of local port addresses assigned to
* "unspecified" outgoing connections/packets/whatever.
*/
-int ipport_lowfirstauto = IPPORT_RESERVED - 1; /* 1023 */
-int ipport_lowlastauto = IPPORT_RESERVEDSTART; /* 600 */
-int ipport_firstauto = IPPORT_HIFIRSTAUTO; /* 49152 */
-int ipport_lastauto = IPPORT_HILASTAUTO; /* 65535 */
-int ipport_hifirstauto = IPPORT_HIFIRSTAUTO; /* 49152 */
-int ipport_hilastauto = IPPORT_HILASTAUTO; /* 65535 */
-
-#define RANGECHK(var, min, max) \
+int ipport_lowfirstauto = IPPORT_RESERVED - 1; /* 1023 */
+int ipport_lowlastauto = IPPORT_RESERVEDSTART; /* 600 */
+int ipport_firstauto = IPPORT_HIFIRSTAUTO; /* 49152 */
+int ipport_lastauto = IPPORT_HILASTAUTO; /* 65535 */
+int ipport_hifirstauto = IPPORT_HIFIRSTAUTO; /* 49152 */
+int ipport_hilastauto = IPPORT_HILASTAUTO; /* 65535 */
+
+#define RANGECHK(var, min, max) \
if ((var) < (min)) { (var) = (min); } \
else if ((var) > (max)) { (var) = (max); }
{
#pragma unused(arg1, arg2)
int error;
+#if (DEBUG | DEVELOPMENT)
+ int old_value = *(int *)oidp->oid_arg1;
+ /*
+	 * For unit testing, allow a non-superuser process with the
+	 * proper entitlement to modify these variables
+ */
+ if (req->newptr) {
+ if (proc_suser(current_proc()) != 0 &&
+ (error = priv_check_cred(kauth_cred_get(),
+ PRIV_NETINET_RESERVEDPORT, 0))) {
+ return EPERM;
+ }
+ }
+#endif /* (DEBUG | DEVELOPMENT) */
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (!error) {
RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX);
RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX);
}
- return (error);
+
+#if (DEBUG | DEVELOPMENT)
+ os_log(OS_LOG_DEFAULT,
+	    "%s:%u sysctl net.inet.ip.portrange: %d -> %d",
+ proc_best_name(current_proc()), proc_selfpid(),
+ old_value, *(int *)oidp->oid_arg1);
+#endif /* (DEBUG | DEVELOPMENT) */
+
+ return error;
}
#undef RANGECHK
SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange,
- CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IP Ports");
+ CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP Ports");
+
+#if (DEBUG | DEVELOPMENT)
+#define CTLFLAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY)
+#else
+#define CTLFLAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED)
+#endif /* (DEBUG | DEVELOPMENT) */
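+/*
+ * On DEBUG/DEVELOPMENT builds, CTLFLAG_ANYBODY lets a non-superuser
+ * test process attempt the write; sysctl_net_ipport_check still
+ * requires the PRIV_NETINET_RESERVEDPORT entitlement in that case.
+ */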
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
- CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
- &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
+    CTLFLAGS_IP_PORTRANGE,
+ &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
- CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
- &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
+    CTLFLAGS_IP_PORTRANGE,
+ &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first,
- CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
- &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
+    CTLFLAGS_IP_PORTRANGE,
+ &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last,
- CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
- &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
+    CTLFLAGS_IP_PORTRANGE,
+ &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
- CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
- &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
+    CTLFLAGS_IP_PORTRANGE,
+ &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
- CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
- &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");
+    CTLFLAGS_IP_PORTRANGE,
+ &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");
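+
+/*
+ * Example: the ephemeral range can be inspected or tuned from user
+ * space, e.g. "sysctl -w net.inet.ip.portrange.first=49152"; note
+ * that sysctl_net_ipport_check clamps out-of-range values into the
+ * allowed range instead of rejecting them.
+ */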
static uint32_t apn_fallbk_debug = 0;
#define apn_fallbk_log(x) do { if (apn_fallbk_debug >= 1) log x; } while (0)
+#if CONFIG_EMBEDDED
+static boolean_t apn_fallbk_enabled = TRUE;
+
+SYSCTL_DECL(_net_inet);
+SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "APN Fallback");
+SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &apn_fallbk_enabled, 0, "APN fallback enable");
+SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &apn_fallbk_debug, 0, "APN fallback debug enable");
+#else
static boolean_t apn_fallbk_enabled = FALSE;
+#endif
-extern int udp_use_randomport;
-extern int tcp_use_randomport;
+extern int udp_use_randomport;
+extern int tcp_use_randomport;
/* Structs used for flowhash computation */
struct inp_flowhash_key_addr {
union {
- struct in_addr v4;
+ struct in_addr v4;
struct in6_addr v6;
- u_int8_t addr8[16];
- u_int16_t addr16[8];
- u_int32_t addr32[4];
+ u_int8_t addr8[16];
+ u_int16_t addr16[8];
+ u_int32_t addr32[4];
} infha;
};
struct inp_flowhash_key {
- struct inp_flowhash_key_addr infh_laddr;
- struct inp_flowhash_key_addr infh_faddr;
- u_int32_t infh_lport;
- u_int32_t infh_fport;
- u_int32_t infh_af;
- u_int32_t infh_proto;
- u_int32_t infh_rand1;
- u_int32_t infh_rand2;
+ struct inp_flowhash_key_addr infh_laddr;
+ struct inp_flowhash_key_addr infh_faddr;
+ u_int32_t infh_lport;
+ u_int32_t infh_fport;
+ u_int32_t infh_af;
+ u_int32_t infh_proto;
+ u_int32_t infh_rand1;
+ u_int32_t infh_rand2;
};
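+
+/*
+ * The flowhash key covers both endpoints, the ports, the address
+ * family and the protocol, salted with random values so that the
+ * computed flow hashes are not externally predictable.
+ */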
static u_int32_t inp_hash_seed = 0;
static int infc_cmp(const struct inpcb *, const struct inpcb *);
/* Flags used by inp_fc_getinp */
-#define INPFC_SOLOCKED 0x1
-#define INPFC_REMOVE 0x2
+#define INPFC_SOLOCKED 0x1
+#define INPFC_REMOVE 0x2
static struct inpcb *inp_fc_getinp(u_int32_t, u_int32_t);
static void inp_fc_feedback(struct inpcb *);
inpcb_thread_call = thread_call_allocate_with_priority(inpcb_timeout,
NULL, THREAD_CALL_PRIORITY_KERNEL);
inpcb_fast_thread_call = thread_call_allocate_with_priority(
- inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL);
- if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL)
+ inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL);
+ if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL) {
panic("unable to alloc the inpcb thread call");
+ }
/*
* Initialize data structures required to deliver
RB_INIT(&inp_fc_tree);
bzero(&key_inp, sizeof(key_inp));
lck_mtx_unlock(&inp_fc_lck);
-
- PE_parse_boot_argn("intcoproc_unrestricted", &intcoproc_unrestricted,
- sizeof (intcoproc_unrestricted));
}
-#define INPCB_HAVE_TIMER_REQ(req) (((req).intimer_lazy > 0) || \
+#define INPCB_HAVE_TIMER_REQ(req) (((req).intimer_lazy > 0) || \
((req).intimer_fast > 0) || ((req).intimer_nodelay > 0))
static void
inpcb_timeout(void *arg0, void *arg1)
{
-#pragma unused(arg0)
+#pragma unused(arg0, arg1)
struct inpcbinfo *ipi;
boolean_t t, gc;
struct intimercount gccnt, tmcnt;
- boolean_t toomany_gc = FALSE;
-
- if (arg1 != NULL) {
- VERIFY(arg1 == &inpcb_toomany_gcreq);
- toomany_gc = *(boolean_t *)arg1;
- }
/*
* Update coarse-grained networking timestamp (in sec.); the idea
TAILQ_FOREACH(ipi, &inpcb_head, ipi_entry) {
if (INPCB_HAVE_TIMER_REQ(ipi->ipi_gc_req)) {
bzero(&ipi->ipi_gc_req,
- sizeof(ipi->ipi_gc_req));
+ sizeof(ipi->ipi_gc_req));
if (gc && ipi->ipi_gc != NULL) {
ipi->ipi_gc(ipi);
gccnt.intimer_lazy +=
}
if (INPCB_HAVE_TIMER_REQ(ipi->ipi_timer_req)) {
bzero(&ipi->ipi_timer_req,
- sizeof(ipi->ipi_timer_req));
+ sizeof(ipi->ipi_timer_req));
if (t && ipi->ipi_timer != NULL) {
ipi->ipi_timer(ipi);
tmcnt.intimer_lazy +=
ipi->ipi_timer_req.intimer_lazy;
- tmcnt.intimer_lazy +=
+ tmcnt.intimer_fast +=
ipi->ipi_timer_req.intimer_fast;
tmcnt.intimer_nodelay +=
ipi->ipi_timer_req.intimer_nodelay;
}
/* lock was dropped above, so check first before overriding */
- if (!inpcb_garbage_collecting)
+ if (!inpcb_garbage_collecting) {
inpcb_garbage_collecting = INPCB_HAVE_TIMER_REQ(gccnt);
- if (!inpcb_ticking)
+ }
+ if (!inpcb_ticking) {
inpcb_ticking = INPCB_HAVE_TIMER_REQ(tmcnt);
+ }
/* re-arm the timer if there's work to do */
- if (toomany_gc) {
- inpcb_toomany_gcreq = FALSE;
- } else {
- inpcb_timeout_run--;
- VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2);
- }
+ inpcb_timeout_run--;
+ VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2);
- if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0)
+ if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0) {
inpcb_sched_timeout();
- else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5)
+ } else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5) {
/* be lazy when idle with little activity */
inpcb_sched_lazy_timeout();
- else
+ } else {
inpcb_sched_timeout();
+ }
lck_mtx_unlock(&inpcb_timeout_lock);
}
uint64_t deadline, leeway;
clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
- lck_mtx_assert(&inpcb_timeout_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&inpcb_timeout_lock, LCK_MTX_ASSERT_OWNED);
if (inpcb_timeout_run == 0 &&
(inpcb_garbage_collecting || inpcb_ticking)) {
lck_mtx_convert_spin(&inpcb_timeout_lock);
clock_interval_to_absolutetime_interval(offset,
NSEC_PER_SEC, &leeway);
thread_call_enter_delayed_with_leeway(
- inpcb_thread_call, NULL, deadline, leeway,
- THREAD_CALL_DELAY_LEEWAY);
+ inpcb_thread_call, NULL, deadline, leeway,
+ THREAD_CALL_DELAY_LEEWAY);
}
} else if (inpcb_timeout_run == 1 &&
offset == 0 && !inpcb_fast_timer_on) {
inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type)
{
u_int32_t gccnt;
- uint64_t deadline;
lck_mtx_lock_spin(&inpcb_timeout_lock);
inpcb_garbage_collecting = TRUE;
gccnt = ipi->ipi_gc_req.intimer_nodelay +
- ipi->ipi_gc_req.intimer_fast;
+ ipi->ipi_gc_req.intimer_fast;
- if (gccnt > INPCB_GCREQ_THRESHOLD && !inpcb_toomany_gcreq) {
- inpcb_toomany_gcreq = TRUE;
-
- /*
- * There are toomany pcbs waiting to be garbage collected,
- * schedule a much faster timeout in addition to
- * the caller's request
- */
- lck_mtx_convert_spin(&inpcb_timeout_lock);
- clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
- thread_call_enter1_delayed(inpcb_thread_call,
- &inpcb_toomany_gcreq, deadline);
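+	/*
+	 * Too many pcbs are waiting to be garbage collected;
+	 * escalate the caller's request to a fast timeout
+	 */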
+ if (gccnt > INPCB_GCREQ_THRESHOLD) {
+ type = INPCB_TIMER_FAST;
}
switch (type) {
void
inpcb_timer_sched(struct inpcbinfo *ipi, u_int32_t type)
{
-
lck_mtx_lock_spin(&inpcb_timeout_lock);
inpcb_ticking = TRUE;
switch (type) {
lck_mtx_lock(&inpcb_lock);
TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
- if (ipi0 == ipi)
+ if (ipi0 == ipi) {
break;
+ }
}
- if (ipi0 != NULL)
+ if (ipi0 != NULL) {
TAILQ_REMOVE(&inpcb_head, ipi0, ipi_entry);
- else
+ } else {
error = ENXIO;
+ }
lck_mtx_unlock(&inpcb_lock);
- return (error);
+ return error;
}
/*
{
#pragma unused(p)
struct inpcb *inp;
- caddr_t temp;
+ caddr_t temp;
#if CONFIG_MACF_NET
int mac_error;
#endif /* CONFIG_MACF_NET */
if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
inp = (struct inpcb *)zalloc(pcbinfo->ipi_zone);
- if (inp == NULL)
- return (ENOBUFS);
- bzero((caddr_t)inp, sizeof (*inp));
+ if (inp == NULL) {
+ return ENOBUFS;
+ }
+ bzero((caddr_t)inp, sizeof(*inp));
} else {
inp = (struct inpcb *)(void *)so->so_saved_pcb;
temp = inp->inp_saved_ppcb;
- bzero((caddr_t)inp, sizeof (*inp));
+ bzero((caddr_t)inp, sizeof(*inp));
inp->inp_saved_ppcb = temp;
}
#if CONFIG_MACF_NET
mac_error = mac_inpcb_label_init(inp, M_WAITOK);
if (mac_error != 0) {
- if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0)
+ if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
zfree(pcbinfo->ipi_zone, inp);
- return (mac_error);
+ }
+ return mac_error;
}
mac_inpcb_label_associate(so, inp);
#endif /* CONFIG_MACF_NET */
/* make sure inp_stat is always 64-bit aligned */
inp->inp_stat = (struct inp_stat *)P2ROUNDUP(inp->inp_stat_store,
- sizeof (u_int64_t));
+ sizeof(u_int64_t));
if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store) +
- sizeof (*inp->inp_stat) > sizeof (inp->inp_stat_store)) {
+ sizeof(*inp->inp_stat) > sizeof(inp->inp_stat_store)) {
panic("%s: insufficient space to align inp_stat", __func__);
/* NOTREACHED */
}
/* make sure inp_cstat is always 64-bit aligned */
inp->inp_cstat = (struct inp_stat *)P2ROUNDUP(inp->inp_cstat_store,
- sizeof (u_int64_t));
+ sizeof(u_int64_t));
if (((uintptr_t)inp->inp_cstat - (uintptr_t)inp->inp_cstat_store) +
- sizeof (*inp->inp_cstat) > sizeof (inp->inp_cstat_store)) {
+ sizeof(*inp->inp_cstat) > sizeof(inp->inp_cstat_store)) {
panic("%s: insufficient space to align inp_cstat", __func__);
/* NOTREACHED */
}
/* make sure inp_wstat is always 64-bit aligned */
inp->inp_wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_wstat_store,
- sizeof (u_int64_t));
+ sizeof(u_int64_t));
if (((uintptr_t)inp->inp_wstat - (uintptr_t)inp->inp_wstat_store) +
- sizeof (*inp->inp_wstat) > sizeof (inp->inp_wstat_store)) {
+ sizeof(*inp->inp_wstat) > sizeof(inp->inp_wstat_store)) {
panic("%s: insufficient space to align inp_wstat", __func__);
/* NOTREACHED */
}
/* make sure inp_Wstat is always 64-bit aligned */
inp->inp_Wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_Wstat_store,
- sizeof (u_int64_t));
+ sizeof(u_int64_t));
if (((uintptr_t)inp->inp_Wstat - (uintptr_t)inp->inp_Wstat_store) +
- sizeof (*inp->inp_Wstat) > sizeof (inp->inp_Wstat_store)) {
+ sizeof(*inp->inp_Wstat) > sizeof(inp->inp_Wstat_store)) {
panic("%s: insufficient space to align inp_Wstat", __func__);
/* NOTREACHED */
}
}
#if INET6
- if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on)
+ if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) {
inp->inp_flags |= IN6P_IPV6_V6ONLY;
+ }
- if (ip6_auto_flowlabel)
+ if (ip6_auto_flowlabel) {
inp->inp_flags |= IN6P_AUTOFLOWLABEL;
+ }
#endif /* INET6 */
- if (intcoproc_unrestricted)
+ if (intcoproc_unrestricted) {
inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
+ }
(void) inp_update_policy(inp);
LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
pcbinfo->ipi_count++;
lck_rw_done(pcbinfo->ipi_lock);
- return (0);
+ return 0;
}
/*
* in_pcblookup_local_and_cleanup does everything
* in_pcblookup_local does but it checks for a socket
* that's going away. Since we know that the lock is
- * held read+write when this funciton is called, we
+ * held read+write when this function is called, we
* can safely dispose of this socket like the slow
* timer would usually do and return NULL. This is
* great for bind.
if (inp != NULL && inp->inp_wantcnt == WNT_STOPUSING) {
struct socket *so = inp->inp_socket;
- lck_mtx_lock(&inp->inpcb_mtx);
+ socket_lock(so, 0);
if (so->so_usecount == 0) {
- if (inp->inp_state != INPCB_STATE_DEAD)
+ if (inp->inp_state != INPCB_STATE_DEAD) {
in_pcbdetach(inp);
- in_pcbdispose(inp); /* will unlock & destroy */
+ }
+ in_pcbdispose(inp); /* will unlock & destroy */
inp = NULL;
} else {
- lck_mtx_unlock(&inp->inpcb_mtx);
+ socket_unlock(so, 0);
}
}
- return (inp);
+ return inp;
}
static void
* who has set SOF_NOTIFYCONFLICT owns.
*/
struct kev_msg ev_msg;
- struct kev_in_portinuse in_portinuse;
+ struct kev_in_portinuse in_portinuse;
- bzero(&in_portinuse, sizeof (struct kev_in_portinuse));
- bzero(&ev_msg, sizeof (struct kev_msg));
- in_portinuse.port = ntohs(port); /* port in host order */
+ bzero(&in_portinuse, sizeof(struct kev_in_portinuse));
+ bzero(&ev_msg, sizeof(struct kev_msg));
+ in_portinuse.port = ntohs(port); /* port in host order */
in_portinuse.req_pid = proc_selfpid();
ev_msg.vendor_code = KEV_VENDOR_APPLE;
ev_msg.kev_class = KEV_NETWORK_CLASS;
ev_msg.kev_subclass = KEV_INET_SUBCLASS;
ev_msg.event_code = KEV_INET_PORTINUSE;
ev_msg.dv[0].data_ptr = &in_portinuse;
- ev_msg.dv[0].data_length = sizeof (struct kev_in_portinuse);
+ ev_msg.dv[0].data_length = sizeof(struct kev_in_portinuse);
ev_msg.dv[1].data_length = 0;
dlil_post_complete_msg(NULL, &ev_msg);
}
struct in_addr laddr;
struct ifnet *outif = NULL;
- if (TAILQ_EMPTY(&in_ifaddrhead)) /* XXX broken! */
- return (EADDRNOTAVAIL);
- if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY)
- return (EINVAL);
- if (!(so->so_options & (SO_REUSEADDR|SO_REUSEPORT)))
+ if (TAILQ_EMPTY(&in_ifaddrhead)) { /* XXX broken! */
+ return EADDRNOTAVAIL;
+ }
+ if (!(so->so_options & (SO_REUSEADDR | SO_REUSEPORT))) {
wild = 1;
+ }
bzero(&laddr, sizeof(laddr));
socket_unlock(so, 0); /* keep reference on socket */
lck_rw_lock_exclusive(pcbinfo->ipi_lock);
+ if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
+ /* another thread completed the bind */
+ lck_rw_done(pcbinfo->ipi_lock);
+ socket_lock(so, 0);
+ return EINVAL;
+ }
if (nam != NULL) {
-
- if (nam->sa_len != sizeof (struct sockaddr_in)) {
+ if (nam->sa_len != sizeof(struct sockaddr_in)) {
lck_rw_done(pcbinfo->ipi_lock);
socket_lock(so, 0);
- return (EINVAL);
+ return EINVAL;
}
#if 0
/*
if (nam->sa_family != AF_INET) {
lck_rw_done(pcbinfo->ipi_lock);
socket_lock(so, 0);
- return (EAFNOSUPPORT);
+ return EAFNOSUPPORT;
}
#endif /* 0 */
lport = SIN(nam)->sin_port;
* and a multicast address is bound on both
* new and duplicated sockets.
*/
- if (so->so_options & SO_REUSEADDR)
- reuseport = SO_REUSEADDR|SO_REUSEPORT;
+ if (so->so_options & SO_REUSEADDR) {
+ reuseport = SO_REUSEADDR | SO_REUSEPORT;
+ }
} else if (SIN(nam)->sin_addr.s_addr != INADDR_ANY) {
struct sockaddr_in sin;
struct ifaddr *ifa;
/* Sanitized for interface address searches */
- bzero(&sin, sizeof (sin));
+ bzero(&sin, sizeof(sin));
sin.sin_family = AF_INET;
- sin.sin_len = sizeof (struct sockaddr_in);
+ sin.sin_len = sizeof(struct sockaddr_in);
sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
ifa = ifa_ifwithaddr(SA(&sin));
if (ifa == NULL) {
lck_rw_done(pcbinfo->ipi_lock);
socket_lock(so, 0);
- return (EADDRNOTAVAIL);
+ return EADDRNOTAVAIL;
} else {
/*
* Opportunistically determine the outbound
IFA_REMREF(ifa);
}
}
+
+
if (lport != 0) {
struct inpcb *t;
uid_t u;
- if (ntohs(lport) < IPPORT_RESERVED) {
+#if !CONFIG_EMBEDDED
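+		/*
+		 * The privileged port check applies only when binding
+		 * to a specific address; wildcard binds and externally
+		 * reserved ports (INP2_EXTERNAL_PORT) are exempt
+		 */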
+ if (ntohs(lport) < IPPORT_RESERVED &&
+ SIN(nam)->sin_addr.s_addr != 0 &&
+ !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
cred = kauth_cred_proc_ref(p);
error = priv_check_cred(cred,
PRIV_NETINET_RESERVEDPORT, 0);
if (error != 0) {
lck_rw_done(pcbinfo->ipi_lock);
socket_lock(so, 0);
- return (EACCES);
+ return EACCES;
}
}
+#endif /* !CONFIG_EMBEDDED */
+ /*
+	 * Check whether the process is allowed to bind to a restricted port
+ */
+ if (!current_task_can_use_restricted_in_port(lport,
+ so->so_proto->pr_protocol, PORT_FLAGS_BSD)) {
+ lck_rw_done(pcbinfo->ipi_lock);
+ socket_lock(so, 0);
+ return EADDRINUSE;
+ }
+
if (!IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
(u = kauth_cred_getuid(so->so_cred)) != 0 &&
(t = in_pcblookup_local_and_cleanup(
- inp->inp_pcbinfo, SIN(nam)->sin_addr, lport,
- INPLOOKUP_WILDCARD)) != NULL &&
+ inp->inp_pcbinfo, SIN(nam)->sin_addr, lport,
+ INPLOOKUP_WILDCARD)) != NULL &&
(SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
t->inp_laddr.s_addr != INADDR_ANY ||
!(t->inp_socket->so_options & SO_REUSEPORT)) &&
(u != kauth_cred_getuid(t->inp_socket->so_cred)) &&
!(t->inp_socket->so_flags & SOF_REUSESHAREUID) &&
(SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
- t->inp_laddr.s_addr != INADDR_ANY)) {
+ t->inp_laddr.s_addr != INADDR_ANY) &&
+ (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
+ !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
+ uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
if ((t->inp_socket->so_flags &
SOF_NOTIFYCONFLICT) &&
- !(so->so_flags & SOF_NOTIFYCONFLICT))
+ !(so->so_flags & SOF_NOTIFYCONFLICT)) {
conflict = 1;
+ }
lck_rw_done(pcbinfo->ipi_lock);
- if (conflict)
+ if (conflict) {
in_pcb_conflict_post_msg(lport);
+ }
socket_lock(so, 0);
- return (EADDRINUSE);
+ return EADDRINUSE;
}
t = in_pcblookup_local_and_cleanup(pcbinfo,
SIN(nam)->sin_addr, lport, wild);
if (t != NULL &&
- (reuseport & t->inp_socket->so_options) == 0) {
+ (reuseport & t->inp_socket->so_options) == 0 &&
+ (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
+ !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
+ uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
#if INET6
if (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
t->inp_laddr.s_addr != INADDR_ANY ||
SOCK_DOM(t->inp_socket) != PF_INET6)
#endif /* INET6 */
{
-
if ((t->inp_socket->so_flags &
SOF_NOTIFYCONFLICT) &&
- !(so->so_flags & SOF_NOTIFYCONFLICT))
+ !(so->so_flags & SOF_NOTIFYCONFLICT)) {
conflict = 1;
+ }
lck_rw_done(pcbinfo->ipi_lock);
- if (conflict)
+ if (conflict) {
in_pcb_conflict_post_msg(lport);
+ }
socket_lock(so, 0);
- return (EADDRINUSE);
+ return EADDRINUSE;
}
}
}
if (lport == 0) {
u_short first, last;
int count;
+ bool found;
+
+ /*
+		 * Force wild = 1 for implicit bind (mainly used by connect):
+		 * an implicit bind (lport == 0) always picks an unused port,
+		 * so SO_REUSEADDR/SO_REUSEPORT do not apply
+ */
+ wild = 1;
randomport = (so->so_flags & SOF_BINDRANDOMPORT) ||
(so->so_type == SOCK_STREAM ? tcp_use_randomport :
*/
anonport = TRUE;
if (inp->inp_flags & INP_HIGHPORT) {
- first = ipport_hifirstauto; /* sysctl */
+ first = ipport_hifirstauto; /* sysctl */
last = ipport_hilastauto;
lastport = &pcbinfo->ipi_lasthi;
} else if (inp->inp_flags & INP_LOWPORT) {
if (error != 0) {
lck_rw_done(pcbinfo->ipi_lock);
socket_lock(so, 0);
- return (error);
+ return error;
}
- first = ipport_lowfirstauto; /* 1023 */
- last = ipport_lowlastauto; /* 600 */
+ first = ipport_lowfirstauto; /* 1023 */
+ last = ipport_lowlastauto; /* 600 */
lastport = &pcbinfo->ipi_lastlow;
} else {
- first = ipport_firstauto; /* sysctl */
+ first = ipport_firstauto; /* sysctl */
last = ipport_lastauto;
lastport = &pcbinfo->ipi_lastport;
}
/* No point in randomizing if only one port is available */
- if (first == last)
+ if (first == last) {
randomport = 0;
+ }
/*
* Simple check to ensure all ports are not used up causing
* a deadlock here.
* is not being tested on each round of the loop.
*/
if (first > last) {
+ struct in_addr lookup_addr;
+
/*
* counting down
*/
if (randomport) {
- read_random(&rand_port, sizeof (rand_port));
+ read_frandom(&rand_port, sizeof(rand_port));
*lastport =
first - (rand_port % (first - last));
}
count = first - last;
+ lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
+ inp->inp_laddr;
+
+ found = false;
do {
- if (count-- < 0) { /* completely used? */
+ if (count-- < 0) { /* completely used? */
lck_rw_done(pcbinfo->ipi_lock);
socket_lock(so, 0);
- return (EADDRNOTAVAIL);
+ return EADDRNOTAVAIL;
}
--*lastport;
- if (*lastport > first || *lastport < last)
+ if (*lastport > first || *lastport < last) {
*lastport = first;
+ }
lport = htons(*lastport);
- } while (in_pcblookup_local_and_cleanup(pcbinfo,
- ((laddr.s_addr != INADDR_ANY) ? laddr :
- inp->inp_laddr), lport, wild));
+
+ /*
+			 * Skip restricted ports: we do not want to use
+			 * restricted ports as ephemeral ports
+ */
+ if (IS_RESTRICTED_IN_PORT(lport)) {
+ continue;
+ }
+
+ found = in_pcblookup_local_and_cleanup(pcbinfo,
+ lookup_addr, lport, wild) == NULL;
+ } while (!found);
} else {
+ struct in_addr lookup_addr;
+
/*
* counting up
*/
if (randomport) {
- read_random(&rand_port, sizeof (rand_port));
+ read_frandom(&rand_port, sizeof(rand_port));
*lastport =
first + (rand_port % (first - last));
}
count = last - first;
+ lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
+ inp->inp_laddr;
+
+ found = false;
do {
- if (count-- < 0) { /* completely used? */
+ if (count-- < 0) { /* completely used? */
lck_rw_done(pcbinfo->ipi_lock);
socket_lock(so, 0);
- return (EADDRNOTAVAIL);
+ return EADDRNOTAVAIL;
}
++*lastport;
- if (*lastport < first || *lastport > last)
+ if (*lastport < first || *lastport > last) {
*lastport = first;
+ }
lport = htons(*lastport);
- } while (in_pcblookup_local_and_cleanup(pcbinfo,
- ((laddr.s_addr != INADDR_ANY) ? laddr :
- inp->inp_laddr), lport, wild));
+
+ /*
+			 * Skip restricted ports: we do not want to use
+			 * restricted ports as ephemeral ports
+ */
+ if (IS_RESTRICTED_IN_PORT(lport)) {
+ continue;
+ }
+
+ found = in_pcblookup_local_and_cleanup(pcbinfo,
+ lookup_addr, lport, wild) == NULL;
+ } while (!found);
}
}
socket_lock(so, 0);
*/
if (inp->inp_state == INPCB_STATE_DEAD) {
lck_rw_done(pcbinfo->ipi_lock);
- return (ECONNABORTED);
+ return ECONNABORTED;
}
if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
lck_rw_done(pcbinfo->ipi_lock);
- return (EINVAL);
+ return EINVAL;
}
if (laddr.s_addr != INADDR_ANY) {
inp->inp_last_outifp = outif;
}
inp->inp_lport = lport;
- if (anonport)
+ if (anonport) {
inp->inp_flags |= INP_ANONPORT;
+ }
if (in_pcbinshash(inp, 1) != 0) {
inp->inp_laddr.s_addr = INADDR_ANY;
inp->inp_last_outifp = NULL;
inp->inp_lport = 0;
- if (anonport)
+ if (anonport) {
inp->inp_flags &= ~INP_ANONPORT;
+ }
lck_rw_done(pcbinfo->ipi_lock);
- return (EAGAIN);
+ return EAGAIN;
}
lck_rw_done(pcbinfo->ipi_lock);
sflt_notify(so, sock_evt_bound, NULL);
- return (0);
+ return 0;
}
-#define APN_FALLBACK_IP_FILTER(a) \
+#define APN_FALLBACK_IP_FILTER(a) \
(IN_LINKLOCAL(ntohl((a)->sin_addr.s_addr)) || \
IN_LOOPBACK(ntohl((a)->sin_addr.s_addr)) || \
IN_ZERONET(ntohl((a)->sin_addr.s_addr)) || \
IN_MULTICAST(ntohl((a)->sin_addr.s_addr)) || \
IN_PRIVATE(ntohl((a)->sin_addr.s_addr)))
-#define APN_FALLBACK_NOTIF_INTERVAL 2 /* Magic Number */
+#define APN_FALLBACK_NOTIF_INTERVAL	2	/* seconds between notifications */
static uint64_t last_apn_fallback = 0;
static boolean_t
-apn_fallback_required (proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4)
+apn_fallback_required(proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4)
{
uint64_t timenow;
struct sockaddr_storage lookup_default_addr;
VERIFY(proc != NULL);
- if (apn_fallbk_enabled == FALSE)
+ if (apn_fallbk_enabled == FALSE) {
return FALSE;
+ }
- if (proc == kernproc)
+ if (proc == kernproc) {
return FALSE;
+ }
- if (so && (so->so_options & SO_NOAPNFALLBK))
+ if (so && (so->so_options & SO_NOAPNFALLBK)) {
return FALSE;
+ }
timenow = net_uptime();
if ((timenow - last_apn_fallback) < APN_FALLBACK_NOTIF_INTERVAL) {
return FALSE;
}
- if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4))
+ if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4)) {
return FALSE;
+ }
/* Check if we have unscoped IPv6 default route through cellular */
bzero(&lookup_default_addr, sizeof(lookup_default_addr));
bzero(&sb, sizeof(struct stat64));
context = vfs_context_create(NULL);
- vn_stat_error = vn_stat(proc->p_textvp, &sb, NULL, 1, context);
+ vn_stat_error = vn_stat(proc->p_textvp, &sb, NULL, 1, 0, context);
(void)vfs_context_rele(context);
if (vn_stat_error != 0 ||
}
static void
-apn_fallback_trigger(proc_t proc)
+apn_fallback_trigger(proc_t proc, struct socket *so)
{
pid_t pid = 0;
struct kev_msg ev_msg;
proc_getexecutableuuid(proc, application_uuid,
sizeof(application_uuid));
- bzero(&ev_msg, sizeof (struct kev_msg));
+ bzero(&ev_msg, sizeof(struct kev_msg));
ev_msg.vendor_code = KEV_VENDOR_APPLE;
ev_msg.kev_class = KEV_NETWORK_CLASS;
ev_msg.kev_subclass = KEV_NETEVENT_SUBCLASS;
ev_msg.event_code = KEV_NETEVENT_APNFALLBACK;
bzero(&apnfallbk_data, sizeof(apnfallbk_data));
- apnfallbk_data.epid = pid;
- uuid_copy(apnfallbk_data.euuid, application_uuid);
+
+ if (so->so_flags & SOF_DELEGATED) {
+ apnfallbk_data.epid = so->e_pid;
+ uuid_copy(apnfallbk_data.euuid, so->e_uuid);
+ } else {
+ apnfallbk_data.epid = so->last_pid;
+ uuid_copy(apnfallbk_data.euuid, so->last_uuid);
+ }
ev_msg.dv[0].data_ptr = &apnfallbk_data;
ev_msg.dv[0].data_length = sizeof(apnfallbk_data);
int error = 0;
boolean_t restricted = FALSE;
- if (outif != NULL)
+ if (outif != NULL) {
*outif = NULL;
- if (nam->sa_len != sizeof (struct sockaddr_in))
- return (EINVAL);
- if (SIN(nam)->sin_family != AF_INET)
- return (EAFNOSUPPORT);
- if (raw == 0 && SIN(nam)->sin_port == 0)
- return (EADDRNOTAVAIL);
+ }
+ if (nam->sa_len != sizeof(struct sockaddr_in)) {
+ return EINVAL;
+ }
+ if (SIN(nam)->sin_family != AF_INET) {
+ return EAFNOSUPPORT;
+ }
+ if (raw == 0 && SIN(nam)->sin_port == 0) {
+ return EADDRNOTAVAIL;
+ }
/*
* If the destination address is INADDR_ANY,
if (inp->inp_laddr.s_addr != INADDR_ANY) {
VERIFY(ia == NULL);
*laddr = inp->inp_laddr;
- return (0);
+ return 0;
}
/*
* If the ifscope is specified by the caller (e.g. IP_PKTINFO)
* then it overrides the sticky ifscope set for the socket.
*/
- if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF))
+ if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) {
ifscope = inp->inp_boundifp->if_index;
+ }
/*
* If route is known or can be allocated now,
* Note that we should check the address family of the cached
* destination, in case of sharing the cache with IPv6.
*/
- if (ro->ro_rt != NULL)
+ if (ro->ro_rt != NULL) {
RT_LOCK_SPIN(ro->ro_rt);
+ }
if (ROUTE_UNUSABLE(ro) || ro->ro_dst.sa_family != AF_INET ||
SIN(&ro->ro_dst)->sin_addr.s_addr != SIN(nam)->sin_addr.s_addr ||
(inp->inp_socket->so_options & SO_DONTROUTE)) {
- if (ro->ro_rt != NULL)
+ if (ro->ro_rt != NULL) {
RT_UNLOCK(ro->ro_rt);
+ }
ROUTE_RELEASE(ro);
}
if (!(inp->inp_socket->so_options & SO_DONTROUTE) &&
(ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) {
- if (ro->ro_rt != NULL)
+ if (ro->ro_rt != NULL) {
RT_UNLOCK(ro->ro_rt);
+ }
ROUTE_RELEASE(ro);
/* No route yet, so try to acquire one */
- bzero(&ro->ro_dst, sizeof (struct sockaddr_in));
+ bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
ro->ro_dst.sa_family = AF_INET;
- ro->ro_dst.sa_len = sizeof (struct sockaddr_in);
+ ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
SIN(&ro->ro_dst)->sin_addr = SIN(nam)->sin_addr;
rtalloc_scoped(ro, ifscope);
- if (ro->ro_rt != NULL)
+ if (ro->ro_rt != NULL) {
RT_LOCK_SPIN(ro->ro_rt);
+ }
}
/* Sanitized local copy for interface address searches */
- bzero(&sin, sizeof (sin));
+ bzero(&sin, sizeof(sin));
sin.sin_family = AF_INET;
- sin.sin_len = sizeof (struct sockaddr_in);
+ sin.sin_len = sizeof(struct sockaddr_in);
sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
/*
* If we did not find (or use) a route, assume dest is reachable
VERIFY(ia == NULL);
ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
- if (ia == NULL)
+ if (ia == NULL) {
ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
+ }
error = ((ia == NULL) ? ENETUNREACH : 0);
if (apn_fallback_required(proc, inp->inp_socket,
- (void *)nam))
- apn_fallback_trigger(proc);
+ (void *)nam)) {
+ apn_fallback_trigger(proc, inp->inp_socket);
+ }
goto done;
}
RT_CONVERT_LOCK(ro->ro_rt);
ia = ifatoia(ro->ro_rt->rt_ifa);
IFA_ADDREF(&ia->ia_ifa);
+
+ /*
+	 * Mark the control block for notification of
+	 * a flow that might undergo clat46 translation.
+	 *
+	 * We defer the decision to the point when the
+	 * inpcb is being disposed of, because we only
+	 * want to send a notification if the flow was
+	 * ever used to carry data.
+ */
+ if (IS_INTF_CLAT46(ro->ro_rt->rt_ifp)) {
+ inp->inp_flags2 |= INP2_CLAT46_FLOW;
+ }
+
RT_UNLOCK(ro->ro_rt);
error = 0;
}
*/
VERIFY(ia == NULL);
ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
- if (ia == NULL)
+ if (ia == NULL) {
ia = ifatoia(ifa_ifwithaddr_scoped(SA(&sin), ifscope));
- if (ia == NULL)
+ }
+ if (ia == NULL) {
ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
+ }
if (ia == NULL) {
RT_LOCK(ro->ro_rt);
ia = ifatoia(ro->ro_rt->rt_ifa);
- if (ia != NULL)
+ if (ia != NULL) {
IFA_ADDREF(&ia->ia_ifa);
+ }
RT_UNLOCK(ro->ro_rt);
}
error = ((ia == NULL) ? ENETUNREACH : 0);
if (imo->imo_multicast_ifp != NULL && (ia == NULL ||
ia->ia_ifp != imo->imo_multicast_ifp)) {
ifp = imo->imo_multicast_ifp;
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
+ }
lck_rw_lock_shared(in_ifaddr_rwlock);
TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
- if (ia->ia_ifp == ifp)
+ if (ia->ia_ifp == ifp) {
break;
+ }
}
- if (ia != NULL)
+ if (ia != NULL) {
IFA_ADDREF(&ia->ia_ifa);
+ }
lck_rw_done(in_ifaddr_rwlock);
- if (ia == NULL)
+ if (ia == NULL) {
error = EADDRNOTAVAIL;
- else
+ } else {
error = 0;
+ }
}
IMO_UNLOCK(imo);
}
if (outif != NULL) {
struct ifnet *ifp;
- if (ro->ro_rt != NULL)
+ if (ro->ro_rt != NULL) {
ifp = ro->ro_rt->rt_ifp;
- else
+ } else {
ifp = ia->ia_ifp;
+ }
VERIFY(ifp != NULL);
IFA_CONVERT_LOCK(&ia->ia_ifa);
- ifnet_reference(ifp); /* for caller */
- if (*outif != NULL)
+ ifnet_reference(ifp); /* for caller */
+ if (*outif != NULL) {
ifnet_release(*outif);
+ }
*outif = ifp;
}
IFA_UNLOCK(&ia->ia_ifa);
SO_FILT_HINT_IFDENIED));
}
- return (error);
+ return error;
}
/*
int error;
struct socket *so = inp->inp_socket;
+#if CONTENT_FILTER
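+	/* Record the state change so content filters can detect it */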
+ if (so) {
+ so->so_state_change_cnt++;
+ }
+#endif
+
/*
* Call inner routine, to assign local interface address.
*/
- if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0)
- return (error);
+ if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0) {
+ return error;
+ }
socket_unlock(so, 0);
pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port,
* embryonic socket, it can get aborted if another thread is closing
* the listener (radar 7947600).
*/
- if ((so->so_flags & SOF_ABORTED) != 0)
- return (ECONNREFUSED);
+ if ((so->so_flags & SOF_ABORTED) != 0) {
+ return ECONNREFUSED;
+ }
if (pcb != NULL) {
in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
- return (EADDRINUSE);
+ return EADDRINUSE;
}
if (inp->inp_laddr.s_addr == INADDR_ANY) {
if (inp->inp_lport == 0) {
error = in_pcbbind(inp, NULL, p);
- if (error)
- return (error);
+ if (error) {
+ return error;
+ }
}
if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
/*
-	 * This routines can be refactored and handle this better
-	 * in future.
+	 * This routine can be refactored to handle this better
+	 * in the future.
*/
- if (inp->inp_lport == 0)
- return (EINVAL);
+ if (inp->inp_lport == 0) {
+ return EINVAL;
+ }
if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
/*
* Lock inversion issue, mostly with udp
}
inp->inp_faddr = sin->sin_addr;
inp->inp_fport = sin->sin_port;
- if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP)
+ if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
nstat_pcb_invalidate_cache(inp);
+ }
in_pcbrehash(inp);
lck_rw_done(inp->inp_pcbinfo->ipi_lock);
- return (0);
+ return 0;
}
void
{
struct socket *so = inp->inp_socket;
- if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP)
+ if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
nstat_pcb_cache(inp);
+ }
inp->inp_faddr.s_addr = INADDR_ANY;
inp->inp_fport = 0;
+#if CONTENT_FILTER
+ if (so) {
+ so->so_state_change_cnt++;
+ }
+#endif
+
if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
/* lock inversion issue, mostly with udp multicast packets */
socket_unlock(so, 0);
* so check for SOF_MP_SUBFLOW socket flag before detaching the PCB;
* when the socket is closed for real, SOF_MP_SUBFLOW would be cleared.
*/
- if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF))
+ if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) {
in_pcbdetach(inp);
+ }
}
void
}
#endif /* IPSEC */
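+	/*
+	 * Count UDP sockets that are closed without ever having
+	 * sent or received a packet, for the network API statistics
+	 */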
+ if (inp->inp_stat != NULL && SOCK_PROTO(so) == IPPROTO_UDP) {
+ if (inp->inp_stat->rxpackets == 0 && inp->inp_stat->txpackets == 0) {
+ INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_no_data);
+ }
+ }
+
/*
* Let NetworkStatistics know this PCB is going away
* before we detach it.
*/
if (nstat_collect &&
- (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP))
+ (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) {
nstat_pcb_detach(inp);
+ }
/* Free memory buffer held for generating keep alives */
if (inp->inp_keepalive_data != NULL) {
inp->inp_moptions = NULL;
sofreelastref(so, 0);
inp->inp_state = INPCB_STATE_DEAD;
+
+ /*
+	 * Enqueue a kernel event notification if the flow
+	 * used CLAT46 translation for data packets
+ */
+ if (inp->inp_flags2 & INP2_CLAT46_FLOW) {
+ /*
+		 * If any data bytes were exchanged over this flow,
+		 * schedule a notification to report that the flow
+		 * used client-side translation.
+ */
+ if (inp->inp_stat != NULL &&
+ (inp->inp_stat->txbytes != 0 ||
+ inp->inp_stat->rxbytes != 0)) {
+ if (so->so_flags & SOF_DELEGATED) {
+ in6_clat46_event_enqueue_nwk_wq_entry(
+ IN6_CLAT46_EVENT_V4_FLOW,
+ so->e_pid,
+ so->e_uuid);
+ } else {
+ in6_clat46_event_enqueue_nwk_wq_entry(
+ IN6_CLAT46_EVENT_V4_FLOW,
+ so->last_pid,
+ so->last_uuid);
+ }
+ }
+ }
+
/* makes sure we're not called twice from so_close */
so->so_flags |= SOF_PCBCLEARING;
}
}
- lck_rw_assert(ipi->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
+ LCK_RW_ASSERT(ipi->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
inp->inp_gencnt = ++ipi->ipi_gencnt;
/* access ipi in in_pcbremlists */
/* NOTREACHED */
}
lck_mtx_unlock(&inp->inpcb_mtx);
+
+#if NECP
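+	/* Give NECP a chance to clean up its state for this PCB */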
+ necp_inpcb_remove_cb(inp);
+#endif /* NECP */
+
lck_mtx_destroy(&inp->inpcb_mtx, ipi->ipi_lock_grp);
}
/* makes sure we're not called twice from so_close */
/*
* Do the malloc first in case it blocks.
*/
- MALLOC(sin, struct sockaddr_in *, sizeof (*sin), M_SONAME, M_WAITOK);
- if (sin == NULL)
- return (ENOBUFS);
- bzero(sin, sizeof (*sin));
+ MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK);
+ if (sin == NULL) {
+ return ENOBUFS;
+ }
+ bzero(sin, sizeof(*sin));
sin->sin_family = AF_INET;
- sin->sin_len = sizeof (*sin);
+ sin->sin_len = sizeof(*sin);
if ((inp = sotoinpcb(so)) == NULL) {
FREE(sin, M_SONAME);
- return (EINVAL);
+ return EINVAL;
}
sin->sin_port = inp->inp_lport;
sin->sin_addr = inp->inp_laddr;
*nam = (struct sockaddr *)sin;
- return (0);
+ return 0;
}
int
-in_getsockaddr_s(struct socket *so, struct sockaddr_storage *ss)
+in_getsockaddr_s(struct socket *so, struct sockaddr_in *ss)
{
- struct sockaddr_in *sin = SIN(ss);
+ struct sockaddr_in *sin = ss;
struct inpcb *inp;
VERIFY(ss != NULL);
- bzero(ss, sizeof (*ss));
+ bzero(ss, sizeof(*ss));
sin->sin_family = AF_INET;
- sin->sin_len = sizeof (*sin);
+ sin->sin_len = sizeof(*sin);
- if ((inp = sotoinpcb(so)) == NULL
-#if NECP
- || (necp_socket_should_use_flow_divert(inp))
-#endif /* NECP */
- )
- return (inp == NULL ? EINVAL : EPROTOTYPE);
+ if ((inp = sotoinpcb(so)) == NULL) {
+ return EINVAL;
+ }
sin->sin_port = inp->inp_lport;
sin->sin_addr = inp->inp_laddr;
- return (0);
+ return 0;
}
int
/*
* Do the malloc first in case it blocks.
*/
- MALLOC(sin, struct sockaddr_in *, sizeof (*sin), M_SONAME, M_WAITOK);
- if (sin == NULL)
- return (ENOBUFS);
- bzero((caddr_t)sin, sizeof (*sin));
+ MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK);
+ if (sin == NULL) {
+ return ENOBUFS;
+ }
+ bzero((caddr_t)sin, sizeof(*sin));
sin->sin_family = AF_INET;
- sin->sin_len = sizeof (*sin);
+ sin->sin_len = sizeof(*sin);
if ((inp = sotoinpcb(so)) == NULL) {
FREE(sin, M_SONAME);
- return (EINVAL);
+ return EINVAL;
}
sin->sin_port = inp->inp_fport;
sin->sin_addr = inp->inp_faddr;
*nam = (struct sockaddr *)sin;
- return (0);
-}
-
-int
-in_getpeeraddr_s(struct socket *so, struct sockaddr_storage *ss)
-{
- struct sockaddr_in *sin = SIN(ss);
- struct inpcb *inp;
-
- VERIFY(ss != NULL);
- bzero(ss, sizeof (*ss));
-
- sin->sin_family = AF_INET;
- sin->sin_len = sizeof (*sin);
-
- if ((inp = sotoinpcb(so)) == NULL
-#if NECP
- || (necp_socket_should_use_flow_divert(inp))
-#endif /* NECP */
- ) {
- return (inp == NULL ? EINVAL : EPROTOTYPE);
- }
-
- sin->sin_port = inp->inp_fport;
- sin->sin_addr = inp->inp_faddr;
- return (0);
+ return 0;
}
void
LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
#if INET6
- if (!(inp->inp_vflag & INP_IPV4))
+ if (!(inp->inp_vflag & INP_IPV4)) {
continue;
+ }
#endif /* INET6 */
if (inp->inp_faddr.s_addr != faddr.s_addr ||
- inp->inp_socket == NULL)
+ inp->inp_socket == NULL) {
continue;
- if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+ }
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
continue;
+ }
socket_lock(inp->inp_socket, 1);
(*notify)(inp, errno);
(void) in_pcb_checkstate(inp, WNT_RELEASE, 1);
*/
release = TRUE;
}
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
+ }
}
- if (rt == NULL || release)
+ if (rt == NULL || release) {
ROUTE_RELEASE(&inp->inp_route);
+ }
}
/*
*/
release = TRUE;
}
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
+ }
}
- if (rt == NULL || release)
+ if (rt == NULL || release) {
ROUTE_RELEASE(&inp->inp_route);
+ }
}
/*
pcbinfo->ipi_hashmask)];
LIST_FOREACH(inp, head, inp_hash) {
#if INET6
- if (!(inp->inp_vflag & INP_IPV4))
+ if (!(inp->inp_vflag & INP_IPV4)) {
continue;
+ }
#endif /* INET6 */
if (inp->inp_faddr.s_addr == INADDR_ANY &&
inp->inp_laddr.s_addr == laddr.s_addr &&
/*
* Found.
*/
- return (inp);
+ return inp;
}
}
/*
* Not found.
*/
KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0, 0, 0, 0, 0);
- return (NULL);
+ return NULL;
} else {
struct inpcbporthead *porthash;
struct inpcbport *phd;
porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
pcbinfo->ipi_porthashmask)];
LIST_FOREACH(phd, porthash, phd_hash) {
- if (phd->phd_port == lport)
+ if (phd->phd_port == lport) {
break;
+ }
}
if (phd != NULL) {
/*
LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
wildcard = 0;
#if INET6
- if (!(inp->inp_vflag & INP_IPV4))
+ if (!(inp->inp_vflag & INP_IPV4)) {
continue;
+ }
#endif /* INET6 */
- if (inp->inp_faddr.s_addr != INADDR_ANY)
+ if (inp->inp_faddr.s_addr != INADDR_ANY) {
wildcard++;
+ }
if (inp->inp_laddr.s_addr != INADDR_ANY) {
- if (laddr.s_addr == INADDR_ANY)
+ if (laddr.s_addr == INADDR_ANY) {
wildcard++;
- else if (inp->inp_laddr.s_addr !=
- laddr.s_addr)
+ } else if (inp->inp_laddr.s_addr !=
+ laddr.s_addr) {
continue;
+ }
} else {
- if (laddr.s_addr != INADDR_ANY)
+ if (laddr.s_addr != INADDR_ANY) {
wildcard++;
+ }
}
if (wildcard < matchwild) {
match = inp;
}
KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,
0, 0, 0, 0);
- return (match);
+ return match;
}
}
pcbinfo->ipi_hashmask)];
LIST_FOREACH(inp, head, inp_hash) {
#if INET6
- if (!(inp->inp_vflag & INP_IPV4))
+ if (!(inp->inp_vflag & INP_IPV4)) {
continue;
+ }
#endif /* INET6 */
- if (inp_restricted_recv(inp, ifp))
+ if (inp_restricted_recv(inp, ifp)) {
continue;
+ }
+
+#if NECP
+ if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
+ continue;
+ }
+#endif /* NECP */
if (inp->inp_faddr.s_addr == faddr.s_addr &&
inp->inp_laddr.s_addr == laddr.s_addr &&
* Found.
*/
*uid = kauth_cred_getuid(
- inp->inp_socket->so_cred);
+ inp->inp_socket->so_cred);
*gid = kauth_cred_getgid(
- inp->inp_socket->so_cred);
+ inp->inp_socket->so_cred);
}
lck_rw_done(pcbinfo->ipi_lock);
- return (found);
+ return found;
}
}
* Not found.
*/
lck_rw_done(pcbinfo->ipi_lock);
- return (0);
+ return 0;
}
head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
pcbinfo->ipi_hashmask)];
LIST_FOREACH(inp, head, inp_hash) {
#if INET6
- if (!(inp->inp_vflag & INP_IPV4))
+ if (!(inp->inp_vflag & INP_IPV4)) {
continue;
+ }
#endif /* INET6 */
- if (inp_restricted_recv(inp, ifp))
+ if (inp_restricted_recv(inp, ifp)) {
+ continue;
+ }
+
+#if NECP
+ if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
continue;
+ }
+#endif /* NECP */
if (inp->inp_faddr.s_addr == INADDR_ANY &&
inp->inp_lport == lport) {
if (inp->inp_laddr.s_addr == laddr.s_addr) {
if ((found = (inp->inp_socket != NULL))) {
*uid = kauth_cred_getuid(
- inp->inp_socket->so_cred);
+ inp->inp_socket->so_cred);
*gid = kauth_cred_getgid(
- inp->inp_socket->so_cred);
+ inp->inp_socket->so_cred);
}
lck_rw_done(pcbinfo->ipi_lock);
- return (found);
+ return found;
} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
#if INET6
if (inp->inp_socket &&
- SOCK_CHECK_DOM(inp->inp_socket, PF_INET6))
+ SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
local_wild_mapped = inp;
- else
+ } else
#endif /* INET6 */
- local_wild = inp;
+ local_wild = inp;
}
}
}
if (local_wild_mapped != NULL) {
if ((found = (local_wild_mapped->inp_socket != NULL))) {
*uid = kauth_cred_getuid(
- local_wild_mapped->inp_socket->so_cred);
+ local_wild_mapped->inp_socket->so_cred);
*gid = kauth_cred_getgid(
- local_wild_mapped->inp_socket->so_cred);
+ local_wild_mapped->inp_socket->so_cred);
}
lck_rw_done(pcbinfo->ipi_lock);
- return (found);
+ return found;
}
#endif /* INET6 */
lck_rw_done(pcbinfo->ipi_lock);
- return (0);
+ return 0;
}
if ((found = (local_wild->inp_socket != NULL))) {
*uid = kauth_cred_getuid(
- local_wild->inp_socket->so_cred);
+ local_wild->inp_socket->so_cred);
*gid = kauth_cred_getgid(
- local_wild->inp_socket->so_cred);
+ local_wild->inp_socket->so_cred);
}
lck_rw_done(pcbinfo->ipi_lock);
- return (found);
+ return found;
}
/*
pcbinfo->ipi_hashmask)];
LIST_FOREACH(inp, head, inp_hash) {
#if INET6
- if (!(inp->inp_vflag & INP_IPV4))
+ if (!(inp->inp_vflag & INP_IPV4)) {
continue;
+ }
#endif /* INET6 */
- if (inp_restricted_recv(inp, ifp))
+ if (inp_restricted_recv(inp, ifp)) {
continue;
+ }
+
+#if NECP
+ if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
+ continue;
+ }
+#endif /* NECP */
if (inp->inp_faddr.s_addr == faddr.s_addr &&
inp->inp_laddr.s_addr == laddr.s_addr &&
if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
WNT_STOPUSING) {
lck_rw_done(pcbinfo->ipi_lock);
- return (inp);
+ return inp;
} else {
/* it's there but dead, say it isn't found */
lck_rw_done(pcbinfo->ipi_lock);
- return (NULL);
+ return NULL;
}
}
}
* Not found.
*/
lck_rw_done(pcbinfo->ipi_lock);
- return (NULL);
+ return NULL;
}
head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
pcbinfo->ipi_hashmask)];
LIST_FOREACH(inp, head, inp_hash) {
#if INET6
- if (!(inp->inp_vflag & INP_IPV4))
+ if (!(inp->inp_vflag & INP_IPV4)) {
continue;
+ }
#endif /* INET6 */
- if (inp_restricted_recv(inp, ifp))
+ if (inp_restricted_recv(inp, ifp)) {
continue;
+ }
+
+#if NECP
+ if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
+ continue;
+ }
+#endif /* NECP */
if (inp->inp_faddr.s_addr == INADDR_ANY &&
inp->inp_lport == lport) {
if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
WNT_STOPUSING) {
lck_rw_done(pcbinfo->ipi_lock);
- return (inp);
+ return inp;
} else {
/* it's dead; say it isn't found */
lck_rw_done(pcbinfo->ipi_lock);
- return (NULL);
+ return NULL;
}
} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
#if INET6
- if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6))
+ if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
local_wild_mapped = inp;
- else
+ } else
#endif /* INET6 */
- local_wild = inp;
+ local_wild = inp;
}
}
}
if (in_pcb_checkstate(local_wild_mapped,
WNT_ACQUIRE, 0) != WNT_STOPUSING) {
lck_rw_done(pcbinfo->ipi_lock);
- return (local_wild_mapped);
+ return local_wild_mapped;
} else {
/* it's dead; say it isn't found */
lck_rw_done(pcbinfo->ipi_lock);
- return (NULL);
+ return NULL;
}
}
#endif /* INET6 */
lck_rw_done(pcbinfo->ipi_lock);
- return (NULL);
+ return NULL;
}
if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
lck_rw_done(pcbinfo->ipi_lock);
- return (local_wild);
+ return local_wild;
}
/*
* It's either not found or is already dead.
*/
lck_rw_done(pcbinfo->ipi_lock);
- return (NULL);
+ return NULL;
}
/*
*
* @param inp Pointer to internet protocol control block
- * @param locked Implies if ipi_lock (protecting pcb list)
- *		is already locked or not.
+ * @param locked Indicates whether ipi_lock (protecting the pcb list)
+ *	is already held by the caller.
*
* @return int error on failure and 0 on success
*/
if (!locked) {
lck_rw_done(pcbinfo->ipi_lock);
}
- return (ECONNABORTED);
+ return ECONNABORTED;
}
#if INET6
- if (inp->inp_vflag & INP_IPV6)
+ if (inp->inp_vflag & INP_IPV6) {
hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
- else
+ } else
#endif /* INET6 */
- hashkey_faddr = inp->inp_faddr.s_addr;
+ hashkey_faddr = inp->inp_faddr.s_addr;
inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
inp->inp_fport, pcbinfo->ipi_hashmask);
* Go through port list and look for a head for this lport.
*/
LIST_FOREACH(phd, pcbporthash, phd_hash) {
- if (phd->phd_port == inp->inp_lport)
+ if (phd->phd_port == inp->inp_lport) {
break;
+ }
}
/*
* If none exists, malloc one and tack it on.
*/
if (phd == NULL) {
- MALLOC(phd, struct inpcbport *, sizeof (struct inpcbport),
+ MALLOC(phd, struct inpcbport *, sizeof(struct inpcbport),
M_PCB, M_WAITOK);
if (phd == NULL) {
- if (!locked)
+ if (!locked) {
lck_rw_done(pcbinfo->ipi_lock);
- return (ENOBUFS); /* XXX */
+ }
+ return ENOBUFS; /* XXX */
}
phd->phd_port = inp->inp_lport;
LIST_INIT(&phd->phd_pcblist);
}
VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
+
+
inp->inp_phd = phd;
LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
inp->inp_flags2 |= INP2_INHASHLIST;
- if (!locked)
+ if (!locked) {
lck_rw_done(pcbinfo->ipi_lock);
+ }
#if NECP
// This call catches the original setting of the local address
inp_update_necp_policy(inp, NULL, NULL, 0);
#endif /* NECP */
- return (0);
+ return 0;
}
/*
u_int32_t hashkey_faddr;
#if INET6
- if (inp->inp_vflag & INP_IPV6)
+ if (inp->inp_vflag & INP_IPV6) {
hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
- else
+ } else
#endif /* INET6 */
- hashkey_faddr = inp->inp_faddr.s_addr;
+ hashkey_faddr = inp->inp_faddr.s_addr;
inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask);
}
if (inp->inp_flags2 & INP2_IN_FCTREE) {
- inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED|INPFC_REMOVE));
+ inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED | INPFC_REMOVE));
VERIFY(!(inp->inp_flags2 & INP2_IN_FCTREE));
}
* STOPUSING, if success we're good, if it's in use, will
* be marked later
*/
- if (locked == 0)
+ if (locked == 0) {
socket_lock(pcb->inp_socket, 1);
+ }
pcb->inp_state = INPCB_STATE_DEAD;
stopusing:
__func__, pcb, pcb->inp_socket);
/* NOTREACHED */
}
- if (locked == 0)
+ if (locked == 0) {
socket_unlock(pcb->inp_socket, 1);
+ }
inpcb_gc_sched(pcb->inp_pcbinfo, INPCB_TIMER_FAST);
origwant = *wantcnt;
- if ((UInt16) origwant == 0xffff) /* should stop using */
- return (WNT_STOPUSING);
+ if ((UInt16) origwant == 0xffff) { /* should stop using */
+ return WNT_STOPUSING;
+ }
newwant = 0xffff;
if ((UInt16) origwant == 0) {
-		/* try to mark it as unsuable now */
+		/* try to mark it as unusable now */
OSCompareAndSwap(origwant, newwant, wantcnt);
}
- return (WNT_STOPUSING);
+ return WNT_STOPUSING;
case WNT_ACQUIRE:
/*
origwant = *wantcnt;
if ((UInt16) origwant == 0xffff) {
/* should stop using */
- return (WNT_STOPUSING);
+ return WNT_STOPUSING;
}
newwant = origwant + 1;
} while (!OSCompareAndSwap(origwant, newwant, wantcnt));
- return (WNT_ACQUIRE);
+ return WNT_ACQUIRE;
case WNT_RELEASE:
/*
* Release reference. If result is null and pcb state
* is DEAD, set wanted bit to STOPUSING
*/
- if (locked == 0)
+ if (locked == 0) {
socket_lock(pcb->inp_socket, 1);
+ }
do {
origwant = *wantcnt;
}
if ((UInt16) origwant == 0xffff) {
/* should stop using */
- if (locked == 0)
+ if (locked == 0) {
socket_unlock(pcb->inp_socket, 1);
- return (WNT_STOPUSING);
+ }
+ return WNT_STOPUSING;
}
newwant = origwant - 1;
} while (!OSCompareAndSwap(origwant, newwant, wantcnt));
- if (pcb->inp_state == INPCB_STATE_DEAD)
+ if (pcb->inp_state == INPCB_STATE_DEAD) {
goto stopusing;
+ }
if (pcb->inp_socket->so_usecount < 0) {
panic("%s: RELEASE pcb=%p so=%p usecount is negative\n",
__func__, pcb, pcb->inp_socket);
/* NOTREACHED */
}
- if (locked == 0)
+ if (locked == 0) {
socket_unlock(pcb->inp_socket, 1);
- return (WNT_RELEASE);
+ }
+ return WNT_RELEASE;
default:
panic("%s: so=%p not a valid state =%x\n", __func__,
}
/* NOTREACHED */
- return (mode);
+ return mode;
}
/*
void
inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat)
{
- bzero(inp_compat, sizeof (*inp_compat));
+ bzero(inp_compat, sizeof(*inp_compat));
inp_compat->inp_fport = inp->inp_fport;
inp_compat->inp_lport = inp->inp_lport;
inp_compat->nat_owner = 0;
inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
}
+#if !CONFIG_EMBEDDED
void
inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp)
{
xinp->inp_depend6.inp6_ifindex = 0;
xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
}
+#endif /* !CONFIG_EMBEDDED */
/*
* The following routines implement this scheme:
{
struct route *src = &inp->inp_route;
- lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+ socket_lock_assert_owned(inp->inp_socket);
/*
* If the route in the PCB is stale or not for IPv4, blow it away;
* this is possible in the case of IPv4-mapped address case.
*/
- if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET)
+ if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET) {
ROUTE_RELEASE(src);
+ }
- route_copyout(dst, src, sizeof (*dst));
+ route_copyout(dst, src, sizeof(*dst));
}
void
{
struct route *dst = &inp->inp_route;
- lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+ socket_lock_assert_owned(inp->inp_socket);
/* Minor sanity check */
- if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET)
+ if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
panic("%s: wrong or corrupted route: %p", __func__, src);
+ }
- route_copyin(src, dst, sizeof (*src));
+ route_copyin(src, dst, sizeof(*src));
}
/*
if ((ifscope > (unsigned)if_index) || (ifscope != IFSCOPE_NONE &&
(ifp = ifindex2ifnet[ifscope]) == NULL)) {
ifnet_head_done();
- return (ENXIO);
+ return ENXIO;
}
ifnet_head_done();
* exact match for the embedded interface scope.
*/
inp->inp_boundifp = ifp;
- if (inp->inp_boundifp == NULL)
+ if (inp->inp_boundifp == NULL) {
inp->inp_flags &= ~INP_BOUND_IF;
- else
+ } else {
inp->inp_flags |= INP_BOUND_IF;
+ }
/* Blow away any cached route in the PCB */
ROUTE_RELEASE(&inp->inp_route);
- if (pifp != NULL)
+ if (pifp != NULL) {
*pifp = ifp;
+ }
- return (0);
+ return 0;
}
/*
ROUTE_RELEASE(&inp->inp_route);
}
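+
+/*
+ * Mark this PCB so that it will not use constrained interfaces
+ * (INP2_NO_IFF_CONSTRAINED) and drop any cached route so the
+ * restriction takes effect on the next output.
+ */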
+void
+inp_set_noconstrained(struct inpcb *inp)
+{
+ inp->inp_flags2 |= INP2_NO_IFF_CONSTRAINED;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+}
+
void
inp_set_awdl_unrestricted(struct inpcb *inp)
{
u_int32_t flowhash = 0;
struct inpcb *tmp_inp = NULL;
- if (inp_hash_seed == 0)
+ if (inp_hash_seed == 0) {
inp_hash_seed = RandomULong();
+ }
- bzero(&fh, sizeof (fh));
+ bzero(&fh, sizeof(fh));
- bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof (fh.infh_laddr));
- bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof (fh.infh_faddr));
+ bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof(fh.infh_laddr));
+ bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof(fh.infh_faddr));
fh.infh_lport = inp->inp_lport;
fh.infh_fport = inp->inp_fport;
fh.infh_rand2 = RandomULong();
try_again:
- flowhash = net_flowhash(&fh, sizeof (fh), inp_hash_seed);
+ flowhash = net_flowhash(&fh, sizeof(fh), inp_hash_seed);
if (flowhash == 0) {
/* try to get a non-zero flowhash */
inp_hash_seed = RandomULong();
inp->inp_flags2 |= INP2_IN_FCTREE;
lck_mtx_unlock(&inp_fc_lck);
- return (flowhash);
+ return flowhash;
}
void
inp = inp_fc_getinp(flowhash, 0);
- if (inp == NULL)
+ if (inp == NULL) {
return;
+ }
inp_fc_feedback(inp);
}
static inline int
infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
{
- return (memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
- sizeof(inp1->inp_flowhash)));
+ return memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
+ sizeof(inp1->inp_flowhash));
}
static struct inpcb *
inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
{
struct inpcb *inp = NULL;
int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;

lck_mtx_lock_spin(&inp_fc_lck);
key_inp.inp_flowhash = flowhash;
inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
if (inp == NULL) {
/* inp is not present, return */
lck_mtx_unlock(&inp_fc_lck);
- return (NULL);
+ return NULL;
}
if (flags & INPFC_REMOVE) {
RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
lck_mtx_unlock(&inp_fc_lck);
- bzero(&(inp->infc_link), sizeof (inp->infc_link));
+ bzero(&(inp->infc_link), sizeof(inp->infc_link));
inp->inp_flags2 &= ~INP2_IN_FCTREE;
- return (NULL);
+ return NULL;
}
- if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
inp = NULL;
+ }
lck_mtx_unlock(&inp_fc_lck);
- return (inp);
+ return inp;
}
static void
inp_fc_feedback(struct inpcb *inp)
{
struct socket *so = inp->inp_socket;

/* we already hold a want_cnt on this inp, socket can't be null */
VERIFY(so != NULL);
socket_lock(so, 1);

if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
socket_unlock(so, 1);
return;
}
- if (inp->inp_sndinprog_cnt > 0)
+ if (inp->inp_sndinprog_cnt > 0) {
inp->inp_flags |= INP_FC_FEEDBACK;
+ }
/*
* Return if the connection is not in flow-controlled state.
* This can happen if the connection experienced
* loss while it was in flow controlled state
*/
if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
socket_unlock(so, 1);
return;
}
inp_reset_fc_state(inp);
- if (SOCK_TYPE(so) == SOCK_STREAM)
+ if (SOCK_TYPE(so) == SOCK_STREAM) {
inp_fc_unthrottle_tcp(inp);
+ }
socket_unlock(so, 1);
}
}
/* Give a write wakeup to unblock the socket */
- if (needwakeup)
+ if (needwakeup) {
sowwakeup(so);
+ }
}
int
inp_set_fc_state(struct inpcb *inp, int advcode)
{
struct inpcb *tmp_inp = NULL;
/*
* If there was a feedback from the interface when
* send operation was in progress, we should ignore
* this flow advisory to avoid a race between setting
* flow controlled state and receiving feedback from
* the interface
*/
- if (inp->inp_flags & INP_FC_FEEDBACK)
- return (0);
+ if (inp->inp_flags & INP_FC_FEEDBACK) {
+ return 0;
+ }
inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
INPFC_SOLOCKED)) != NULL) {
- if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING)
- return (0);
+ if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ return 0;
+ }
VERIFY(tmp_inp == inp);
switch (advcode) {
case FADV_FLOW_CONTROLLED:
inp->inp_flags |= INP_FLOW_CONTROLLED;
break;
case FADV_SUSPENDED:
inp->inp_flags |= INP_FLOW_SUSPENDED;
soevent(inp->inp_socket,
(SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));

/* Record the fact that suspend event was sent */
inp->inp_socket->so_flags |= SOF_SUSPENDED;
break;
}
- return (1);
+ return 1;
}
- return (0);
+ return 0;
}
/*
* Handler for SO_FLUSH socket option.
*/
int
inp_flush(struct inpcb *inp, int optval)
{
u_int32_t flowhash = inp->inp_flowhash;
struct ifnet *rtifp, *oifp;
/* Either all classes or one of the valid ones */
- if (optval != SO_TC_ALL && !SO_VALID_TC(optval))
- return (EINVAL);
+ if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) {
+ return EINVAL;
+ }
/* We need a flow hash for identification */
- if (flowhash == 0)
- return (0);
+ if (flowhash == 0) {
+ return 0;
+ }
/* Grab the interfaces from the route and pcb */
rtifp = ((inp->inp_route.ro_rt != NULL) ?
inp->inp_route.ro_rt->rt_ifp : NULL);
oifp = inp->inp_last_outifp;
- if (rtifp != NULL)
+ if (rtifp != NULL) {
if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
- if (oifp != NULL && oifp != rtifp)
+ }
+ if (oifp != NULL && oifp != rtifp) {
if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
+ }
- return (0);
+ return 0;
}
void
inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
{
struct socket *so = inp->inp_socket;
soprocinfo->spi_pid = so->last_pid;
- if (so->last_pid != 0)
+ strlcpy(&soprocinfo->spi_proc_name[0], &inp->inp_last_proc_name[0],
+ sizeof(soprocinfo->spi_proc_name));
+ if (so->last_pid != 0) {
uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
+ }
/*
* When not delegated, the effective pid is the same as the real pid
*/
if (so->so_flags & SOF_DELEGATED) {
soprocinfo->spi_delegated = 1;
soprocinfo->spi_epid = so->e_pid;
} else {
soprocinfo->spi_delegated = 0;
soprocinfo->spi_epid = so->last_pid;
}
+ strlcpy(&soprocinfo->spi_e_proc_name[0], &inp->inp_e_proc_name[0],
+ sizeof(soprocinfo->spi_e_proc_name));
}
int
inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
struct so_procinfo *soprocinfo)
{
struct inpcb *inp = NULL;
int found = 0;
- bzero(soprocinfo, sizeof (struct so_procinfo));
+ bzero(soprocinfo, sizeof(struct so_procinfo));
- if (!flowhash)
- return (-1);
+ if (!flowhash) {
+ return -1;
+ }
lck_rw_lock_shared(pcbinfo->ipi_lock);
LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
if (inp->inp_state != INPCB_STATE_DEAD &&
inp->inp_socket != NULL &&
inp->inp_flowhash == flowhash) {
found = 1;
inp_get_soprocinfo(inp, soprocinfo);
break;
}
}
lck_rw_done(pcbinfo->ipi_lock);
- return (found);
+ return found;
}
#if CONFIG_PROC_UUID_POLICY
void
inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr,
struct sockaddr *override_remote_addr, u_int override_bound_interface)
{
necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface);
if (necp_socket_should_rescope(inp) &&
- inp->inp_lport == 0 &&
- inp->inp_laddr.s_addr == INADDR_ANY &&
- IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
+ inp->inp_lport == 0 &&
+ inp->inp_laddr.s_addr == INADDR_ANY &&
+ IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
// If we should rescope, and the socket is not yet bound
// to an interface, bind it to the rescoped interface.
inp_bindif(inp, necp_socket_get_rescope_if_index(inp), NULL);
}
int
inp_update_policy(struct inpcb *inp)
{
#if CONFIG_PROC_UUID_POLICY
struct socket *so = inp->inp_socket;
uint32_t pflags = 0;
int32_t ogencnt;
int err = 0;
if (!net_io_policy_uuid ||
- so == NULL || inp->inp_state == INPCB_STATE_DEAD)
- return (0);
+ so == NULL || inp->inp_state == INPCB_STATE_DEAD) {
+ return 0;
+ }
/*
* Kernel-created sockets that aren't delegating other sockets
* are currently exempted from UUID policy checks.
*/
- if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED))
- return (0);
+ if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) {
+ return 0;
+ }
ogencnt = so->so_policy_gencnt;
err = proc_uuid_policy_lookup(((so->so_flags & SOF_DELEGATED) ?
so->e_uuid : so->last_uuid), &pflags, &so->so_policy_gencnt);
/*
* Discard the cached generation count if the entry is gone (ENOENT),
* so that we go through the checks below.
*/
- if (err == ENOENT && ogencnt != 0)
+ if (err == ENOENT && ogencnt != 0) {
so->so_policy_gencnt = 0;
+ }
/*
* If the generation count has changed, inspect the policy flags
* and act accordingly.  If a policy flag was previously set and
* the UUID is no longer present in the table (ENOENT), treat it
* as if the flag has been cleared.
*/
#endif /* NECP */
}
- return ((err == ENOENT) ? 0 : err);
+ return (err == ENOENT) ? 0 : err;
#else /* !CONFIG_PROC_UUID_POLICY */
#pragma unused(inp)
- return (0);
+ return 0;
#endif /* !CONFIG_PROC_UUID_POLICY */
}
static boolean_t
_inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
{
VERIFY(inp != NULL);

/*
* Inbound restrictions.
*/
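+/*
+ * In short: interface-class restrictions (cellular, expensive,
+ * constrained, AWDL, intcoproc) always apply; the IFEF_RESTRICTED_RECV
+ * gate below additionally blocks receive unless the socket accepts
+ * traffic from any interface (INP_RECV_ANYIF) or is explicitly bound
+ * to this interface.
+ */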
- if (!sorestrictrecv)
- return (FALSE);
+ if (!sorestrictrecv) {
+ return FALSE;
+ }
- if (ifp == NULL)
- return (FALSE);
+ if (ifp == NULL) {
+ return FALSE;
+ }
- if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp))
- return (TRUE);
+ if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
+ return TRUE;
+ }
- if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp))
- return (TRUE);
+ if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
+ return TRUE;
+ }
- if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp))
- return (TRUE);
+ if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
+ return TRUE;
+ }
- if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV))
- return (FALSE);
+ if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
+ return TRUE;
+ }
- if (inp->inp_flags & INP_RECV_ANYIF)
- return (FALSE);
+ if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) {
+ return FALSE;
+ }
- if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp)
- return (FALSE);
+ if (inp->inp_flags & INP_RECV_ANYIF) {
+ return FALSE;
+ }
- if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp))
- return (TRUE);
+ if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) {
+ return FALSE;
+ }
- return (TRUE);
+ if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
+ return TRUE;
+ }
+
+ return TRUE;
}
boolean_t
inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
{
boolean_t ret;

ret = _inp_restricted_recv(inp, ifp);
if (ret == TRUE && log_restricted) {
printf("pid %d (%s) is unable to receive packets on %s\n",
current_proc()->p_pid, proc_best_name(current_proc()),
ifp->if_xname);
}
- return (ret);
+ return ret;
}
static boolean_t
_inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
{
VERIFY(inp != NULL);

/*
* Outbound restrictions.
*/
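+/*
+ * Unlike the receive path, only the interface-class checks apply
+ * here; there is no send-side analogue of IFEF_RESTRICTED_RECV.
+ */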
- if (!sorestrictsend)
- return (FALSE);
+ if (!sorestrictsend) {
+ return FALSE;
+ }
- if (ifp == NULL)
- return (FALSE);
+ if (ifp == NULL) {
+ return FALSE;
+ }
- if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp))
- return (TRUE);
+ if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
+ return TRUE;
+ }
- if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp))
- return (TRUE);
+ if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
+ return TRUE;
+ }
- if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp))
- return (TRUE);
+ if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
+ return TRUE;
+ }
- if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp))
- return (TRUE);
+ if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
+ return TRUE;
+ }
- return (FALSE);
+ return FALSE;
}
boolean_t
inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
{
boolean_t ret;

ret = _inp_restricted_send(inp, ifp);
if (ret == TRUE && log_restricted) {
printf("pid %d (%s) is unable to transmit packets on %s\n",
current_proc()->p_pid, proc_best_name(current_proc()),
ifp->if_xname);
}
- return (ret);
+ return ret;
}
inline void
inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack)
{
struct ifnet *ifp = inp->inp_last_outifp;
struct socket *so = inp->inp_socket;
if (ifp != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
- (ifp->if_type == IFT_CELLULAR ||
- ifp->if_subfamily == IFNET_SUBFAMILY_WIFI)) {
+ (ifp->if_type == IFT_CELLULAR || IFNET_IS_WIFI(ifp))) {
int32_t unsent;
so->so_snd.sb_flags |= SB_SNDBYTE_CNT;
/*
* There can be data outstanding before the connection
* becomes established -- TFO case
*/
- if (so->so_snd.sb_cc > 0)
+ if (so->so_snd.sb_cc > 0) {
inp_incr_sndbytes_total(so, so->so_snd.sb_cc);
+ }
unsent = inp_get_sndbytes_allunsent(so, th_ack);
- if (unsent > 0)
+ if (unsent > 0) {
inp_incr_sndbytes_unsent(so, unsent);
+ }
}
}
inline void
inp_decr_sndbytes_unsent(struct socket *so, int32_t len)
{
+ if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
+ return;
+ }
+
struct inpcb *inp = (struct inpcb *)so->so_pcb;
struct ifnet *ifp = inp->inp_last_outifp;
- if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT))
- return;
-
if (ifp != NULL) {
- if (ifp->if_sndbyte_unsent >= len)
+ if (ifp->if_sndbyte_unsent >= len) {
OSAddAtomic64(-len, &ifp->if_sndbyte_unsent);
- else
+ } else {
ifp->if_sndbyte_unsent = 0;
+ }
}
}
inline void
inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
{
int32_t len;
- if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT))
+ if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
return;
+ }
len = inp_get_sndbytes_allunsent(so, th_ack);
inp_decr_sndbytes_unsent(so, len);
}
+
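+/*
+ * Record network activity for this PCB by marking the slot for the
+ * current net_uptime() in its activity bitmap.
+ */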
+inline void
+inp_set_activity_bitmap(struct inpcb *inp)
+{
+ in_stat_set_activity_bitmap(&inp->inp_nw_activity, net_uptime());
+}
+
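+/* Copy out a snapshot of this PCB's activity bitmap. */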
+inline void
+inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab)
+{
+ bcopy(&inp->inp_nw_activity, ab, sizeof(*ab));
+}
+
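+/*
+ * Remember the name of the process (and, for delegated sockets, the
+ * effective process) that last used this socket, so it can later be
+ * reported through inp_get_soprocinfo().
+ */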
+void
+inp_update_last_owner(struct socket *so, struct proc *p, struct proc *ep)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+
+ if (inp == NULL) {
+ return;
+ }
+
+ if (p != NULL) {
+ strlcpy(&inp->inp_last_proc_name[0], proc_name_address(p), sizeof(inp->inp_last_proc_name));
+ }
+ if (so->so_flags & SOF_DELEGATED) {
+ if (ep != NULL) {
+ strlcpy(&inp->inp_e_proc_name[0], proc_name_address(ep), sizeof(inp->inp_e_proc_name));
+ } else {
+ inp->inp_e_proc_name[0] = 0;
+ }
+ } else {
+ inp->inp_e_proc_name[0] = 0;
+ }
+}
+
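+/*
+ * Propagate the cached owner process names from a listening socket to
+ * a socket derived from it (e.g. one being accepted).
+ */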
+void
+inp_copy_last_owner(struct socket *so, struct socket *head)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+ struct inpcb *head_inp = (struct inpcb *)head->so_pcb;
+
+ if (inp == NULL || head_inp == NULL) {
+ return;
+ }
+
+ strlcpy(&inp->inp_last_proc_name[0], &head_inp->inp_last_proc_name[0], sizeof(inp->inp_last_proc_name));
+ strlcpy(&inp->inp_e_proc_name[0], &head_inp->inp_e_proc_name[0], sizeof(inp->inp_e_proc_name));
+}