X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/490019cf9519204c5fb36b2fba54ceb983bb6b72..527f99514973766e9c0382a4d8550dfb00f54939:/bsd/kern/uipc_socket.c

diff --git a/bsd/kern/uipc_socket.c b/bsd/kern/uipc_socket.c
index 29568b48a..7d744eff9 100644
--- a/bsd/kern/uipc_socket.c
+++ b/bsd/kern/uipc_socket.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998-2015 Apple Inc. All rights reserved.
+ * Copyright (c) 1998-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
@@ -98,10 +98,13 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -112,12 +115,14 @@
 #include
 #include
 #include
+#include
+
 #include
 #include
 #include
+#include
 
 #if CONFIG_MACF
-#include
 #include
 #endif /* MAC */
@@ -136,6 +141,7 @@
 
 /* TODO: this should be in a header file somewhere */
 extern char *proc_name_address(void *p);
+extern char *proc_best_name(proc_t);
 
 static u_int32_t so_cache_hw;       /* High water mark for socache */
 static u_int32_t so_cache_timeouts; /* number of timeouts */
@@ -154,35 +160,61 @@ static lck_mtx_t *so_cache_mtx;
 
 #include
 
+static int filt_sorattach(struct knote *kn, struct kevent_internal_s *kev);
 static void filt_sordetach(struct knote *kn);
 static int filt_soread(struct knote *kn, long hint);
+static int filt_sortouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_sorprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
+
+static int filt_sowattach(struct knote *kn, struct kevent_internal_s *kev);
 static void filt_sowdetach(struct knote *kn);
 static int filt_sowrite(struct knote *kn, long hint);
+static int filt_sowtouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_sowprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
+
+static int filt_sockattach(struct knote *kn, struct kevent_internal_s *kev);
 static void filt_sockdetach(struct knote *kn);
 static int filt_sockev(struct knote *kn, long hint);
-static void filt_socktouch(struct knote *kn, struct kevent_internal_s *kev,
-    long type);
+static int filt_socktouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_sockprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
 
 static int sooptcopyin_timeval(struct sockopt *, struct timeval *);
 static int sooptcopyout_timeval(struct sockopt *, const struct timeval *);
 
-static struct filterops soread_filtops = {
+SECURITY_READ_ONLY_EARLY(struct filterops) soread_filtops = {
     .f_isfd = 1,
+    .f_attach = filt_sorattach,
     .f_detach = filt_sordetach,
     .f_event = filt_soread,
+    .f_touch = filt_sortouch,
+    .f_process = filt_sorprocess,
 };
 
-static struct filterops sowrite_filtops = {
+SECURITY_READ_ONLY_EARLY(struct filterops) sowrite_filtops = {
     .f_isfd = 1,
+    .f_attach = filt_sowattach,
     .f_detach = filt_sowdetach,
     .f_event = filt_sowrite,
+    .f_touch = filt_sowtouch,
+    .f_process = filt_sowprocess,
 };
 
-static struct filterops sock_filtops = {
+SECURITY_READ_ONLY_EARLY(struct filterops) sock_filtops = {
     .f_isfd = 1,
+    .f_attach = filt_sockattach,
     .f_detach = filt_sockdetach,
     .f_event = filt_sockev,
     .f_touch = filt_socktouch,
+    .f_process = filt_sockprocess,
+};
+
+SECURITY_READ_ONLY_EARLY(struct filterops) soexcept_filtops = {
+    .f_isfd = 1,
+    .f_attach = filt_sorattach,
+    .f_detach = filt_sordetach,
+    .f_event = filt_soread,
+    .f_touch = filt_sortouch,
+    .f_process = filt_sorprocess,
 };
 
 SYSCTL_DECL(_kern_ipc);
@@ -193,6 +225,10 @@ int socket_debug = 0;
 SYSCTL_INT(_kern_ipc, OID_AUTO, socket_debug,
     CTLFLAG_RW | CTLFLAG_LOCKED, &socket_debug, 0, "");
 
+static unsigned long sodefunct_calls = 0;
+SYSCTL_LONG(_kern_ipc, OID_AUTO, sodefunct_calls, CTLFLAG_LOCKED,
+    &sodefunct_calls, "");
+
 static int socket_zone = M_SOCKET;
 so_gen_t so_gencnt;    /* generation count for sockets */
 
@@ -280,18 +316,24 @@ int soreserveheadroom = 1;
 SYSCTL_INT(_kern_ipc, OID_AUTO, soreserveheadroom, CTLFLAG_RW | CTLFLAG_LOCKED,
     &soreserveheadroom, 0, "To allocate contiguous datagram buffers");
 
+#if (DEBUG || DEVELOPMENT)
+int so_notsent_lowat_check = 1;
+SYSCTL_INT(_kern_ipc, OID_AUTO, notsent_lowat, CTLFLAG_RW|CTLFLAG_LOCKED,
+    &so_notsent_lowat_check, 0, "enable/disable notsent lowat check");
+#endif /* DEBUG || DEVELOPMENT */
+
+int so_accept_list_waits = 0;
+#if (DEBUG || DEVELOPMENT)
+SYSCTL_INT(_kern_ipc, OID_AUTO, accept_list_waits, CTLFLAG_RW|CTLFLAG_LOCKED,
+    &so_accept_list_waits, 0, "number of waits for listener incomp list");
+#endif /* DEBUG || DEVELOPMENT */
+
 extern struct inpcbinfo tcbinfo;
 
 /* TODO: these should be in header file */
 extern int get_inpcb_str_size(void);
 extern int get_tcp_str_size(void);
 
-static unsigned int sl_zone_size;   /* size of sockaddr_list */
-static struct zone *sl_zone;        /* zone for sockaddr_list */
-
-static unsigned int se_zone_size;   /* size of sockaddr_entry */
-static struct zone *se_zone;        /* zone for sockaddr_entry */
-
 vm_size_t so_cache_zone_element_size;
 
 static int sodelayed_copy(struct socket *, struct uio *, struct mbuf **,
@@ -327,12 +369,13 @@ SYSCTL_STRUCT(_kern_ipc, OID_AUTO, extbkidlestat, CTLFLAG_RD | CTLFLAG_LOCKED,
 
 int so_set_extended_bk_idle(struct socket *, int);
 
+
 /*
 * SOTCDB_NO_DSCP is set by default, to prevent the networking stack from
 * setting the DSCP code on the packet based on the service class; see
 * for details.
 */
-__private_extern__ u_int32_t sotcdb = SOTCDB_NO_DSCP;
+__private_extern__ u_int32_t sotcdb = 0;
 SYSCTL_INT(_kern_ipc, OID_AUTO, sotcdb, CTLFLAG_RW | CTLFLAG_LOCKED,
     &sotcdb, 0, "");
 
@@ -395,24 +438,6 @@ socketinit(void)
     zone_change(so_cache_zone, Z_CALLERACCT, FALSE);
     zone_change(so_cache_zone, Z_NOENCRYPT, TRUE);
 
-    sl_zone_size = sizeof (struct sockaddr_list);
-    if ((sl_zone = zinit(sl_zone_size, 1024 * sl_zone_size, 1024,
-        "sockaddr_list")) == NULL) {
-        panic("%s: unable to allocate sockaddr_list zone\n", __func__);
-        /* NOTREACHED */
-    }
-    zone_change(sl_zone, Z_CALLERACCT, FALSE);
-    zone_change(sl_zone, Z_EXPAND, TRUE);
-
-    se_zone_size = sizeof (struct sockaddr_entry);
-    if ((se_zone = zinit(se_zone_size, 1024 * se_zone_size, 1024,
-        "sockaddr_entry")) == NULL) {
-        panic("%s: unable to allocate sockaddr_entry zone\n", __func__);
-        /* NOTREACHED */
-    }
-    zone_change(se_zone, Z_CALLERACCT, FALSE);
-    zone_change(se_zone, Z_EXPAND, TRUE);
-
     bzero(&soextbkidlestat, sizeof(struct soextbkidlestat));
     soextbkidlestat.so_xbkidle_maxperproc = SO_IDLE_BK_IDLE_MAX_PER_PROC;
     soextbkidlestat.so_xbkidle_time = SO_IDLE_BK_IDLE_TIME;
@@ -607,6 +632,12 @@ soalloc(int waitok, int dom, int type)
     if (so != NULL) {
         so->so_gencnt = OSIncrementAtomic64((SInt64 *)&so_gencnt);
         so->so_zone = socket_zone;
+
+        /*
+         * Increment the socket allocation statistics
+         */
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_alloc_total);
+
 #if CONFIG_MACF_SOCKET
         /* Convert waitok to M_WAITOK/M_NOWAIT for MAC Framework.
          */
        if (mac_socket_label_init(so, !waitok) != 0) {
@@ -654,19 +685,48 @@ socreate_internal(int dom, struct socket **aso, int type, int proto,
     if (so == NULL)
         return (ENOBUFS);
 
+    switch (dom) {
+    case PF_LOCAL:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_local_total);
+        break;
+    case PF_INET:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet_total);
+        if (type == SOCK_STREAM) {
+            INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_stream_total);
+        } else {
+            INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_total);
+        }
+        break;
+    case PF_ROUTE:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_route_total);
+        break;
+    case PF_NDRV:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_ndrv_total);
+        break;
+    case PF_KEY:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_key_total);
+        break;
+    case PF_INET6:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet6_total);
+        if (type == SOCK_STREAM) {
+            INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_stream_total);
+        } else {
+            INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_dgram_total);
+        }
+        break;
+    case PF_SYSTEM:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_system_total);
+        break;
+    case PF_MULTIPATH:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_multipath_total);
+        break;
+    default:
+        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_other_total);
+        break;
+    }
+
     if (flags & SOCF_ASYNC)
         so->so_state |= SS_NBIO;
-#if MULTIPATH
-    if (flags & SOCF_MP_SUBFLOW) {
-        /*
-         * A multipath subflow socket is used internally in the kernel,
-         * therefore it does not have a file desciptor associated by
-         * default.
-         */
-        so->so_state |= SS_NOFDREF;
-        so->so_flags |= SOF_MP_SUBFLOW;
-    }
-#endif /* MULTIPATH */
 
     TAILQ_INIT(&so->so_incomp);
     TAILQ_INIT(&so->so_comp);
@@ -712,6 +772,7 @@ socreate_internal(int dom, struct socket **aso, int type, int proto,
     * so protocol attachment handler must be coded carefuly
     */
     so->so_state |= SS_NOFDREF;
+    VERIFY(so->so_usecount > 0);
     so->so_usecount--;
     sofreelastref(so, 1);   /* will deallocate the socket */
     return (error);
@@ -840,7 +901,6 @@ sobindlock(struct socket *so, struct sockaddr *nam, int dolock)
     if (dolock)
         socket_lock(so, 1);
 
-    VERIFY(so->so_usecount > 1);
     so_update_last_owner_locked(so, p);
     so_update_policy(so);
@@ -855,9 +915,10 @@ sobindlock(struct socket *so, struct sockaddr *nam, int dolock)
     */
     if (so->so_flags & SOF_DEFUNCT) {
         error = EINVAL;
-        SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] (%d)\n",
-            __func__, proc_pid(p), (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-            SOCK_DOM(so), SOCK_TYPE(so), error));
+        SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] (%d)\n",
+            __func__, proc_pid(p), proc_best_name(p),
+            (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+            SOCK_DOM(so), SOCK_TYPE(so), error);
         goto out;
     }
 
@@ -961,10 +1022,11 @@ solisten(struct socket *so, int backlog)
         (so->so_flags & SOF_DEFUNCT)) {
         error = EINVAL;
         if (so->so_flags & SOF_DEFUNCT) {
-            SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
+            SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
                 "(%d)\n", __func__, proc_pid(p),
+                proc_best_name(p),
                 (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-                SOCK_DOM(so), SOCK_TYPE(so), error));
+                SOCK_DOM(so), SOCK_TYPE(so), error);
         }
         goto out;
     }
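
[Editorial sketch, not part of the patch] The switch above tallies per-domain socket creation into net_api_stats via INC_ATOMIC_INT64_LIM, whose definition lives elsewhere in the XNU tree. Assuming it behaves like a saturating atomic increment, a portable C11 stand-in could look like this:

    #include <stdatomic.h>
    #include <stdint.h>

    /*
     * Hypothetical stand-in for INC_ATOMIC_INT64_LIM: bump a statistics
     * counter atomically, saturating at INT64_MAX instead of wrapping.
     * Illustrative only; not the macro's actual definition.
     */
    static void
    inc_atomic_int64_lim(_Atomic int64_t *counter)
    {
        int64_t old = atomic_load_explicit(counter, memory_order_relaxed);

        /* retry until old+1 is stored or the counter is saturated */
        while (old < INT64_MAX &&
            !atomic_compare_exchange_weak_explicit(counter, &old, old + 1,
            memory_order_relaxed, memory_order_relaxed)) {
            ;
        }
    }
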
@@ -1009,6 +1071,75 @@ out:
     return (error);
 }
 
+/*
+ * The "accept list lock" protects the fields related to the listener queues
+ * because we can unlock a socket to respect the lock ordering between
+ * the listener socket and its client sockets. The lock ordering is to
+ * acquire the client socket first, then the listener socket.
+ *
+ * The accept list lock serializes access to the following fields:
+ * - of the listener socket:
+ *   - so_comp
+ *   - so_incomp
+ *   - so_qlen
+ *   - so_inqlen
+ * - of client sockets that are in so_comp or so_incomp:
+ *   - so_head
+ *   - so_list
+ *
+ * As one can see, the accept list lock protects the consistency of the
+ * linkage of the client sockets.
+ *
+ * Note that those fields may be read without holding the accept list lock
+ * for a preflight provided the accept list lock is taken when committing
+ * to take an action based on the result of the preflight. The preflight
+ * saves the cost of doing the unlock/lock dance.
+ */
+void
+so_acquire_accept_list(struct socket *head, struct socket *so)
+{
+    lck_mtx_t *mutex_held;
+
+    if (head->so_proto->pr_getlock == NULL) {
+        return;
+    }
+    mutex_held = (*head->so_proto->pr_getlock)(head, PR_F_WILLUNLOCK);
+    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
+
+    if (!(head->so_flags1 & SOF1_ACCEPT_LIST_HELD)) {
+        head->so_flags1 |= SOF1_ACCEPT_LIST_HELD;
+        return;
+    }
+    if (so != NULL) {
+        socket_unlock(so, 0);
+    }
+    while (head->so_flags1 & SOF1_ACCEPT_LIST_HELD) {
+        so_accept_list_waits += 1;
+        msleep((caddr_t)&head->so_incomp, mutex_held,
+            PSOCK | PCATCH, __func__, NULL);
+    }
+    head->so_flags1 |= SOF1_ACCEPT_LIST_HELD;
+    if (so != NULL) {
+        socket_unlock(head, 0);
+        socket_lock(so, 0);
+        socket_lock(head, 0);
+    }
+}
+
+void
+so_release_accept_list(struct socket *head)
+{
+    if (head->so_proto->pr_getlock != NULL) {
+        lck_mtx_t *mutex_held;
+
+        mutex_held = (*head->so_proto->pr_getlock)(head, 0);
+        LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
+
+        head->so_flags1 &= ~SOF1_ACCEPT_LIST_HELD;
+        wakeup((caddr_t)&head->so_incomp);
+    }
+}
+
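[Editorial sketch, not part of the patch] To make the contract above concrete, here is a minimal illustration of how a caller holding a client socket "so" is expected to bracket accesses to a listener's queues with the two helpers just added:

    /*
     * Illustrative only: walk a listener's queues while holding the
     * client socket. so_acquire_accept_list() may drop and retake the
     * locks internally to honor the client-before-listener ordering.
     */
    static void
    example_audit_listener(struct socket *head, struct socket *so)
    {
        socket_lock(head, 1);
        so_acquire_accept_list(head, so);

        /*
         * so_comp/so_incomp linkage and so_qlen/so_incqlen are now
         * stable; it is safe to scan or edit them here.
         */

        so_release_accept_list(head);  /* wakes waiters on so_incomp */
        socket_unlock(head, 1);
    }
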
 void
 sofreelastref(struct socket *so, int dealloc)
 {
@@ -1025,11 +1156,30 @@ sofreelastref(struct socket *so, int dealloc)
         return;
     }
     if (head != NULL) {
-        socket_lock(head, 1);
+        /*
+         * Need to lock the listener when the protocol has
+         * per socket locks
+         */
+        if (head->so_proto->pr_getlock != NULL) {
+            socket_lock(head, 1);
+            so_acquire_accept_list(head, so);
+        }
         if (so->so_state & SS_INCOMP) {
+            so->so_state &= ~SS_INCOMP;
             TAILQ_REMOVE(&head->so_incomp, so, so_list);
             head->so_incqlen--;
+            head->so_qlen--;
+            so->so_head = NULL;
+
+            if (head->so_proto->pr_getlock != NULL) {
+                so_release_accept_list(head);
+                socket_unlock(head, 1);
+            }
         } else if (so->so_state & SS_COMP) {
+            if (head->so_proto->pr_getlock != NULL) {
+                so_release_accept_list(head);
+                socket_unlock(head, 1);
+            }
             /*
             * We must not decommission a socket that's
             * on the accept(2) queue.  If we do, then
@@ -1041,15 +1191,14 @@ sofreelastref(struct socket *so, int dealloc)
             so->so_rcv.sb_flags &= ~(SB_SEL|SB_UPCALL);
             so->so_snd.sb_flags &= ~(SB_SEL|SB_UPCALL);
             so->so_event = sonullevent;
-            socket_unlock(head, 1);
             return;
         } else {
-            panic("sofree: not queued");
+            if (head->so_proto->pr_getlock != NULL) {
+                so_release_accept_list(head);
+                socket_unlock(head, 1);
+            }
+            printf("sofree: not queued\n");
         }
-        head->so_qlen--;
-        so->so_state &= ~SS_INCOMP;
-        so->so_head = NULL;
-        socket_unlock(head, 1);
     }
     sowflush(so);
     sorflush(so);
@@ -1062,7 +1211,7 @@ sofreelastref(struct socket *so, int dealloc)
 
     /* 3932268: disable upcall */
     so->so_rcv.sb_flags &= ~SB_UPCALL;
-    so->so_snd.sb_flags &= ~SB_UPCALL;
+    so->so_snd.sb_flags &= ~(SB_UPCALL|SB_SNDBYTE_CNT);
     so->so_event = sonullevent;
 
     if (dealloc)
@@ -1075,10 +1224,10 @@ soclose_wait_locked(struct socket *so)
     lck_mtx_t *mutex_held;
 
     if (so->so_proto->pr_getlock != NULL)
-        mutex_held = (*so->so_proto->pr_getlock)(so, 0);
+        mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
     else
         mutex_held = so->so_proto->pr_domain->dom_mtx;
-    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
 
     /*
     * Double check here and return if there's no outstanding upcall;
@@ -1089,9 +1238,10 @@ soclose_wait_locked(struct socket *so)
     so->so_rcv.sb_flags &= ~SB_UPCALL;
     so->so_snd.sb_flags &= ~SB_UPCALL;
     so->so_flags |= SOF_CLOSEWAIT;
+
     (void) msleep((caddr_t)&so->so_upcallusecount, mutex_held, (PZERO - 1),
         "soclose_wait_locked", NULL);
-    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
     so->so_flags &= ~SOF_CLOSEWAIT;
 }
 
@@ -1104,7 +1254,6 @@ int
 soclose_locked(struct socket *so)
 {
     int error = 0;
-    lck_mtx_t *mutex_held;
     struct timespec ts;
 
     if (so->so_usecount == 0) {
@@ -1135,7 +1284,8 @@ soclose_locked(struct socket *so)
 
     if ((so->so_options & SO_ACCEPTCONN)) {
         struct socket *sp, *sonext;
-        int socklock = 0;
+        int persocklock = 0;
+        int incomp_overflow_only;
 
         /*
         * We do not want new connection to be added
@@ -1143,10 +1293,19 @@ soclose_locked(struct socket *so)
         so->so_options &= ~SO_ACCEPTCONN;
 
-        for (sp = TAILQ_FIRST(&so->so_incomp);
-            sp != NULL; sp = sonext) {
-            sonext = TAILQ_NEXT(sp, so_list);
+        /*
+         * We can drop the lock on the listener once
+         * we've acquired the incoming list
+         */
+        if (so->so_proto->pr_getlock != NULL) {
+            persocklock = 1;
+            so_acquire_accept_list(so, NULL);
+            socket_unlock(so, 0);
+        }
+again:
+        incomp_overflow_only = 1;
 
+        TAILQ_FOREACH_SAFE(sp, &so->so_incomp, so_list, sonext) {
             /*
             * Radar 5350314
             * skip sockets thrown away by tcpdropdropblreq
             * they will get cleanup by the garbage collection
             * it is safe to skip as the reference is held
             */
             if (sp->so_flags & SOF_OVERFLOW)
                 continue;
 
-            if (so->so_proto->pr_getlock != NULL) {
-                /*
-                 * Lock ordering for consistency with the
-                 * rest of the stack, we lock the socket
-                 * first and then grabb the head.
-                 */
-                socket_unlock(so, 0);
+            if (persocklock != 0)
                 socket_lock(sp, 1);
-                socket_lock(so, 0);
-                socklock = 1;
-            }
-
-            TAILQ_REMOVE(&so->so_incomp, sp, so_list);
-            so->so_incqlen--;
 
+            /*
+             * Radar 27945981
+             * The extra reference for the list insures the
+             * validity of the socket pointer when we perform the
+             * unlock of the head above
+             */
             if (sp->so_state & SS_INCOMP) {
                 sp->so_state &= ~SS_INCOMP;
                 sp->so_head = NULL;
+                TAILQ_REMOVE(&so->so_incomp, sp, so_list);
+                so->so_incqlen--;
+                so->so_qlen--;
 
                 (void) soabort(sp);
+            } else {
+                panic("%s sp %p in so_incomp but !SS_INCOMP",
+                    __func__, sp);
             }
 
-            if (socklock)
+            if (persocklock != 0)
                 socket_unlock(sp, 1);
         }
 
-        while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
+        TAILQ_FOREACH_SAFE(sp, &so->so_comp, so_list, sonext) {
             /* Dequeue from so_comp since sofree() won't do it */
-            TAILQ_REMOVE(&so->so_comp, sp, so_list);
-            so->so_qlen--;
-
-            if (so->so_proto->pr_getlock != NULL) {
-                socket_unlock(so, 0);
+            if (persocklock != 0)
                 socket_lock(sp, 1);
-            }
 
             if (sp->so_state & SS_COMP) {
                 sp->so_state &= ~SS_COMP;
                 sp->so_head = NULL;
+                TAILQ_REMOVE(&so->so_comp, sp, so_list);
+                so->so_qlen--;
 
                 (void) soabort(sp);
+            } else {
+                panic("%s sp %p in so_comp but !SS_COMP",
+                    __func__, sp);
             }
 
-            if (so->so_proto->pr_getlock != NULL) {
+            if (persocklock)
                 socket_unlock(sp, 1);
-                socket_lock(so, 0);
-            }
         }
+
+        if (incomp_overflow_only == 0 && !TAILQ_EMPTY(&so->so_incomp)) {
+#if (DEBUG|DEVELOPMENT)
+            panic("%s head %p so_incomp not empty\n", __func__, so);
+#endif /* (DEVELOPMENT || DEBUG) */
+
+            goto again;
+        }
+
+        if (!TAILQ_EMPTY(&so->so_comp)) {
+#if (DEBUG|DEVELOPMENT)
+            panic("%s head %p so_comp not empty\n", __func__, so);
+#endif /* (DEVELOPMENT || DEBUG) */
+
+            goto again;
+        }
+
+        if (persocklock) {
+            socket_lock(so, 0);
+            so_release_accept_list(so);
+        }
     }
     if (so->so_pcb == NULL) {
@@ -1218,11 +1396,13 @@ soclose_locked(struct socket *so)
         goto drop;
     }
     if (so->so_options & SO_LINGER) {
+        lck_mtx_t *mutex_held;
+
         if ((so->so_state & SS_ISDISCONNECTING) &&
             (so->so_state & SS_NBIO))
             goto drop;
         if (so->so_proto->pr_getlock != NULL)
-            mutex_held = (*so->so_proto->pr_getlock)(so, 0);
+            mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
         else
             mutex_held = so->so_proto->pr_domain->dom_mtx;
         while (so->so_state & SS_ISCONNECTED) {
@@ -1265,15 +1445,13 @@ discard:
     }
     so->so_state |= SS_NOFDREF;
 
-    if (so->so_flags & SOF_MP_SUBFLOW)
-        so->so_flags &= ~SOF_MP_SUBFLOW;
-
     if ((so->so_flags & SOF_KNOTE) != 0)
         KNOTE(&so->so_klist, SO_FILT_HINT_LOCKED);
 
     atomic_add_32(&so->so_proto->pr_domain->dom_refs, -1);
     evsofree(so);
 
+    VERIFY(so->so_usecount > 0);
     so->so_usecount--;
     sofree(so);
     return (error);
@@ -1317,7 +1495,7 @@ soabort(struct socket *so)
         mutex_held = (*so->so_proto->pr_getlock)(so, 0);
     else
         mutex_held = so->so_proto->pr_domain->dom_mtx;
-    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
 #endif
 
     if ((so->so_flags & SOF_ABORTED) == 0) {
@@ -1362,11 +1540,10 @@ soaccept(struct socket *so, struct sockaddr **nam)
 }
 
 int
-soacceptfilter(struct socket *so)
+soacceptfilter(struct socket *so, struct socket *head)
 {
     struct sockaddr *local = NULL, *remote = NULL;
     int error = 0;
-    struct socket *head = so->so_head;
 
     /*
     * Hold the lock even if this socket has not been made visible
@@ -1376,8 +1553,7 @@ soacceptfilter(struct socket *so, struct socket *head)
     socket_lock(so, 1);
     if (sogetaddr_locked(so, &remote, 1) != 0 ||
         sogetaddr_locked(so, &local, 0) != 0) {
-        so->so_state &= ~(SS_NOFDREF | SS_COMP);
-        so->so_head = NULL;
+        so->so_state &= ~SS_NOFDREF;
         socket_unlock(so, 1);
         soclose(so);
         /* Out of resources; try it again next time */
@@ -1405,8 +1581,7 @@ soacceptfilter(struct socket *so, struct socket *head)
     * the following is done while holding the lock since
     * the socket has been exposed to the filter(s) earlier.
     */
-    so->so_state &= ~(SS_NOFDREF | SS_COMP);
-    so->so_head = NULL;
+    so->so_state &= ~SS_NOFDREF;
     socket_unlock(so, 1);
     soclose(so);
     /* Propagate socket filter's error code to the caller */
@@ -1456,10 +1631,11 @@ soconnectlock(struct socket *so, struct sockaddr *nam, int dolock)
     if ((so->so_options & SO_ACCEPTCONN) || (so->so_flags & SOF_DEFUNCT)) {
         error = EOPNOTSUPP;
         if (so->so_flags & SOF_DEFUNCT) {
-            SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
+            SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
                 "(%d)\n", __func__, proc_pid(p),
+                proc_best_name(p),
                 (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-                SOCK_DOM(so), SOCK_TYPE(so), error));
+                SOCK_DOM(so), SOCK_TYPE(so), error);
         }
         if (dolock)
             socket_unlock(so, 1);
@@ -1533,8 +1709,8 @@ soconnect2(struct socket *so1, struct socket *so2)
 }
 
 int
-soconnectxlocked(struct socket *so, struct sockaddr_list **src_sl,
-    struct sockaddr_list **dst_sl, struct proc *p, uint32_t ifscope,
+soconnectxlocked(struct socket *so, struct sockaddr *src,
+    struct sockaddr *dst, struct proc *p, uint32_t ifscope,
     sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
     uint32_t arglen, uio_t auio, user_ssize_t *bytes_written)
 {
@@ -1550,10 +1726,11 @@ soconnectxlocked(struct socket *so, struct sockaddr *src,
     if ((so->so_options & SO_ACCEPTCONN) || (so->so_flags & SOF_DEFUNCT)) {
         error = EOPNOTSUPP;
         if (so->so_flags & SOF_DEFUNCT) {
-            SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
+            SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
                 "(%d)\n", __func__, proc_pid(p),
+                proc_best_name(p),
                 (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-                SOCK_DOM(so), SOCK_TYPE(so), error));
+                SOCK_DOM(so), SOCK_TYPE(so), error);
         }
         return (error);
     }
@@ -1577,7 +1754,7 @@ soconnectxlocked(struct socket *so, struct sockaddr *src,
     * Run connect filter before calling protocol:
     *   - non-blocking connect returns before completion;
     */
-    error = sflt_connectxout(so, dst_sl);
+    error = sflt_connectout(so, dst);
     if (error != 0) {
         /* Disable PRECONNECT_DATA, as we don't need to send a SYN anymore. */
         so->so_flags1 &= ~SOF1_PRECONNECT_DATA;
@@ -1585,7 +1762,7 @@ soconnectxlocked(struct socket *so, struct sockaddr *src,
             error = 0;
     } else {
         error = (*so->so_proto->pr_usrreqs->pru_connectx)
-            (so, src_sl, dst_sl, p, ifscope, aid, pcid,
+            (so, src, dst, p, ifscope, aid, pcid,
             flags, arg, arglen, auio, bytes_written);
     }
 }
@@ -1659,12 +1836,6 @@ sodisconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
     return (error);
 }
 
-int
-sopeelofflocked(struct socket *so, sae_associd_t aid, struct socket **psop)
-{
-    return ((*so->so_proto->pr_usrreqs->pru_peeloff)(so, aid, psop));
-}
-
 #define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
 
 /*
@@ -1718,10 +1889,10 @@ restart:
     if (so->so_flags & SOF_DEFUNCT) {
defunct:
         error = EPIPE;
-        SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] (%d)\n",
-            __func__, proc_selfpid(),
+        SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] (%d)\n",
+            __func__, proc_selfpid(), proc_best_name(current_proc()),
             (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-            SOCK_DOM(so), SOCK_TYPE(so), error));
+            SOCK_DOM(so), SOCK_TYPE(so), error);
         return (error);
     }
 
@@ -1750,18 +1921,9 @@ defunct:
     if ((so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
         if (((so->so_state & SS_ISCONFIRMING) == 0) &&
             (resid != 0 || clen == 0) &&
-            !(so->so_flags1 & SOF1_PRECONNECT_DATA)) {
-#if MPTCP
-            /*
-             * MPTCP Fast Join sends data before the
-             * socket is truly connected.
-             */
-            if ((so->so_flags & (SOF_MP_SUBFLOW |
-                SOF_MPTCP_FASTJOIN)) !=
-                (SOF_MP_SUBFLOW | SOF_MPTCP_FASTJOIN))
-#endif /* MPTCP */
+            !(so->so_flags1 & SOF1_PRECONNECT_DATA))
             return (ENOTCONN);
-        }
+
     } else if (addr == 0 && !(flags&MSG_HOLD)) {
         return ((so->so_proto->pr_flags & PR_CONNREQUIRED) ?
             ENOTCONN : EDESTADDRREQ);
@@ -1929,8 +2091,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
 
     if (so->so_type != SOCK_STREAM && (flags & MSG_OOB) != 0) {
         error = EOPNOTSUPP;
-        socket_unlock(so, 1);
-        goto out;
+        goto out_locked;
     }
 
     /*
@@ -1949,8 +2110,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
     if (resid < 0 || resid > INT_MAX || (so->so_type == SOCK_STREAM &&
         !(so->so_flags & SOF_ENABLE_MSGS) && (flags & MSG_EOR))) {
         error = EINVAL;
-        socket_unlock(so, 1);
-        goto out;
+        goto out_locked;
     }
 
     dontroute = (flags & MSG_DONTROUTE) &&
@@ -1968,7 +2128,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
         error = sosendcheck(so, addr, resid, clen, atomic, flags,
             &sblocked, control);
         if (error)
-            goto release;
+            goto out_locked;
 
         mp = &top;
         if (so->so_flags & SOF_ENABLE_MSGS)
@@ -2137,7 +2297,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                         top == NULL && headroom > 0) {
                         freelist->m_data += headroom;
                     }
-                    
+
                     /*
                     * Fall back to regular mbufs without
                     * reserving the socket headroom
@@ -2153,7 +2313,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                     if (freelist == NULL) {
                         error = ENOBUFS;
                         socket_lock(so, 0);
-                        goto release;
+                        goto out_locked;
                     }
                     /*
                     * For datagram protocols,
@@ -2209,7 +2369,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                 socket_lock(so, 0);
 
                 if (error)
-                    goto release;
+                    goto out_locked;
             }
 
             if (flags & (MSG_HOLD|MSG_SEND)) {
@@ -2229,7 +2389,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                 so->so_tail = mb1;
                 if (flags & MSG_HOLD) {
                     top = NULL;
-                    goto release;
+                    goto out_locked;
                 }
                 top = so->so_temp;
             }
@@ -2264,7 +2424,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                     control = NULL;
                     top = NULL;
                 }
-                goto release;
+                goto out_locked;
             }
#if CONTENT_FILTER
             /*
@@ -2280,7 +2440,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                     control = NULL;
                     top = NULL;
                 }
-                goto release;
+                goto out_locked;
             }
#endif /* CONTENT_FILTER */
         }
@@ -2307,16 +2467,15 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
             top = NULL;
             mp = &top;
             if (error)
-                goto release;
+                goto out_locked;
         } while (resid && space > 0);
     } while (resid);
 
-release:
+out_locked:
     if (sblocked)
         sbunlock(&so->so_snd, FALSE);   /* will unlock socket */
     else
         socket_unlock(so, 1);
-out:
     if (top != NULL)
         m_freem(top);
     if (control != NULL)
@@ -2326,12 +2485,7 @@ out:
     if (control_copy != NULL)
         m_freem(control_copy);
 
-    /*
-     * One write has been done. This was enough. Get back to "normal"
-     * behavior.
-     */
-    if (so->so_flags1 & SOF1_PRECONNECT_DATA)
-        so->so_flags1 &= ~SOF1_PRECONNECT_DATA;
+    soclearfastopen(so);
 
     if (en_tracing) {
         /* resid passed here is the bytes left in uio */
@@ -2475,7 +2629,7 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags)
     /*
     * Allocate buffer large enough to include headroom space for
     * network and link header
-    * 
+    *
     */
     bytes_to_alloc = maxpktlen + headroom;
 
@@ -2500,7 +2654,7 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags)
             (unsigned int *)&num_needed,
             bytes_to_alloc, NULL, M_WAIT, 1, 0);
     }
-    
+
     if (freelist == NULL) {
         socket_lock(so, 0);
         error = ENOMEM;
@@ -2992,9 +3146,10 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
         struct sockbuf *sb = &so->so_rcv;
 
         error = ENOTCONN;
-        SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] (%d)\n",
-            __func__, proc_pid(p), (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-            SOCK_DOM(so), SOCK_TYPE(so), error));
+        SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] (%d)\n",
+            __func__, proc_pid(p), proc_best_name(p),
+            (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+            SOCK_DOM(so), SOCK_TYPE(so), error);
         /*
         * This socket should have been disconnected and flushed
         * prior to being returned from sodefunct(); there should
@@ -3852,9 +4007,10 @@ soreceive_list(struct socket *so, struct recv_msg_elem *msgarray, u_int uiocnt,
         struct sockbuf *sb = &so->so_rcv;
 
         error = ENOTCONN;
-        SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] (%d)\n",
-            __func__, proc_pid(p), (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-            SOCK_DOM(so), SOCK_TYPE(so), error));
+        SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] (%d)\n",
+            __func__, proc_pid(p), proc_best_name(p),
+            (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+            SOCK_DOM(so), SOCK_TYPE(so), error);
         /*
        * This socket should have been disconnected and flushed
        * prior to being returned from sodefunct(); there should
@@ -4300,20 +4456,6 @@ void
 sowflush(struct socket *so)
 {
     struct sockbuf *sb = &so->so_snd;
-#ifdef notyet
-    lck_mtx_t *mutex_held;
-    /*
-     * XXX: This code is currently commented out, because we may get here
-     * as part of sofreelastref(), and at that time, pr_getlock() may no
-     * longer be able to return us the lock; this will be fixed in future.
-     */
-    if (so->so_proto->pr_getlock != NULL)
-        mutex_held = (*so->so_proto->pr_getlock)(so, 0);
-    else
-        mutex_held = so->so_proto->pr_domain->dom_mtx;
-
-    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
-#endif /* notyet */
 
     /*
     * Obtain lock on the socket buffer (SB_LOCK).  This is required
@@ -4354,7 +4496,7 @@ sorflush(struct socket *so)
     else
         mutex_held = so->so_proto->pr_domain->dom_mtx;
 
-    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
 #endif /* notyet */
 
     sflt_notify(so, sock_evt_flush_read, NULL);
@@ -4533,6 +4675,33 @@ sooptcopyin_timeval(struct sockopt *sopt, struct timeval *tv_p)
     return (0);
 }
 
+int
+soopt_cred_check(struct socket *so, int priv, boolean_t allow_root)
+{
+    kauth_cred_t cred = NULL;
+    proc_t ep = PROC_NULL;
+    uid_t uid;
+    int error = 0;
+
+    if (so->so_flags & SOF_DELEGATED) {
+        ep = proc_find(so->e_pid);
+        if (ep)
+            cred = kauth_cred_proc_ref(ep);
+    }
+
+    uid = kauth_cred_getuid(cred ? cred : so->so_cred);
+
+    /* uid is 0 for root */
+    if (uid != 0 || !allow_root)
+        error = priv_check_cred(cred ? cred : so->so_cred, priv, 0);
+    if (cred)
+        kauth_cred_unref(&cred);
+    if (ep != PROC_NULL)
+        proc_rele(ep);
+
+    return (error);
+}
+
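[Editorial sketch, not part of the patch] soopt_cred_check() above centralizes the "check the delegate's credential if the socket is delegated" pattern; the patch's own callers pass PRIV_NET_RESTRICTED_AWDL and PRIV_NET_RESTRICTED_INTCOPROC. A hypothetical option handler built on it might look like this (PRIV_NET_EXAMPLE is a made-up placeholder privilege, not a real XNU constant):

    /*
     * Illustrative only: gate a restricted option behind a privilege,
     * honoring delegation via soopt_cred_check().
     */
    static int
    example_set_restricted(struct socket *so, int optval)
    {
        int error = soopt_cred_check(so, PRIV_NET_EXAMPLE, false);

        if (error != 0)
            return (error); /* neither owner nor delegate is entitled */
        if (optval != 0) {
            /* ... apply the restricted setting here ... */
        }
        return (0);
    }
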
 /*
 * Returns:    0            Success
 *        EINVAL
@@ -4633,6 +4802,7 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
         case SO_WANTMORE:
         case SO_WANTOOBFLAG:
         case SO_NOWAKEFROMSLEEP:
+        case SO_NOAPNFALLBK:
             error = sooptcopyin(sopt, &optval, sizeof (optval),
                 sizeof (optval));
             if (error != 0)
@@ -4812,27 +4982,33 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
             if (error != 0)
                 goto out;
             if (optval != 0) {
-                kauth_cred_t cred = NULL;
-                proc_t ep = PROC_NULL;
-
-                if (so->so_flags & SOF_DELEGATED) {
-                    ep = proc_find(so->e_pid);
-                    if (ep)
-                        cred = kauth_cred_proc_ref(ep);
-                }
-                error = priv_check_cred(
-                    cred ? cred : so->so_cred,
-                    PRIV_NET_RESTRICTED_AWDL, 0);
+                error = soopt_cred_check(so,
+                    PRIV_NET_RESTRICTED_AWDL, false);
                 if (error == 0)
                     inp_set_awdl_unrestricted(
                         sotoinpcb(so));
-                if (cred)
-                    kauth_cred_unref(&cred);
-                if (ep != PROC_NULL)
-                    proc_rele(ep);
             } else
                 inp_clear_awdl_unrestricted(sotoinpcb(so));
             break;
+        case SO_INTCOPROC_ALLOW:
+            if (SOCK_DOM(so) != PF_INET6) {
+                error = EOPNOTSUPP;
+                goto out;
+            }
+            error = sooptcopyin(sopt, &optval, sizeof(optval),
+                sizeof(optval));
+            if (error != 0)
+                goto out;
+            if (optval != 0 &&
+                inp_get_intcoproc_allowed(sotoinpcb(so)) == FALSE) {
+                error = soopt_cred_check(so,
+                    PRIV_NET_RESTRICTED_INTCOPROC, false);
+                if (error == 0)
+                    inp_set_intcoproc_allowed(
+                        sotoinpcb(so));
+            } else if (optval == 0)
+                inp_clear_intcoproc_allowed(sotoinpcb(so));
+            break;
 
         case SO_LABEL:
#if CONFIG_MACF_SOCKET
@@ -4897,9 +5073,16 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
                 sizeof (optval));
             if (error != 0)
                 goto out;
+            if (optval >= SO_TC_NET_SERVICE_OFFSET) {
+                int netsvc = optval - SO_TC_NET_SERVICE_OFFSET;
+                error = so_set_net_service_type(so, netsvc);
+                goto out;
+            }
             error = so_set_traffic_class(so, optval);
             if (error != 0)
                 goto out;
+            so->so_flags1 &= ~SOF1_TC_NET_SERV_TYPE;
+            so->so_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
             break;
         }
 
@@ -4915,6 +5098,7 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
             break;
         }
 
+#if (DEVELOPMENT || DEBUG)
         case SO_TRAFFIC_CLASS_DBG: {
             struct so_tcdbg so_tcdbg;
 
@@ -4927,6 +5111,7 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
                 goto out;
             break;
         }
+#endif /* (DEVELOPMENT || DEBUG) */
 
         case SO_PRIVILEGED_TRAFFIC_CLASS:
             error = priv_check_cred(kauth_cred_get(),
@@ -4972,9 +5157,11 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
                 char d[MAX_IPv6_STR_LEN];
                 struct inpcb *inp = sotoinpcb(so);
 
-                SODEFUNCTLOG(("%s[%d]: so 0x%llx [%s %s:%d -> "
-                    "%s:%d] is now marked as %seligible for "
+                SODEFUNCTLOG("%s[%d, %s]: so 0x%llx "
+                    "[%s %s:%d -> %s:%d] is now marked "
+                    "as %seligible for "
                     "defunct\n", __func__, proc_selfpid(),
+                    proc_best_name(current_proc()),
                     (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
                     (SOCK_TYPE(so) == SOCK_STREAM) ?
                     "TCP" : "UDP", inet_ntop(SOCK_DOM(so),
@@ -4988,15 +5175,17 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
                     (void *)&inp->in6p_faddr, d, sizeof (d)),
                     ntohs(inp->in6p_fport),
                     (so->so_flags & SOF_NODEFUNCT) ?
- "not " : "")); + "not " : ""); } else { - SODEFUNCTLOG(("%s[%d]: so 0x%llx [%d,%d] is " - "now marked as %seligible for defunct\n", + SODEFUNCTLOG("%s[%d, %s]: so 0x%llx [%d,%d] " + "is now marked as %seligible for " + "defunct\n", __func__, proc_selfpid(), + proc_best_name(current_proc()), (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so), SOCK_TYPE(so), (so->so_flags & SOF_NODEFUNCT) ? - "not " : "")); + "not " : ""); } break; @@ -5060,27 +5249,50 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_NECP_ATTRIBUTES: error = necp_set_socket_attributes(so, sopt); break; -#endif /* NECP */ -#if MPTCP - case SO_MPTCP_FASTJOIN: - if (!((so->so_flags & SOF_MP_SUBFLOW) || - ((SOCK_CHECK_DOM(so, PF_MULTIPATH)) && - (SOCK_CHECK_PROTO(so, IPPROTO_TCP))))) { - error = ENOPROTOOPT; + case SO_NECP_CLIENTUUID: + if (SOCK_DOM(so) == PF_MULTIPATH) { + /* Handled by MPTCP itself */ break; } - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { + error = EINVAL; goto out; - if (optval == 0) - so->so_flags &= ~SOF_MPTCP_FASTJOIN; - else - so->so_flags |= SOF_MPTCP_FASTJOIN; + } + + struct inpcb *inp = sotoinpcb(so); + if (!uuid_is_null(inp->necp_client_uuid)) { + // Clear out the old client UUID if present + necp_inpcb_remove_cb(inp); + } + + error = sooptcopyin(sopt, &inp->necp_client_uuid, + sizeof(uuid_t), sizeof(uuid_t)); + if (error != 0) { + goto out; + } + + if (uuid_is_null(inp->necp_client_uuid)) { + error = EINVAL; + goto out; + } + + error = necp_client_register_socket_flow(so->last_pid, + inp->necp_client_uuid, inp); + if (error != 0) { + uuid_clear(inp->necp_client_uuid); + goto out; + } + + if (inp->inp_lport != 0) { + // There is bound local port, so this is not + // a fresh socket. Assign to the client. 
@@ -5103,6 +5315,31 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
             else
                 so->so_flags1 |= SOF1_CELLFALLBACK;
             break;
+
+        case SO_NET_SERVICE_TYPE: {
+            error = sooptcopyin(sopt, &optval, sizeof(optval),
+                sizeof(optval));
+            if (error != 0)
+                goto out;
+            error = so_set_net_service_type(so, optval);
+            break;
+        }
+
+        case SO_QOSMARKING_POLICY_OVERRIDE:
+            error = priv_check_cred(kauth_cred_get(),
+                PRIV_NET_QOSMARKING_POLICY_OVERRIDE, 0);
+            if (error != 0)
+                goto out;
+            error = sooptcopyin(sopt, &optval, sizeof(optval),
+                sizeof(optval));
+            if (error != 0)
+                goto out;
+            if (optval == 0)
+                so->so_flags1 &= ~SOF1_QOSMARKING_POLICY_OVERRIDE;
+            else
+                so->so_flags1 |= SOF1_QOSMARKING_POLICY_OVERRIDE;
+            break;
+
         default:
             error = ENOPROTOOPT;
             break;
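[Editorial sketch, not part of the patch] SO_NET_SERVICE_TYPE and the NET_SERVICE_TYPE_* constants are published in Darwin's <sys/socket.h>, so the new setter above can be exercised from userspace roughly like this:

    #include <sys/socket.h>
    #include <err.h>

    /* Mark a socket as carrying interactive-video traffic. */
    static void
    example_set_service_type(int fd)
    {
        int nst = NET_SERVICE_TYPE_VI;

        if (setsockopt(fd, SOL_SOCKET, SO_NET_SERVICE_TYPE,
            &nst, sizeof(nst)) == -1)
            err(1, "setsockopt(SO_NET_SERVICE_TYPE)");
    }
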
@@ -5152,8 +5389,8 @@ sooptcopyout_timeval(struct sockopt *sopt, const struct timeval *tv_p)
 {
     int            error;
     size_t            len;
-    struct user64_timeval    tv64;
-    struct user32_timeval    tv32;
+    struct user64_timeval    tv64 = {};
+    struct user32_timeval    tv32 = {};
     const void *        val;
     size_t            valsize;
 
@@ -5253,6 +5490,7 @@ sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
         case SO_WANTMORE:
         case SO_WANTOOBFLAG:
         case SO_NOWAKEFROMSLEEP:
+        case SO_NOAPNFALLBK:
             optval = so->so_options & sopt->sopt_name;
integer:
             error = sooptcopyout(sopt, &optval, sizeof (optval));
@@ -5376,6 +5614,15 @@ integer:
             error = EOPNOTSUPP;
             break;
 
+        case SO_INTCOPROC_ALLOW:
+            if (SOCK_DOM(so) == PF_INET6) {
+                optval = inp_get_intcoproc_allowed(
+                    sotoinpcb(so));
+                goto integer;
+            } else
+                error = EOPNOTSUPP;
+            break;
+
         case SO_LABEL:
#if CONFIG_MACF_SOCKET
             if ((error = sooptcopyin(sopt, &extmac, sizeof (extmac),
@@ -5414,7 +5661,7 @@ integer:
             goto integer;
 
         case SO_NP_EXTENSIONS: {
-            struct so_np_extensions sonpx;
+            struct so_np_extensions sonpx = {};
 
             sonpx.npx_flags = (so->so_flags & SOF_NPX_SETOPTSHUT) ?
                 SONPX_SETOPTSHUT : 0;
@@ -5438,9 +5685,11 @@ integer:
                 sizeof (so->so_tc_stats));
             break;
 
+#if (DEVELOPMENT || DEBUG)
         case SO_TRAFFIC_CLASS_DBG:
             error = sogetopt_tcdbg(so, sopt);
             break;
+#endif /* (DEVELOPMENT || DEBUG) */
 
         case SO_PRIVILEGED_TRAFFIC_CLASS:
             optval = (so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS);
@@ -5485,6 +5734,23 @@ integer:
         case SO_NECP_ATTRIBUTES:
             error = necp_get_socket_attributes(so, sopt);
             break;
+
+        case SO_NECP_CLIENTUUID:
+        {
+            uuid_t *ncu;
+
+            if (SOCK_DOM(so) == PF_MULTIPATH) {
+                ncu = &mpsotomppcb(so)->necp_client_uuid;
+            } else if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
+                ncu = &sotoinpcb(so)->necp_client_uuid;
+            } else {
+                error = EINVAL;
+                goto out;
+            }
+
+            error = sooptcopyout(sopt, ncu, sizeof(uuid_t));
+            break;
+        }
 #endif /* NECP */
 
#if CONTENT_FILTER
@@ -5499,19 +5765,6 @@ integer:
         }
#endif /* CONTENT_FILTER */
 
-#if MPTCP
-        case SO_MPTCP_FASTJOIN:
-            if (!((so->so_flags & SOF_MP_SUBFLOW) ||
-                ((SOCK_CHECK_DOM(so, PF_MULTIPATH)) &&
-                (SOCK_CHECK_PROTO(so, IPPROTO_TCP))))) {
-                error = ENOPROTOOPT;
-                break;
-            }
-            optval = (so->so_flags & SOF_MPTCP_FASTJOIN);
-            /* Fixed along with rdar://19391339 */
-            goto integer;
-#endif /* MPTCP */
-
         case SO_EXTENDED_BK_IDLE:
             optval = (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED);
             goto integer;
             optval = ((so->so_flags1 & SOF1_CELLFALLBACK) > 0) ?
                 1 : 0;
             goto integer;
-        default:
-            error = ENOPROTOOPT;
-            break;
+        case SO_NET_SERVICE_TYPE: {
+            if ((so->so_flags1 & SOF1_TC_NET_SERV_TYPE))
+                optval = so->so_netsvctype;
+            else
+                optval = NET_SERVICE_TYPE_BE;
+            goto integer;
+        }
+        case SO_NETSVC_MARKING_LEVEL:
+            optval = so_get_netsvc_marking_level(so);
+            goto integer;
+
+        default:
+            error = ENOPROTOOPT;
+            break;
         }
     }
out:
@@ -5666,6 +5930,10 @@ sohasoutofband(struct socket *so)
     else if (so->so_pgid > 0)
         proc_signal(so->so_pgid, SIGURG);
     selwakeup(&so->so_rcv.sb_sel);
+    if (so->so_rcv.sb_flags & SB_KNOTE) {
+        KNOTE(&so->so_rcv.sb_sel.si_note,
+            (NOTE_OOB | SO_FILT_HINT_LOCKED));
+    }
 }
 
 int
@@ -5716,14 +5984,15 @@ sopoll(struct socket *so, int events, kauth_cred_t cred, void * wql)
 }
 
 int
-soo_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx)
+soo_kqfilter(struct fileproc *fp, struct knote *kn,
+    struct kevent_internal_s *kev, vfs_context_t ctx)
 {
 #pragma unused(fp)
 #if !CONFIG_MACF_SOCKET
 #pragma unused(ctx)
 #endif /* MAC_SOCKET */
     struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
-    struct klist *skl;
+    int result;
 
     socket_lock(so, 1);
     so_update_last_owner_locked(so, PROC_NULL);
@@ -5733,84 +6002,48 @@ soo_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx)
     if (mac_socket_check_kqfilter(proc_ucred(vfs_context_proc(ctx)),
         kn, so) != 0) {
         socket_unlock(so, 1);
-        return (1);
+        kn->kn_flags = EV_ERROR;
+        kn->kn_data = EPERM;
+        return 0;
     }
 #endif /* MAC_SOCKET */
 
     switch (kn->kn_filter) {
     case EVFILT_READ:
-        kn->kn_fop = &soread_filtops;
-        /*
-         * If the caller explicitly asked for OOB results (e.g. poll()),
-         * save that off in the hookid field and reserve the kn_flags
-         * EV_OOBAND bit for output only.
-         */
-        if (kn->kn_flags & EV_OOBAND) {
-            kn->kn_flags &= ~EV_OOBAND;
-            kn->kn_hookid = EV_OOBAND;
-        } else {
-            kn->kn_hookid = 0;
-        }
-        skl = &so->so_rcv.sb_sel.si_note;
+        kn->kn_filtid = EVFILTID_SOREAD;
         break;
     case EVFILT_WRITE:
-        kn->kn_fop = &sowrite_filtops;
-        skl = &so->so_snd.sb_sel.si_note;
+        kn->kn_filtid = EVFILTID_SOWRITE;
         break;
     case EVFILT_SOCK:
-        kn->kn_fop = &sock_filtops;
-        skl = &so->so_klist;
-        kn->kn_hookid = 0;
-        kn->kn_status |= KN_TOUCH;
+        kn->kn_filtid = EVFILTID_SCK;
+        break;
+    case EVFILT_EXCEPT:
+        kn->kn_filtid = EVFILTID_SOEXCEPT;
         break;
     default:
         socket_unlock(so, 1);
-        return (1);
-    }
-
-    if (KNOTE_ATTACH(skl, kn)) {
-        switch (kn->kn_filter) {
-        case EVFILT_READ:
-            so->so_rcv.sb_flags |= SB_KNOTE;
-            break;
-        case EVFILT_WRITE:
-            so->so_snd.sb_flags |= SB_KNOTE;
-            break;
-        case EVFILT_SOCK:
-            so->so_flags |= SOF_KNOTE;
-            break;
-        default:
-            socket_unlock(so, 1);
-            return (1);
-        }
+        kn->kn_flags = EV_ERROR;
+        kn->kn_data = EINVAL;
+        return 0;
     }
-    socket_unlock(so, 1);
-    return (0);
-}
 
-static void
-filt_sordetach(struct knote *kn)
-{
-    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+    /*
+     * call the appropriate sub-filter attach
+     * with the socket still locked
+     */
+    result = knote_fops(kn)->f_attach(kn, kev);
 
-    socket_lock(so, 1);
-    if (so->so_rcv.sb_flags & SB_KNOTE)
-        if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn))
-            so->so_rcv.sb_flags &= ~SB_KNOTE;
     socket_unlock(so, 1);
+
+    return result;
 }
 
-/*ARGSUSED*/
 static int
-filt_soread(struct knote *kn, long hint)
+filt_soread_common(struct knote *kn, struct socket *so)
 {
-    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
-
-    if ((hint & SO_FILT_HINT_LOCKED) == 0)
-        socket_lock(so, 1);
-
     if (so->so_options & SO_ACCEPTCONN) {
-        int isempty;
+        int is_not_empty;
 
         /*
         * Radar 6615193 handle the listen case dynamically
@@ -5819,12 +6052,9 @@
         */
         kn->kn_data = so->so_qlen;
-        isempty = ! TAILQ_EMPTY(&so->so_comp);
+        is_not_empty = ! TAILQ_EMPTY(&so->so_comp);
 
-        if ((hint & SO_FILT_HINT_LOCKED) == 0)
-            socket_unlock(so, 1);
-
-        return (isempty);
+        return (is_not_empty);
     }
 
     /* socket isn't a listener */
@@ -5835,26 +6065,10 @@
     */
     kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
 
-    /*
-     * Clear out EV_OOBAND that filt_soread may have set in the
-     * past.
-     */
-    kn->kn_flags &= ~EV_OOBAND;
-    if ((so->so_oobmark) || (so->so_state & SS_RCVATMARK)) {
-        kn->kn_flags |= EV_OOBAND;
-        /*
-         * If caller registered explicit interest in OOB data,
-         * return immediately (data == amount beyond mark, for
-         * legacy reasons - that should be changed later).
-         */
-        if (kn->kn_hookid == EV_OOBAND) {
-            /*
-             * When so_state is SS_RCVATMARK, so_oobmark
-             * is 0.
-             */
+    if (kn->kn_sfflags & NOTE_OOB) {
+        if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
+            kn->kn_fflags |= NOTE_OOB;
             kn->kn_data -= so->so_oobmark;
-            if ((hint & SO_FILT_HINT_LOCKED) == 0)
-                socket_unlock(so, 1);
             return (1);
         }
     }
@@ -5866,14 +6080,10 @@
         ) {
         kn->kn_flags |= EV_EOF;
         kn->kn_fflags = so->so_error;
-        if ((hint & SO_FILT_HINT_LOCKED) == 0)
-            socket_unlock(so, 1);
         return (1);
     }
 
     if (so->so_error) {    /* temporary udp error */
-        if ((hint & SO_FILT_HINT_LOCKED) == 0)
-            socket_unlock(so, 1);
         return (1);
     }
 
@@ -5890,9 +6100,6 @@
         lowwat = kn->kn_sdata;
     }
 
-    if ((hint & SO_FILT_HINT_LOCKED) == 0)
-        socket_unlock(so, 1);
-
     /*
     * The order below is important. Since NOTE_LOWAT
     * overrides sb_lowat, check for NOTE_LOWAT case
@@ -5904,16 +6111,103 @@
     return (so->so_rcv.sb_cc >= lowwat);
 }
 
+static int
+filt_sorattach(struct knote *kn, __unused struct kevent_internal_s *kev)
+{
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+    /* socket locked */
+
+    /*
+     * If the caller explicitly asked for OOB results (e.g. poll())
+     * from EVFILT_READ, then save that off in the hookid field
+     * and reserve the kn_flags EV_OOBAND bit for output only.
+     */
+    if (kn->kn_filter == EVFILT_READ &&
+        kn->kn_flags & EV_OOBAND) {
+        kn->kn_flags &= ~EV_OOBAND;
+        kn->kn_hookid = EV_OOBAND;
+    } else {
+        kn->kn_hookid = 0;
+    }
+    if (KNOTE_ATTACH(&so->so_rcv.sb_sel.si_note, kn))
+        so->so_rcv.sb_flags |= SB_KNOTE;
+
+    /* indicate if event is already fired */
+    return filt_soread_common(kn, so);
+}
+
 static void
-filt_sowdetach(struct knote *kn)
+filt_sordetach(struct knote *kn)
 {
     struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
 
+    socket_lock(so, 1);
+    if (so->so_rcv.sb_flags & SB_KNOTE)
+        if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn))
+            so->so_rcv.sb_flags &= ~SB_KNOTE;
+    socket_unlock(so, 1);
+}
+
+/*ARGSUSED*/
+static int
+filt_soread(struct knote *kn, long hint)
+{
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+    int retval;
+
+    if ((hint & SO_FILT_HINT_LOCKED) == 0)
+        socket_lock(so, 1);
+
+    retval = filt_soread_common(kn, so);
+
+    if ((hint & SO_FILT_HINT_LOCKED) == 0)
+        socket_unlock(so, 1);
+
+    return retval;
+}
+
+static int
+filt_sortouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+    int retval;
+
+    socket_lock(so, 1);
+
+    /* save off the new input fflags and data */
+    kn->kn_sfflags = kev->fflags;
+    kn->kn_sdata = kev->data;
+    if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+        kn->kn_udata = kev->udata;
+
+    /* determine if changes result in fired events */
+    retval = filt_soread_common(kn, so);
 
-    if (so->so_snd.sb_flags & SB_KNOTE)
-        if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn))
-            so->so_snd.sb_flags &= ~SB_KNOTE;
     socket_unlock(so, 1);
+
+    return retval;
+}
+
+static int
+filt_sorprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+    int retval;
+
+    socket_lock(so, 1);
+    retval = filt_soread_common(kn, so);
+    if (retval) {
+        *kev = kn->kn_kevent;
+        if (kn->kn_flags & EV_CLEAR) {
+            kn->kn_fflags = 0;
+            kn->kn_data = 0;
+        }
+    }
+    socket_unlock(so, 1);
+
+    return retval;
 }
 
 int
@@ -5928,34 +6222,25 @@ so_wait_for_if_feedback(struct socket *so)
     return (0);
 }
 
-/*ARGSUSED*/
 static int
-filt_sowrite(struct knote *kn, long hint)
+filt_sowrite_common(struct knote *kn, struct socket *so)
 {
-    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
     int ret = 0;
 
-    if ((hint & SO_FILT_HINT_LOCKED) == 0)
-        socket_lock(so, 1);
-
     kn->kn_data = sbspace(&so->so_snd);
     if (so->so_state & SS_CANTSENDMORE) {
         kn->kn_flags |= EV_EOF;
         kn->kn_fflags = so->so_error;
-        ret = 1;
-        goto out;
+        return 1;
     }
     if (so->so_error) {    /* temporary udp error */
-        ret = 1;
-        goto out;
+        return 1;
     }
     if (!socanwrite(so)) {
-        ret = 0;
-        goto out;
+        return 0;
     }
     if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
-        ret = 1;
-        goto out;
+        return 1;
     }
     int64_t lowwat = so->so_snd.sb_lowat;
     if (kn->kn_sfflags & NOTE_LOWAT) {
@@ -5965,10 +6250,14 @@
         lowwat = kn->kn_sdata;
     }
     if (kn->kn_data >= lowwat) {
-        if (so->so_flags & SOF_NOTSENT_LOWAT) {
-            if ((SOCK_DOM(so) == PF_INET
-                || SOCK_DOM(so) == PF_INET6)
-                && so->so_type == SOCK_STREAM) {
+        if ((so->so_flags & SOF_NOTSENT_LOWAT)
+#if (DEBUG || DEVELOPMENT)
+            && so_notsent_lowat_check == 1
+#endif /* DEBUG || DEVELOPMENT */
+            ) {
+            if ((SOCK_DOM(so) == PF_INET ||
+                SOCK_DOM(so) == PF_INET6) &&
+                so->so_type == SOCK_STREAM) {
                 ret = tcp_notsent_lowat_check(so);
             }
#if MPTCP
@@ -5978,8 +6267,7 @@
             }
#endif
             else {
-                ret = 1;
-                goto out;
+                return 1;
             }
         } else {
             ret = 1;
@@ -5987,36 +6275,99 @@
     }
     if (so_wait_for_if_feedback(so))
         ret = 0;
-out:
-    if ((hint & SO_FILT_HINT_LOCKED) == 0)
-        socket_unlock(so, 1);
     return (ret);
 }
 
+static int
+filt_sowattach(struct knote *kn, __unused struct kevent_internal_s *kev)
+{
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+    /* socket locked */
+    if (KNOTE_ATTACH(&so->so_snd.sb_sel.si_note, kn))
+        so->so_snd.sb_flags |= SB_KNOTE;
+
+    /* determine if it's already fired */
+    return filt_sowrite_common(kn, so);
+}
+
 static void
-filt_sockdetach(struct knote *kn)
+filt_sowdetach(struct knote *kn)
 {
     struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
     socket_lock(so, 1);
 
-    if ((so->so_flags & SOF_KNOTE) != 0)
-        if (KNOTE_DETACH(&so->so_klist, kn))
-            so->so_flags &= ~SOF_KNOTE;
+    if (so->so_snd.sb_flags & SB_KNOTE)
+        if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn))
+            so->so_snd.sb_flags &= ~SB_KNOTE;
     socket_unlock(so, 1);
 }
 
+/*ARGSUSED*/
 static int
-filt_sockev(struct knote *kn, long hint)
+filt_sowrite(struct knote *kn, long hint)
 {
-    int ret = 0, locked = 0;
     struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
-    long ev_hint = (hint & SO_FILT_HINT_EV);
-    uint32_t level_trigger = 0;
+    int ret;
 
-    if ((hint & SO_FILT_HINT_LOCKED) == 0) {
+    if ((hint & SO_FILT_HINT_LOCKED) == 0)
         socket_lock(so, 1);
-        locked = 1;
+
+    ret = filt_sowrite_common(kn, so);
+
+    if ((hint & SO_FILT_HINT_LOCKED) == 0)
+        socket_unlock(so, 1);
+
+    return ret;
+}
+
+static int
+filt_sowtouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+    int ret;
+
+    socket_lock(so, 1);
+
+    /* save off the new input fflags and data */
+    kn->kn_sfflags = kev->fflags;
+    kn->kn_sdata = kev->data;
+    if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+        kn->kn_udata = kev->udata;
+
+    /* determine if these changes result in a triggered event */
+    ret = filt_sowrite_common(kn, so);
+
+    socket_unlock(so, 1);
+
+    return ret;
+}
+
+static int
+filt_sowprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+    int ret;
+
+    socket_lock(so, 1);
+    ret = filt_sowrite_common(kn, so);
+    if (ret) {
+        *kev = kn->kn_kevent;
+        if (kn->kn_flags & EV_CLEAR) {
+            kn->kn_fflags = 0;
+            kn->kn_data = 0;
+        }
     }
+    socket_unlock(so, 1);
+    return ret;
+}
+
+static int
+filt_sockev_common(struct knote *kn, struct socket *so, long ev_hint)
+{
+    int ret = 0;
+    uint32_t level_trigger = 0;
 
     if (ev_hint & SO_FILT_HINT_CONNRESET) {
         kn->kn_fflags |= NOTE_CONNRESET;
@@ -6055,6 +6406,11 @@
         kn->kn_fflags |= NOTE_CONNINFO_UPDATED;
     }
 
+    if ((ev_hint & SO_FILT_HINT_NOTIFY_ACK) ||
+        tcp_notify_ack_active(so)) {
+        kn->kn_fflags |= NOTE_NOTIFY_ACK;
+    }
+
     if ((so->so_state & SS_CANTRCVMORE)
#if CONTENT_FILTER
         && cfil_sock_data_pending(&so->so_rcv) == 0
@@ -6111,32 +6467,123 @@
     if ((kn->kn_fflags & ~level_trigger) != 0)
         ret = 1;
 
-    if (locked)
-        socket_unlock(so, 1);
-
     return (ret);
 }
 
+static int
+filt_sockattach(struct knote *kn, __unused struct kevent_internal_s *kev)
+{
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+    /* socket locked */
+    kn->kn_hookid = 0;
+    if (KNOTE_ATTACH(&so->so_klist, kn))
+        so->so_flags |= SOF_KNOTE;
+
+    /* determine if event already fired */
+    return filt_sockev_common(kn, so, 0);
0); +} + static void -filt_socktouch(struct knote *kn, struct kevent_internal_s *kev, long type) +filt_sockdetach(struct knote *kn) { -#pragma unused(kev) - switch (type) { - case EVENT_REGISTER: - { - uint32_t changed_flags; - changed_flags = (kn->kn_sfflags ^ kn->kn_hookid); + struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + socket_lock(so, 1); - /* - * Since we keep track of events that are already - * delivered, if any of those events are not requested - * anymore the state related to them can be reset - */ - kn->kn_hookid &= - ~(changed_flags & EVFILT_SOCK_LEVEL_TRIGGER_MASK); - break; + if ((so->so_flags & SOF_KNOTE) != 0) + if (KNOTE_DETACH(&so->so_klist, kn)) + so->so_flags &= ~SOF_KNOTE; + socket_unlock(so, 1); +} + +static int +filt_sockev(struct knote *kn, long hint) +{ + int ret = 0, locked = 0; + struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + long ev_hint = (hint & SO_FILT_HINT_EV); + + if ((hint & SO_FILT_HINT_LOCKED) == 0) { + socket_lock(so, 1); + locked = 1; } - case EVENT_PROCESS: + + ret = filt_sockev_common(kn, so, ev_hint); + + if (locked) + socket_unlock(so, 1); + + return ret; +} + + + +/* + * filt_socktouch - update event state + */ +static int +filt_socktouch( + struct knote *kn, + struct kevent_internal_s *kev) +{ + struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + uint32_t changed_flags; + int ret; + + socket_lock(so, 1); + + /* save off the [result] data and fflags */ + changed_flags = (kn->kn_sfflags ^ kn->kn_hookid); + + /* save off the new input fflags and data */ + kn->kn_sfflags = kev->fflags; + kn->kn_sdata = kev->data; + if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) + kn->kn_udata = kev->udata; + + /* restrict the current results to the (smaller?) set of new interest */ + /* + * For compatibility with previous implementations, we leave kn_fflags + * as they were before. + */ + //kn->kn_fflags &= kev->fflags; + + /* + * Since we keep track of events that are already + * delivered, if any of those events are not requested + * anymore the state related to them can be reset + */ + kn->kn_hookid &= + ~(changed_flags & EVFILT_SOCK_LEVEL_TRIGGER_MASK); + + /* determine if we have events to deliver */ + ret = filt_sockev_common(kn, so, 0); + + socket_unlock(so, 1); + + return ret; +} + +/* + * filt_sockprocess - query event fired state and return data + */ +static int +filt_sockprocess( + struct knote *kn, + struct filt_process_s *data, + struct kevent_internal_s *kev) +{ +#pragma unused(data) + + struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + int ret = 0; + + socket_lock(so, 1); + + ret = filt_sockev_common(kn, so, 0); + if (ret) { + *kev = kn->kn_kevent; + /* * Store the state of the events being delivered. 
+/*
+ * filt_sockprocess - query event fired state and return data
+ */
+static int
+filt_sockprocess(
+    struct knote *kn,
+    struct filt_process_s *data,
+    struct kevent_internal_s *kev)
+{
+#pragma unused(data)
+
+    struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+    int ret = 0;
+
+    socket_lock(so, 1);
+
+    ret = filt_sockev_common(kn, so, 0);
+    if (ret) {
+        *kev = kn->kn_kevent;
+
         /*
         * Store the state of the events being delivered. This
         * state can be used to deliver level triggered events
@@ -6145,7 +6592,7 @@
         */
         if (kn->kn_fflags != 0)
             kn->kn_hookid |= (kn->kn_fflags &
-                EVFILT_SOCK_LEVEL_TRIGGER_MASK);
+                EVFILT_SOCK_LEVEL_TRIGGER_MASK);
 
         /*
         * NOTE_RESUME and NOTE_SUSPEND are an exception, deliver
@@ -6156,10 +6603,16 @@
             kn->kn_hookid &= ~NOTE_RESUME;
         if (kn->kn_fflags & NOTE_RESUME)
             kn->kn_hookid &= ~NOTE_SUSPEND;
-        break;
-    default:
-        break;
+
+        if (kn->kn_flags & EV_CLEAR) {
+            kn->kn_data = 0;
+            kn->kn_fflags = 0;
+        }
     }
+
+    socket_unlock(so, 1);
+
+    return ret;
 }
 
 void
@@ -6167,6 +6620,13 @@ get_sockev_state(struct socket *so, u_int32_t *statep)
 {
     u_int32_t state = *(statep);
 
+    /*
+     * If the state variable is already used by a previous event,
+     * reset it.
+     */
+    if (state != 0)
+        return;
+
     if (so->so_state & SS_ISCONNECTED)
         state |= SOCKEV_CONNECTED;
     else
@@ -6195,19 +6655,18 @@ solockhistory_nr(struct socket *so)
     return (lock_history_str);
 }
 
-int
+void
 socket_lock(struct socket *so, int refcount)
 {
-    int error = 0;
     void *lr_saved;
 
     lr_saved = __builtin_return_address(0);
 
     if (so->so_proto->pr_lock) {
-        error = (*so->so_proto->pr_lock)(so, refcount, lr_saved);
+        (*so->so_proto->pr_lock)(so, refcount, lr_saved);
     } else {
 #ifdef MORE_LOCKING_DEBUG
-        lck_mtx_assert(so->so_proto->pr_domain->dom_mtx,
+        LCK_MTX_ASSERT(so->so_proto->pr_domain->dom_mtx,
             LCK_MTX_ASSERT_NOTOWNED);
 #endif
         lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
@@ -6216,14 +6675,37 @@ socket_lock(struct socket *so, int refcount)
         so->lock_lr[so->next_lock_lr] = lr_saved;
         so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
     }
+}
 
-    return (error);
+void
+socket_lock_assert_owned(struct socket *so)
+{
+    lck_mtx_t *mutex_held;
+
+    if (so->so_proto->pr_getlock != NULL)
+        mutex_held = (*so->so_proto->pr_getlock)(so, 0);
+    else
+        mutex_held = so->so_proto->pr_domain->dom_mtx;
+
+    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
 }
 
 int
+socket_try_lock(struct socket *so)
+{
+    lck_mtx_t *mtx;
+
+    if (so->so_proto->pr_getlock != NULL)
+        mtx = (*so->so_proto->pr_getlock)(so, 0);
+    else
+        mtx = so->so_proto->pr_domain->dom_mtx;
+
+    return (lck_mtx_try_lock(mtx));
+}
+
+void
 socket_unlock(struct socket *so, int refcount)
 {
-    int error = 0;
     void *lr_saved;
     lck_mtx_t *mutex_held;
 
@@ -6235,11 +6717,11 @@ socket_unlock(struct socket *so, int refcount)
     }
 
     if (so && so->so_proto->pr_unlock) {
-        error = (*so->so_proto->pr_unlock)(so, refcount, lr_saved);
+        (*so->so_proto->pr_unlock)(so, refcount, lr_saved);
     } else {
         mutex_held = so->so_proto->pr_domain->dom_mtx;
 #ifdef MORE_LOCKING_DEBUG
-        lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+        LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
 #endif
         so->unlock_lr[so->next_unlock_lr] = lr_saved;
         so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
@@ -6259,8 +6741,6 @@ socket_unlock(struct socket *so, int refcount)
         }
         lck_mtx_unlock(mutex_held);
     }
-
-    return (error);
 }
 
 /* Called with socket locked, will unlock socket */
@@ -6273,7 +6753,7 @@ sofree(struct socket *so)
         mutex_held = (*so->so_proto->pr_getlock)(so, 0);
     else
         mutex_held = so->so_proto->pr_domain->dom_mtx;
-    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
 
     sofreelastref(so, 0);
 }
@@ -6349,18 +6829,23 @@ sosetdefunct(struct proc *p, struct socket *so, int level, boolean_t noforce)
     if (so->so_flags & SOF_NODEFUNCT) {
         if (noforce) {
             err = EOPNOTSUPP;
@@ -6349,18 +6829,23 @@ sosetdefunct(struct proc *p, struct socket *so, int level, boolean_t noforce)
 	if (so->so_flags & SOF_NODEFUNCT) {
 		if (noforce) {
 			err = EOPNOTSUPP;
-			SODEFUNCTLOG(("%s[%d]: (target pid %d level %d) "
-			    "so 0x%llx [%d,%d] is not eligible for defunct "
-			    "(%d)\n", __func__, proc_selfpid(), proc_pid(p),
-			    level, (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-			    SOCK_DOM(so), SOCK_TYPE(so), err));
+			SODEFUNCTLOG("%s[%d, %s]: (target pid %d "
+			    "name %s level %d) so 0x%llx [%d,%d] "
+			    "is not eligible for defunct "
+			    "(%d)\n", __func__, proc_selfpid(),
+			    proc_best_name(current_proc()), proc_pid(p),
+			    proc_best_name(p), level,
+			    (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+			    SOCK_DOM(so), SOCK_TYPE(so), err);
 			return (err);
 		}
 		so->so_flags &= ~SOF_NODEFUNCT;
-		SODEFUNCTLOG(("%s[%d]: (target pid %d level %d) so 0x%llx "
-		    "[%d,%d] defunct by force\n", __func__, proc_selfpid(),
-		    proc_pid(p), level, (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-		    SOCK_DOM(so), SOCK_TYPE(so)));
+		SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s level %d) "
+		    "so 0x%llx [%d,%d] defunct by force\n", __func__,
+		    proc_selfpid(), proc_best_name(current_proc()),
+		    proc_pid(p), proc_best_name(p), level,
+		    (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+		    SOCK_DOM(so), SOCK_TYPE(so));
 	} else if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) {
 		struct inpcb *inp = (struct inpcb *)so->so_pcb;
 		struct ifnet *ifp = inp->inp_last_outifp;
@@ -6373,20 +6858,22 @@ sosetdefunct(struct proc *p, struct socket *so, int level, boolean_t noforce)
 			OSIncrementAtomic(&soextbkidlestat.so_xbkidle_notime);
 		} else if (noforce) {
 			OSIncrementAtomic(&soextbkidlestat.so_xbkidle_active);
-
+
 			so->so_flags1 |= SOF1_EXTEND_BK_IDLE_INPROG;
 			so->so_extended_bk_start = net_uptime();
 			OSBitOrAtomic(P_LXBKIDLEINPROG, &p->p_ladvflag);
-
+
 			inpcb_timer_sched(inp->inp_pcbinfo, INPCB_TIMER_LAZY);
-
+
 			err = EOPNOTSUPP;
-			SODEFUNCTLOG(("%s[%d]: (target pid %d level %d) "
-			    "extend bk idle "
-			    "so 0x%llx rcv hw %d cc %d\n",
-			    __func__, proc_selfpid(), proc_pid(p),
-			    level, (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc));
+			SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s "
+			    "level %d) extend bk idle so 0x%llx rcv hw %d "
+			    "cc %d\n",
+			    __func__, proc_selfpid(),
+			    proc_best_name(current_proc()), proc_pid(p),
+			    proc_best_name(p), level,
+			    (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
 			return (err);
 		} else {
 			OSIncrementAtomic(&soextbkidlestat.so_xbkidle_forced);
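
Every SODEFUNCTLOG call site changes from the double-parenthesized form to a plain variadic call, which implies the macro moved from the C89 single-argument idiom to a C99 variadic macro. The real definition lives in a header outside this diff (and gates on a logging knob), so the following is only a sketch of the two idioms; the names and the printf body are placeholders:

#include <stdio.h>

static int sodefunctlog = 1;		/* stand-in for the kernel knob */

/* Old idiom: one macro parameter; callers wrap the entire printf
 * argument list in an extra set of parentheses. */
#define SODEFUNCTLOG_OLD(x)	do { if (sodefunctlog) printf x; } while (0)

/* New idiom: C99 variadic macro; ##__VA_ARGS__ swallows the trailing
 * comma when only a format string is passed. */
#define SODEFUNCTLOG_NEW(fmt, ...) \
	do { if (sodefunctlog) printf(fmt, ##__VA_ARGS__); } while (0)

int
main(void)
{
	SODEFUNCTLOG_OLD(("pid %d level %d\n", 100, 2));  /* double parens */
	SODEFUNCTLOG_NEW("pid %d level %d\n", 100, 2);    /* normal call */
	return (0);
}
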
" extbkidle" : ""); return (err); } @@ -6441,10 +6929,12 @@ sodefunct(struct proc *p, struct socket *so, int level) char d[MAX_IPv6_STR_LEN]; struct inpcb *inp = sotoinpcb(so); - SODEFUNCTLOG(("%s[%d]: (target pid %d level %d) so 0x%llx [%s " - "%s:%d -> %s:%d] is now defunct [rcv_si 0x%x, snd_si 0x%x, " - "rcv_fl 0x%x, snd_fl 0x%x]\n", __func__, proc_selfpid(), - proc_pid(p), level, (uint64_t)DEBUG_KERNEL_ADDRPERM(so), + SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s level %d) " + "so 0x%llx [%s %s:%d -> %s:%d] is now defunct " + "[rcv_si 0x%x, snd_si 0x%x, rcv_fl 0x%x, snd_fl 0x%x]\n", + __func__, proc_selfpid(), proc_best_name(current_proc()), + proc_pid(p), proc_best_name(p), level, + (uint64_t)DEBUG_KERNEL_ADDRPERM(so), (SOCK_TYPE(so) == SOCK_STREAM) ? "TCP" : "UDP", inet_ntop(SOCK_DOM(so), ((SOCK_DOM(so) == PF_INET) ? (void *)&inp->inp_laddr.s_addr : (void *)&inp->in6p_laddr), @@ -6454,15 +6944,18 @@ sodefunct(struct proc *p, struct socket *so, int level) d, sizeof (d)), ntohs(inp->in6p_fport), (uint32_t)rcv->sb_sel.si_flags, (uint32_t)snd->sb_sel.si_flags, - rcv->sb_flags, snd->sb_flags)); + rcv->sb_flags, snd->sb_flags); } else { - SODEFUNCTLOG(("%s[%d]: (target pid %d level %d) so 0x%llx " - "[%d,%d] is now defunct [rcv_si 0x%x, snd_si 0x%x, " - "rcv_fl 0x%x, snd_fl 0x%x]\n", __func__, proc_selfpid(), - proc_pid(p), level, (uint64_t)DEBUG_KERNEL_ADDRPERM(so), - SOCK_DOM(so), SOCK_TYPE(so), (uint32_t)rcv->sb_sel.si_flags, + SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s level %d) " + "so 0x%llx [%d,%d] is now defunct [rcv_si 0x%x, " + "snd_si 0x%x, rcv_fl 0x%x, snd_fl 0x%x]\n", __func__, + proc_selfpid(), proc_best_name(current_proc()), + proc_pid(p), proc_best_name(p), level, + (uint64_t)DEBUG_KERNEL_ADDRPERM(so), + SOCK_DOM(so), SOCK_TYPE(so), + (uint32_t)rcv->sb_sel.si_flags, (uint32_t)snd->sb_sel.si_flags, rcv->sb_flags, - snd->sb_flags)); + snd->sb_flags); } /* @@ -6491,7 +6984,7 @@ sodefunct(struct proc *p, struct socket *so, int level) * Explicitly handle connectionless-protocol disconnection * and release any remaining data in the socket buffers. 
 	/*
@@ -6491,7 +6984,7 @@ sodefunct(struct proc *p, struct socket *so, int level)
 	 * Explicitly handle connectionless-protocol disconnection
 	 * and release any remaining data in the socket buffers.
 	 */
-	if (!(so->so_flags & SS_ISDISCONNECTED))
+	if (!(so->so_state & SS_ISDISCONNECTED))
 		(void) soisdisconnected(so);
 
 	if (so->so_error == 0)
@@ -6508,6 +7001,7 @@ sodefunct(struct proc *p, struct socket *so, int level)
 		sbrelease(snd);
 	}
 	so->so_state |= SS_DEFUNCT;
+	OSIncrementAtomicLong((volatile long *)&sodefunct_calls);
 
 done:
 	return (0);
 }
@@ -6520,11 +7014,12 @@ soresume(struct proc *p, struct socket *so, int locked)
 		socket_lock(so, 1);
 
 	if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_INPROG) {
-		SODEFUNCTLOG(("%s[%d]: )target pid %d) so 0x%llx [%d,%d] "
-		    "resumed from bk idle\n",
-		    __func__, proc_selfpid(), proc_pid(p),
+		SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s) so 0x%llx "
+		    "[%d,%d] resumed from bk idle\n",
+		    __func__, proc_selfpid(), proc_best_name(current_proc()),
+		    proc_pid(p), proc_best_name(p),
 		    (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
-		    SOCK_DOM(so), SOCK_TYPE(so)));
+		    SOCK_DOM(so), SOCK_TYPE(so));
 
 		so->so_flags1 &= ~SOF1_EXTEND_BK_IDLE_INPROG;
 		so->so_extended_bk_start = 0;
@@ -6563,6 +7058,12 @@ so_set_extended_bk_idle(struct socket *so, int optval)
 		struct filedesc *fdp;
 		int count = 0;
 
+		/*
+		 * Unlock socket to avoid lock ordering issue with
+		 * the proc fd table lock
+		 */
+		socket_unlock(so, 0);
+
 		proc_fdlock(p);
 
 		fdp = p->p_fd;
@@ -6582,6 +7083,10 @@ so_set_extended_bk_idle(struct socket *so, int optval)
 			if (count >= soextbkidlestat.so_xbkidle_maxperproc)
 				break;
 		}
+		proc_fdunlock(p);
+
+		socket_lock(so, 0);
+
 		if (count >= soextbkidlestat.so_xbkidle_maxperproc) {
 			OSIncrementAtomic(&soextbkidlestat.so_xbkidle_toomany);
 			error = EBUSY;
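
The so_set_extended_bk_idle() hunks fix a lock-ordering hazard: the socket lock was held across proc_fdlock(), while other paths acquire the fd-table lock first. The sketch below restates the rule with plain pthreads; the mutex names and their ranking are hypothetical stand-ins for the kernel's socket and fd-table locks:

#include <pthread.h>

static pthread_mutex_t so_mtx = PTHREAD_MUTEX_INITIALIZER; /* "socket" */
static pthread_mutex_t fd_mtx = PTHREAD_MUTEX_INITIALIZER; /* "fd table" */

/*
 * Called with so_mtx held. Taking fd_mtx while holding so_mtx can
 * deadlock against a thread doing fd_mtx -> so_mtx (ABBA). So: drop
 * the lower-ranked lock, take the locks in the sanctioned order, then
 * reacquire and revalidate anything that may have changed meanwhile.
 */
static int
count_sockets_locked(void)
{
	int count = 0;

	pthread_mutex_unlock(&so_mtx);	/* drop the inner lock first */

	pthread_mutex_lock(&fd_mtx);
	/* ... walk the descriptor table, count sockets ... */
	pthread_mutex_unlock(&fd_mtx);

	pthread_mutex_lock(&so_mtx);	/* reacquire; revalidate state */
	return (count);
}
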
- "is" : "not")); - - proc_fdunlock(p); + "is" : "not"); } return (error); @@ -6649,10 +7152,10 @@ so_check_extended_bk_idle_time(struct socket *so) int ret = 1; if ((so->so_flags1 & SOF1_EXTEND_BK_IDLE_INPROG)) { - SODEFUNCTLOG(("%s[%d]: so 0x%llx [%d,%d]\n", - __func__, proc_selfpid(), + SODEFUNCTLOG("%s[%d, %s]: so 0x%llx [%d,%d]\n", + __func__, proc_selfpid(), proc_best_name(current_proc()), (uint64_t)DEBUG_KERNEL_ADDRPERM(so), - SOCK_DOM(so), SOCK_TYPE(so))); + SOCK_DOM(so), SOCK_TYPE(so)); if (net_uptime() - so->so_extended_bk_start > soextbkidlestat.so_xbkidle_time) { so_stop_extended_bk_idle(so); @@ -6667,7 +7170,7 @@ so_check_extended_bk_idle_time(struct socket *so) OSIncrementAtomic(&soextbkidlestat.so_xbkidle_resched); } } - + return (ret); } @@ -6685,7 +7188,7 @@ resume_proc_sockets(proc_t p) struct socket *so; fp = fdp->fd_ofiles[i]; - if (fp == NULL || + if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) continue; @@ -6715,6 +7218,7 @@ so_set_recv_anyif(struct socket *so, int optval) sotoinpcb(so)->inp_flags &= ~INP_RECV_ANYIF; } + return (ret); } @@ -6782,6 +7286,9 @@ so_set_restrictions(struct socket *so, uint32_t vals) } } + if (SOCK_DOM(so) == PF_MULTIPATH) + mptcp_set_restrictions(so); + return (0); } @@ -6793,125 +7300,6 @@ so_get_restrictions(struct socket *so) SO_RESTRICT_DENY_CELLULAR | SO_RESTRICT_DENY_EXPENSIVE)); } -struct sockaddr_entry * -sockaddrentry_alloc(int how) -{ - struct sockaddr_entry *se; - - se = (how == M_WAITOK) ? zalloc(se_zone) : zalloc_noblock(se_zone); - if (se != NULL) - bzero(se, se_zone_size); - - return (se); -} - -void -sockaddrentry_free(struct sockaddr_entry *se) -{ - if (se->se_addr != NULL) { - FREE(se->se_addr, M_SONAME); - se->se_addr = NULL; - } - zfree(se_zone, se); -} - -struct sockaddr_entry * -sockaddrentry_dup(const struct sockaddr_entry *src_se, int how) -{ - struct sockaddr_entry *dst_se; - - dst_se = sockaddrentry_alloc(how); - if (dst_se != NULL) { - int len = src_se->se_addr->sa_len; - - MALLOC(dst_se->se_addr, struct sockaddr *, - len, M_SONAME, how | M_ZERO); - if (dst_se->se_addr != NULL) { - bcopy(src_se->se_addr, dst_se->se_addr, len); - } else { - sockaddrentry_free(dst_se); - dst_se = NULL; - } - } - - return (dst_se); -} - -struct sockaddr_list * -sockaddrlist_alloc(int how) -{ - struct sockaddr_list *sl; - - sl = (how == M_WAITOK) ? 
-struct sockaddr_list *
-sockaddrlist_alloc(int how)
-{
-	struct sockaddr_list *sl;
-
-	sl = (how == M_WAITOK) ?
-	    zalloc(sl_zone) : zalloc_noblock(sl_zone);
-	if (sl != NULL) {
-		bzero(sl, sl_zone_size);
-		TAILQ_INIT(&sl->sl_head);
-	}
-	return (sl);
-}
-
-void
-sockaddrlist_free(struct sockaddr_list *sl)
-{
-	struct sockaddr_entry *se, *tse;
-
-	TAILQ_FOREACH_SAFE(se, &sl->sl_head, se_link, tse) {
-		sockaddrlist_remove(sl, se);
-		sockaddrentry_free(se);
-	}
-	VERIFY(sl->sl_cnt == 0 && TAILQ_EMPTY(&sl->sl_head));
-	zfree(sl_zone, sl);
-}
-
-void
-sockaddrlist_insert(struct sockaddr_list *sl, struct sockaddr_entry *se)
-{
-	VERIFY(!(se->se_flags & SEF_ATTACHED));
-	se->se_flags |= SEF_ATTACHED;
-	TAILQ_INSERT_TAIL(&sl->sl_head, se, se_link);
-	sl->sl_cnt++;
-	VERIFY(sl->sl_cnt != 0);
-}
-
-void
-sockaddrlist_remove(struct sockaddr_list *sl, struct sockaddr_entry *se)
-{
-	VERIFY(se->se_flags & SEF_ATTACHED);
-	se->se_flags &= ~SEF_ATTACHED;
-	VERIFY(sl->sl_cnt != 0);
-	sl->sl_cnt--;
-	TAILQ_REMOVE(&sl->sl_head, se, se_link);
-}
-
-struct sockaddr_list *
-sockaddrlist_dup(const struct sockaddr_list *src_sl, int how)
-{
-	struct sockaddr_entry *src_se, *tse;
-	struct sockaddr_list *dst_sl;
-
-	dst_sl = sockaddrlist_alloc(how);
-	if (dst_sl == NULL)
-		return (NULL);
-
-	TAILQ_FOREACH_SAFE(src_se, &src_sl->sl_head, se_link, tse) {
-		struct sockaddr_entry *dst_se;
-
-		if (src_se->se_addr == NULL)
-			continue;
-
-		dst_se = sockaddrentry_dup(src_se, how);
-		if (dst_se == NULL) {
-			sockaddrlist_free(dst_sl);
-			return (NULL);
-		}
-
-		sockaddrlist_insert(dst_sl, dst_se);
-	}
-	VERIFY(src_sl->sl_cnt == dst_sl->sl_cnt);
-
-	return (dst_sl);
-}
-
 int
 so_set_effective_pid(struct socket *so, int epid, struct proc *p)
 {