2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1982, 1986, 1991, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * @(#)in_pcb.c 8.4 (Berkeley) 5/24/95
61 * $FreeBSD: src/sys/netinet/in_pcb.c,v 1.59.2.17 2001/08/13 16:26:17 ume Exp $
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
68 #include <sys/domain.h>
69 #include <sys/protosw.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
76 #include <sys/kernel.h>
77 #include <sys/sysctl.h>
78 #include <sys/mcache.h>
79 #include <sys/kauth.h>
81 #include <libkern/OSAtomic.h>
82 #include <kern/locks.h>
84 #include <machine/limits.h>
87 #include <kern/zalloc.h>
91 #include <net/if_types.h>
92 #include <net/route.h>
93 #include <net/flowhash.h>
94 #include <net/flowadv.h>
96 #include <netinet/in.h>
97 #include <netinet/in_pcb.h>
98 #include <netinet/in_var.h>
99 #include <netinet/ip_var.h>
101 #include <netinet/ip6.h>
102 #include <netinet6/ip6_var.h>
106 #include <netinet6/ipsec.h>
107 #include <netkey/key.h>
110 #include <sys/kdebug.h>
111 #include <sys/random.h>
112 #include <dev/random/randomdev.h>
115 extern int ipsec_bypass
;
118 #define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8))
119 #define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))
121 struct in_addr zeroin_addr
;
124 * These configure the range of local port addresses assigned to
125 * "unspecified" outgoing connections/packets/whatever.
/*
 * Local-port auto-assignment ranges (tunable via the portrange
 * sysctl handlers below):
 *   ipport_lowfirstauto/ipport_lowlastauto  - "low" range, walked
 *     downward (first 1023 > last 600 per the trailing comments).
 *   ipport_firstauto/ipport_lastauto        - default ephemeral range.
 *   ipport_hifirstauto/ipport_hilastauto    - "high" range (49152-65535).
 *
 * NOTE(review): ipport_firstauto/ipport_lastauto appear to be defined
 * twice below.  This chunk seems to have lost the preprocessor
 * conditional (likely #ifndef __APPLE__ / #else / #endif) that
 * originally selected between the 1024-5000 pair and the
 * 49152-65535 pair -- confirm against the upstream file before
 * treating this as a genuine redefinition.
 */
127 int ipport_lowfirstauto
= IPPORT_RESERVED
- 1; /* 1023 */
128 int ipport_lowlastauto
= IPPORT_RESERVEDSTART
; /* 600 */
130 int ipport_firstauto
= IPPORT_RESERVED
; /* 1024 */
131 int ipport_lastauto
= IPPORT_USERRESERVED
; /* 5000 */
133 int ipport_firstauto
= IPPORT_HIFIRSTAUTO
; /* 49152 */
134 int ipport_lastauto
= IPPORT_HILASTAUTO
; /* 65535 */
136 int ipport_hifirstauto
= IPPORT_HIFIRSTAUTO
; /* 49152 */
137 int ipport_hilastauto
= IPPORT_HILASTAUTO
; /* 65535 */
/*
 * Clamp "var" into the inclusive range [min, max].
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and composes safely with if/else at the call site
 * (a bare if/else-if expansion is a dangling-else hazard).
 * Arguments may be evaluated more than once; do not pass
 * expressions with side effects.
 */
#define RANGECHK(var, min, max) do {					\
	if ((var) < (min)) { (var) = (min); }				\
	else if ((var) > (max)) { (var) = (max); }			\
} while (0)
144 sysctl_net_ipport_check SYSCTL_HANDLER_ARGS
146 #pragma unused(arg1, arg2)
147 int error
= sysctl_handle_int(oidp
,
148 oidp
->oid_arg1
, oidp
->oid_arg2
, req
);
150 RANGECHK(ipport_lowfirstauto
, 1, IPPORT_RESERVED
- 1);
151 RANGECHK(ipport_lowlastauto
, 1, IPPORT_RESERVED
- 1);
152 RANGECHK(ipport_firstauto
, IPPORT_RESERVED
, USHRT_MAX
);
153 RANGECHK(ipport_lastauto
, IPPORT_RESERVED
, USHRT_MAX
);
154 RANGECHK(ipport_hifirstauto
, IPPORT_RESERVED
, USHRT_MAX
);
155 RANGECHK(ipport_hilastauto
, IPPORT_RESERVED
, USHRT_MAX
);
162 SYSCTL_NODE(_net_inet_ip
, IPPROTO_IP
, portrange
, CTLFLAG_RW
|CTLFLAG_LOCKED
, 0, "IP Ports");
164 SYSCTL_PROC(_net_inet_ip_portrange
, OID_AUTO
, lowfirst
, CTLTYPE_INT
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
165 &ipport_lowfirstauto
, 0, &sysctl_net_ipport_check
, "I", "");
166 SYSCTL_PROC(_net_inet_ip_portrange
, OID_AUTO
, lowlast
, CTLTYPE_INT
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
167 &ipport_lowlastauto
, 0, &sysctl_net_ipport_check
, "I", "");
168 SYSCTL_PROC(_net_inet_ip_portrange
, OID_AUTO
, first
, CTLTYPE_INT
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
169 &ipport_firstauto
, 0, &sysctl_net_ipport_check
, "I", "");
170 SYSCTL_PROC(_net_inet_ip_portrange
, OID_AUTO
, last
, CTLTYPE_INT
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
171 &ipport_lastauto
, 0, &sysctl_net_ipport_check
, "I", "");
172 SYSCTL_PROC(_net_inet_ip_portrange
, OID_AUTO
, hifirst
, CTLTYPE_INT
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
173 &ipport_hifirstauto
, 0, &sysctl_net_ipport_check
, "I", "");
174 SYSCTL_PROC(_net_inet_ip_portrange
, OID_AUTO
, hilast
, CTLTYPE_INT
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
175 &ipport_hilastauto
, 0, &sysctl_net_ipport_check
, "I", "");
177 extern int udp_use_randomport
;
178 extern int tcp_use_randomport
;
180 /* Structs used for flowhash computation */
181 struct inp_flowhash_key_addr
{
191 struct inp_flowhash_key
{
192 struct inp_flowhash_key_addr infh_laddr
;
193 struct inp_flowhash_key_addr infh_faddr
;
194 u_int32_t infh_lport
;
195 u_int32_t infh_fport
;
197 u_int32_t infh_proto
;
198 u_int32_t infh_rand1
;
199 u_int32_t infh_rand2
;
202 u_int32_t inp_hash_seed
= 0;
204 static __inline
int infc_cmp(const struct inp_fc_entry
*,
205 const struct inp_fc_entry
*);
206 lck_grp_t
*inp_lck_grp
;
207 lck_grp_attr_t
*inp_lck_grp_attr
;
208 lck_attr_t
*inp_lck_attr
;
209 decl_lck_mtx_data(, inp_fc_lck
);
211 RB_HEAD(inp_fc_tree
, inp_fc_entry
) inp_fc_tree
;
212 RB_PROTOTYPE(inp_fc_tree
, inp_fc_entry
, infc_link
, infc_cmp
);
214 RB_GENERATE(inp_fc_tree
, inp_fc_entry
, infc_link
, infc_cmp
);
216 static unsigned int inp_fcezone_size
;
217 static struct zone
*inp_fcezone
;
218 #define INP_FCEZONE_NAME "inp_fcezone"
219 #define INP_FCEZONE_MAX 32
222 * in_pcb.c: manage the Protocol Control Blocks.
226 * Initialize data structures required to deliver
230 socket_flowadv_init(void)
232 inp_lck_grp_attr
= lck_grp_attr_alloc_init();
233 inp_lck_grp
= lck_grp_alloc_init("inp_lck_grp", inp_lck_grp_attr
);
235 inp_lck_attr
= lck_attr_alloc_init();
236 lck_mtx_init(&inp_fc_lck
, inp_lck_grp
, inp_lck_attr
);
238 RB_INIT(&inp_fc_tree
);
240 inp_fcezone_size
= P2ROUNDUP(sizeof (struct inp_fc_entry
),
242 inp_fcezone
= zinit(inp_fcezone_size
,
243 INP_FCEZONE_MAX
* inp_fcezone_size
, 0, INP_FCEZONE_NAME
);
244 if (inp_fcezone
== NULL
) {
245 panic("%s: failed allocating %s", __func__
,
249 zone_change(inp_fcezone
, Z_EXPAND
, TRUE
);
250 zone_change(inp_fcezone
, Z_CALLERACCT
, FALSE
);
254 * Allocate a PCB and associate it with the socket.
259 * ipsec_init_policy:??? [IPSEC]
262 in_pcballoc(struct socket
*so
, struct inpcbinfo
*pcbinfo
, __unused
struct proc
*p
)
275 if (so
->cached_in_sock_layer
== 0) {
277 printf("PCBALLOC calling zalloc for socket %x\n", so
);
279 inp
= (struct inpcb
*) zalloc(pcbinfo
->ipi_zone
);
282 bzero((caddr_t
)inp
, sizeof(*inp
));
286 printf("PCBALLOC reusing PCB for socket %x\n", so
);
288 inp
= (struct inpcb
*)(void *)so
->so_saved_pcb
;
289 temp
= inp
->inp_saved_ppcb
;
290 bzero((caddr_t
) inp
, sizeof(*inp
));
291 inp
->inp_saved_ppcb
= temp
;
294 inp
->inp_gencnt
= ++pcbinfo
->ipi_gencnt
;
295 inp
->inp_pcbinfo
= pcbinfo
;
296 inp
->inp_socket
= so
;
298 mac_error
= mac_inpcb_label_init(inp
, M_WAITOK
);
299 if (mac_error
!= 0) {
300 if (so
->cached_in_sock_layer
== 0)
301 zfree(pcbinfo
->ipi_zone
, inp
);
304 mac_inpcb_label_associate(so
, inp
);
306 // make sure inp_stat is always 64bit aligned
307 inp
->inp_stat
= (struct inp_stat
*)P2ROUNDUP(inp
->inp_stat_store
, sizeof(u_int64_t
));
308 if (((uintptr_t)inp
->inp_stat
- (uintptr_t)inp
->inp_stat_store
)
309 + sizeof(*inp
->inp_stat
) > sizeof(inp
->inp_stat_store
)) {
310 panic("insufficient space to align inp_stat");
313 so
->so_pcb
= (caddr_t
)inp
;
315 if (so
->so_proto
->pr_flags
& PR_PCBLOCK
) {
316 lck_mtx_init(&inp
->inpcb_mtx
, pcbinfo
->mtx_grp
, pcbinfo
->mtx_attr
);
321 if (ipsec_bypass
== 0) {
322 error
= ipsec_init_policy(so
, &inp
->inp_sp
);
324 zfree(pcbinfo
->ipi_zone
, inp
);
331 if (INP_SOCKAF(so
) == AF_INET6
&& !ip6_mapped_addr_on
)
332 inp
->inp_flags
|= IN6P_IPV6_V6ONLY
;
336 if (ip6_auto_flowlabel
)
337 inp
->inp_flags
|= IN6P_AUTOFLOWLABEL
;
339 lck_rw_lock_exclusive(pcbinfo
->mtx
);
340 inp
->inp_gencnt
= ++pcbinfo
->ipi_gencnt
;
341 LIST_INSERT_HEAD(pcbinfo
->listhead
, inp
, inp_list
);
342 pcbinfo
->ipi_count
++;
343 lck_rw_done(pcbinfo
->mtx
);
349 in_pcblookup_local_and_cleanup does everything
350 in_pcblookup_local does but it checks for a socket
351 that's going away. Since we know that the lock is
352 held read+write when this funciton is called, we
353 can safely dispose of this socket like the slow
354 timer would usually do and return NULL. This is
358 in_pcblookup_local_and_cleanup(
359 struct inpcbinfo
*pcbinfo
,
360 struct in_addr laddr
,
366 /* Perform normal lookup */
367 inp
= in_pcblookup_local(pcbinfo
, laddr
, lport_arg
, wild_okay
);
369 /* Check if we found a match but it's waiting to be disposed */
370 if (inp
&& inp
->inp_wantcnt
== WNT_STOPUSING
) {
371 struct socket
*so
= inp
->inp_socket
;
373 lck_mtx_lock(&inp
->inpcb_mtx
);
375 if (so
->so_usecount
== 0) {
376 if (inp
->inp_state
!= INPCB_STATE_DEAD
)
382 lck_mtx_unlock(&inp
->inpcb_mtx
);
389 #ifdef __APPLE_API_PRIVATE
391 in_pcb_conflict_post_msg(u_int16_t port
)
394 * Radar 5523020 send a kernel event notification if a non-participating socket tries to bind
395 * the port a socket who has set SOF_NOTIFYCONFLICT owns.
397 struct kev_msg ev_msg
;
398 struct kev_in_portinuse in_portinuse
;
400 bzero(&in_portinuse
, sizeof(struct kev_in_portinuse
));
401 bzero(&ev_msg
, sizeof(struct kev_msg
));
402 in_portinuse
.port
= ntohs(port
); /* port in host order */
403 in_portinuse
.req_pid
= proc_selfpid();
404 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
405 ev_msg
.kev_class
= KEV_NETWORK_CLASS
;
406 ev_msg
.kev_subclass
= KEV_INET_SUBCLASS
;
407 ev_msg
.event_code
= KEV_INET_PORTINUSE
;
408 ev_msg
.dv
[0].data_ptr
= &in_portinuse
;
409 ev_msg
.dv
[0].data_length
= sizeof(struct kev_in_portinuse
);
410 ev_msg
.dv
[1].data_length
= 0;
411 kev_post_msg(&ev_msg
);
416 * EADDRNOTAVAIL Address not available.
417 * EINVAL Invalid argument
418 * EAFNOSUPPORT Address family not supported [notdef]
419 * EACCES Permission denied
420 * EADDRINUSE Address in use
421 * EAGAIN Resource unavailable, try again
422 * priv_check_cred:EPERM Operation not permitted
425 in_pcbbind(struct inpcb
*inp
, struct sockaddr
*nam
, struct proc
*p
)
427 struct socket
*so
= inp
->inp_socket
;
428 unsigned short *lastport
;
429 struct sockaddr_in
*sin
;
430 struct inpcbinfo
*pcbinfo
= inp
->inp_pcbinfo
;
431 u_short lport
= 0, rand_port
= 0;
432 int wild
= 0, reuseport
= (so
->so_options
& SO_REUSEPORT
);
433 int error
, randomport
, conflict
= 0;
436 if (TAILQ_EMPTY(&in_ifaddrhead
)) /* XXX broken! */
437 return (EADDRNOTAVAIL
);
438 if (inp
->inp_lport
|| inp
->inp_laddr
.s_addr
!= INADDR_ANY
)
440 if ((so
->so_options
& (SO_REUSEADDR
|SO_REUSEPORT
)) == 0)
442 socket_unlock(so
, 0); /* keep reference on socket */
443 lck_rw_lock_exclusive(pcbinfo
->mtx
);
445 struct ifnet
*outif
= NULL
;
447 sin
= (struct sockaddr_in
*)(void *)nam
;
448 if (nam
->sa_len
!= sizeof (*sin
)) {
449 lck_rw_done(pcbinfo
->mtx
);
455 * We should check the family, but old programs
456 * incorrectly fail to initialize it.
458 if (sin
->sin_family
!= AF_INET
) {
459 lck_rw_done(pcbinfo
->mtx
);
461 return (EAFNOSUPPORT
);
464 lport
= sin
->sin_port
;
465 if (IN_MULTICAST(ntohl(sin
->sin_addr
.s_addr
))) {
467 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
468 * allow complete duplication of binding if
469 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
470 * and a multicast address is bound on both
471 * new and duplicated sockets.
473 if (so
->so_options
& SO_REUSEADDR
)
474 reuseport
= SO_REUSEADDR
|SO_REUSEPORT
;
475 } else if (sin
->sin_addr
.s_addr
!= INADDR_ANY
) {
477 sin
->sin_port
= 0; /* yech... */
478 if ((ifa
= ifa_ifwithaddr((struct sockaddr
*)sin
)) == 0) {
479 lck_rw_done(pcbinfo
->mtx
);
481 return (EADDRNOTAVAIL
);
485 outif
= ifa
->ifa_ifp
;
495 if (ntohs(lport
) < IPPORT_RESERVED
) {
496 cred
= kauth_cred_proc_ref(p
);
497 error
= priv_check_cred(cred
, PRIV_NETINET_RESERVEDPORT
, 0);
498 kauth_cred_unref(&cred
);
500 lck_rw_done(pcbinfo
->mtx
);
506 if (kauth_cred_getuid(so
->so_cred
) &&
507 !IN_MULTICAST(ntohl(sin
->sin_addr
.s_addr
))) {
508 t
= in_pcblookup_local_and_cleanup(inp
->inp_pcbinfo
,
509 sin
->sin_addr
, lport
, INPLOOKUP_WILDCARD
);
511 (ntohl(sin
->sin_addr
.s_addr
) != INADDR_ANY
||
512 ntohl(t
->inp_laddr
.s_addr
) != INADDR_ANY
||
513 (t
->inp_socket
->so_options
&
514 SO_REUSEPORT
) == 0) &&
515 (kauth_cred_getuid(so
->so_cred
) !=
516 kauth_cred_getuid(t
->inp_socket
->so_cred
)) &&
517 ((t
->inp_socket
->so_flags
& SOF_REUSESHAREUID
) == 0) &&
518 (ntohl(sin
->sin_addr
.s_addr
) != INADDR_ANY
||
519 ntohl(t
->inp_laddr
.s_addr
) != INADDR_ANY
))
521 #ifdef __APPLE_API_PRIVATE
523 if ((t
->inp_socket
->so_flags
& SOF_NOTIFYCONFLICT
) && ((so
->so_flags
& SOF_NOTIFYCONFLICT
) == 0))
526 lck_rw_done(pcbinfo
->mtx
);
529 in_pcb_conflict_post_msg(lport
);
531 lck_rw_done(pcbinfo
->mtx
);
532 #endif /* __APPLE_API_PRIVATE */
538 t
= in_pcblookup_local_and_cleanup(pcbinfo
, sin
->sin_addr
,
541 (reuseport
& t
->inp_socket
->so_options
) == 0) {
543 if (ntohl(sin
->sin_addr
.s_addr
) !=
545 ntohl(t
->inp_laddr
.s_addr
) !=
547 INP_SOCKAF(so
) != AF_INET6
||
548 INP_SOCKAF(t
->inp_socket
) != AF_INET6
)
551 #ifdef __APPLE_API_PRIVATE
553 if ((t
->inp_socket
->so_flags
& SOF_NOTIFYCONFLICT
) && ((so
->so_flags
& SOF_NOTIFYCONFLICT
) == 0))
556 lck_rw_done(pcbinfo
->mtx
);
559 in_pcb_conflict_post_msg(lport
);
561 lck_rw_done(pcbinfo
->mtx
);
562 #endif /* __APPLE_API_PRIVATE */
568 inp
->inp_laddr
= sin
->sin_addr
;
569 inp
->inp_last_outifp
= outif
;
575 randomport
= (so
->so_flags
& SOF_BINDRANDOMPORT
) ||
576 (so
->so_type
== SOCK_STREAM
? tcp_use_randomport
: udp_use_randomport
);
578 inp
->inp_flags
|= INP_ANONPORT
;
580 if (inp
->inp_flags
& INP_HIGHPORT
) {
581 first
= ipport_hifirstauto
; /* sysctl */
582 last
= ipport_hilastauto
;
583 lastport
= &pcbinfo
->lasthi
;
584 } else if (inp
->inp_flags
& INP_LOWPORT
) {
585 cred
= kauth_cred_proc_ref(p
);
586 error
= priv_check_cred(cred
, PRIV_NETINET_RESERVEDPORT
, 0);
587 kauth_cred_unref(&cred
);
589 lck_rw_done(pcbinfo
->mtx
);
593 first
= ipport_lowfirstauto
; /* 1023 */
594 last
= ipport_lowlastauto
; /* 600 */
595 lastport
= &pcbinfo
->lastlow
;
597 first
= ipport_firstauto
; /* sysctl */
598 last
= ipport_lastauto
;
599 lastport
= &pcbinfo
->lastport
;
601 /* No point in randomizing if only one port is available */
606 * Simple check to ensure all ports are not used up causing
609 * We split the two cases (up and down) so that the direction
610 * is not being tested on each round of the loop.
617 read_random(&rand_port
, sizeof(rand_port
));
618 *lastport
= first
- (rand_port
% (first
- last
));
620 count
= first
- last
;
623 if (count
-- < 0) { /* completely used? */
624 lck_rw_done(pcbinfo
->mtx
);
626 inp
->inp_laddr
.s_addr
= INADDR_ANY
;
627 inp
->inp_last_outifp
= NULL
;
628 return (EADDRNOTAVAIL
);
631 if (*lastport
> first
|| *lastport
< last
)
633 lport
= htons(*lastport
);
634 } while (in_pcblookup_local_and_cleanup(pcbinfo
,
635 inp
->inp_laddr
, lport
, wild
));
641 read_random(&rand_port
, sizeof(rand_port
));
642 *lastport
= first
+ (rand_port
% (first
- last
));
644 count
= last
- first
;
647 if (count
-- < 0) { /* completely used? */
648 lck_rw_done(pcbinfo
->mtx
);
650 inp
->inp_laddr
.s_addr
= INADDR_ANY
;
651 inp
->inp_last_outifp
= NULL
;
652 return (EADDRNOTAVAIL
);
655 if (*lastport
< first
|| *lastport
> last
)
657 lport
= htons(*lastport
);
658 } while (in_pcblookup_local_and_cleanup(pcbinfo
,
659 inp
->inp_laddr
, lport
, wild
));
663 inp
->inp_lport
= lport
;
664 if (in_pcbinshash(inp
, 1) != 0) {
665 inp
->inp_laddr
.s_addr
= INADDR_ANY
;
667 inp
->inp_last_outifp
= NULL
;
668 lck_rw_done(pcbinfo
->mtx
);
671 lck_rw_done(pcbinfo
->mtx
);
672 sflt_notify(so
, sock_evt_bound
, NULL
);
677 * Transform old in_pcbconnect() into an inner subroutine for new
678 * in_pcbconnect(): Do some validity-checking on the remote
679 * address (in mbuf 'nam') and then determine local host address
680 * (i.e., which interface) to use to access that remote host.
682 * This preserves definition of in_pcbconnect(), while supporting a
683 * slightly different version for T/TCP. (This is more than
684 * a bit of a kludge, but cleaning up the internal interfaces would
685 * have forced minor changes in every protocol).
688 * EINVAL Invalid argument
689 * EAFNOSUPPORT Address family not supported
690 * EADDRNOTAVAIL Address not available
693 in_pcbladdr(struct inpcb
*inp
, struct sockaddr
*nam
,
694 struct sockaddr_in
*plocal_sin
, struct ifnet
**outif
)
696 struct in_ifaddr
*ia
;
697 struct sockaddr_in
*sin
= (struct sockaddr_in
*)(void *)nam
;
699 if (nam
->sa_len
!= sizeof (*sin
))
701 if (sin
->sin_family
!= AF_INET
)
702 return (EAFNOSUPPORT
);
703 if (sin
->sin_port
== 0)
704 return (EADDRNOTAVAIL
);
706 lck_rw_lock_shared(in_ifaddr_rwlock
);
707 if (!TAILQ_EMPTY(&in_ifaddrhead
)) {
708 ia
= TAILQ_FIRST(&in_ifaddrhead
);
710 * If the destination address is INADDR_ANY,
711 * use the primary local address.
712 * If the supplied address is INADDR_BROADCAST,
713 * and the primary interface supports broadcast,
714 * choose the broadcast address for that interface.
716 IFA_LOCK_SPIN(&ia
->ia_ifa
);
717 if (sin
->sin_addr
.s_addr
== INADDR_ANY
)
718 sin
->sin_addr
= IA_SIN(ia
)->sin_addr
;
719 else if (sin
->sin_addr
.s_addr
== (u_int32_t
)INADDR_BROADCAST
&&
720 (ia
->ia_ifp
->if_flags
& IFF_BROADCAST
))
721 sin
->sin_addr
= satosin(&ia
->ia_broadaddr
)->sin_addr
;
722 IFA_UNLOCK(&ia
->ia_ifa
);
725 lck_rw_done(in_ifaddr_rwlock
);
727 if (inp
->inp_laddr
.s_addr
== INADDR_ANY
) {
729 unsigned int ifscope
= IFSCOPE_NONE
;
732 * If the socket is bound to a specifc interface, the
733 * optional scoped takes precedence over that if it
734 * is set by the caller.
736 ia
= (struct in_ifaddr
*)0;
738 if (outif
!= NULL
&& *outif
!= NULL
)
739 ifscope
= (*outif
)->if_index
;
740 else if (inp
->inp_flags
& INP_BOUND_IF
)
741 ifscope
= inp
->inp_boundifp
->if_index
;
743 nocell
= (inp
->inp_flags
& INP_NO_IFT_CELLULAR
) ? 1 : 0;
745 * If route is known or can be allocated now,
746 * our src addr is taken from the i/f, else punt.
747 * Note that we should check the address family of the cached
748 * destination, in case of sharing the cache with IPv6.
750 ro
= &inp
->inp_route
;
751 if (ro
->ro_rt
!= NULL
)
752 RT_LOCK_SPIN(ro
->ro_rt
);
753 if (ro
->ro_rt
&& (ro
->ro_dst
.sa_family
!= AF_INET
||
754 satosin(&ro
->ro_dst
)->sin_addr
.s_addr
!=
755 sin
->sin_addr
.s_addr
||
756 inp
->inp_socket
->so_options
& SO_DONTROUTE
||
757 ro
->ro_rt
->generation_id
!= route_generation
)) {
758 RT_UNLOCK(ro
->ro_rt
);
762 if ((inp
->inp_socket
->so_options
& SO_DONTROUTE
) == 0 && /*XXX*/
763 (ro
->ro_rt
== NULL
|| ro
->ro_rt
->rt_ifp
== NULL
)) {
764 if (ro
->ro_rt
!= NULL
)
765 RT_UNLOCK(ro
->ro_rt
);
766 /* No route yet, so try to acquire one */
767 bzero(&ro
->ro_dst
, sizeof(struct sockaddr_in
));
768 ro
->ro_dst
.sa_family
= AF_INET
;
769 ro
->ro_dst
.sa_len
= sizeof(struct sockaddr_in
);
770 ((struct sockaddr_in
*)(void *)&ro
->ro_dst
)->sin_addr
=
772 rtalloc_scoped(ro
, ifscope
);
773 if (ro
->ro_rt
!= NULL
)
774 RT_LOCK_SPIN(ro
->ro_rt
);
777 * If the route points to a cellular interface and the
778 * caller forbids our using interfaces of such type,
779 * pretend that there is no route.
781 if (nocell
&& ro
->ro_rt
!= NULL
) {
782 RT_LOCK_ASSERT_HELD(ro
->ro_rt
);
783 if (ro
->ro_rt
->rt_ifp
->if_type
== IFT_CELLULAR
) {
784 RT_UNLOCK(ro
->ro_rt
);
787 soevent(inp
->inp_socket
,
788 (SO_FILT_HINT_LOCKED
|
789 SO_FILT_HINT_IFDENIED
));
793 * If we found a route, use the address
794 * corresponding to the outgoing interface
795 * unless it is the loopback (in case a route
796 * to our address on another net goes to loopback).
798 if (ro
->ro_rt
!= NULL
) {
799 /* Become a regular mutex */
800 RT_CONVERT_LOCK(ro
->ro_rt
);
801 if (!(ro
->ro_rt
->rt_ifp
->if_flags
& IFF_LOOPBACK
)) {
802 ia
= ifatoia(ro
->ro_rt
->rt_ifa
);
804 IFA_ADDREF(&ia
->ia_ifa
);
807 RT_UNLOCK(ro
->ro_rt
);
810 u_short fport
= sin
->sin_port
;
813 ia
= ifatoia(ifa_ifwithdstaddr(sintosa(sin
)));
815 ia
= ifatoia(ifa_ifwithnet_scoped(sintosa(sin
),
818 sin
->sin_port
= fport
;
820 lck_rw_lock_shared(in_ifaddr_rwlock
);
821 ia
= TAILQ_FIRST(&in_ifaddrhead
);
823 IFA_ADDREF(&ia
->ia_ifa
);
824 lck_rw_done(in_ifaddr_rwlock
);
827 * If the source address belongs to a cellular interface
828 * and the socket forbids our using interfaces of such
829 * type, pretend that there is no source address.
831 if (nocell
&& ia
!= NULL
&&
832 ia
->ia_ifa
.ifa_ifp
->if_type
== IFT_CELLULAR
) {
833 IFA_REMREF(&ia
->ia_ifa
);
835 soevent(inp
->inp_socket
,
836 (SO_FILT_HINT_LOCKED
|
837 SO_FILT_HINT_IFDENIED
));
840 return (EADDRNOTAVAIL
);
843 * If the destination address is multicast and an outgoing
844 * interface has been set as a multicast option, use the
845 * address of that interface as our source address.
847 if (IN_MULTICAST(ntohl(sin
->sin_addr
.s_addr
)) &&
848 inp
->inp_moptions
!= NULL
) {
849 struct ip_moptions
*imo
;
852 imo
= inp
->inp_moptions
;
854 if (imo
->imo_multicast_ifp
!= NULL
&& (ia
== NULL
||
855 ia
->ia_ifp
!= imo
->imo_multicast_ifp
)) {
856 ifp
= imo
->imo_multicast_ifp
;
858 IFA_REMREF(&ia
->ia_ifa
);
859 lck_rw_lock_shared(in_ifaddr_rwlock
);
860 TAILQ_FOREACH(ia
, &in_ifaddrhead
, ia_link
) {
861 if (ia
->ia_ifp
== ifp
)
865 IFA_ADDREF(&ia
->ia_ifa
);
866 lck_rw_done(in_ifaddr_rwlock
);
869 return (EADDRNOTAVAIL
);
875 * Don't do pcblookup call here; return interface in plocal_sin
876 * and exit to caller, that will do the lookup.
878 IFA_LOCK_SPIN(&ia
->ia_ifa
);
879 *plocal_sin
= ia
->ia_addr
;
882 IFA_UNLOCK(&ia
->ia_ifa
);
883 IFA_REMREF(&ia
->ia_ifa
);
890 * Connect from a socket to a specified address.
891 * Both address and port must be specified in argument sin.
892 * If don't have a local address for this socket yet,
896 in_pcbconnect(struct inpcb
*inp
, struct sockaddr
*nam
, struct proc
*p
,
897 struct ifnet
**outif
)
899 struct sockaddr_in ifaddr
;
900 struct sockaddr_in
*sin
= (struct sockaddr_in
*)(void *)nam
;
905 * Call inner routine, to assign local interface address.
907 if ((error
= in_pcbladdr(inp
, nam
, &ifaddr
, outif
)) != 0)
910 socket_unlock(inp
->inp_socket
, 0);
911 pcb
= in_pcblookup_hash(inp
->inp_pcbinfo
, sin
->sin_addr
, sin
->sin_port
,
912 inp
->inp_laddr
.s_addr
? inp
->inp_laddr
: ifaddr
.sin_addr
,
913 inp
->inp_lport
, 0, NULL
);
914 socket_lock(inp
->inp_socket
, 0);
916 /* Check if the socket is still in a valid state. When we unlock this
917 * embryonic socket, it can get aborted if another thread is closing
918 * the listener (radar 7947600).
920 if ((inp
->inp_socket
->so_flags
& SOF_ABORTED
) != 0) {
925 in_pcb_checkstate(pcb
, WNT_RELEASE
, pcb
== inp
? 1 : 0);
928 if (inp
->inp_laddr
.s_addr
== INADDR_ANY
) {
929 if (inp
->inp_lport
== 0) {
930 error
= in_pcbbind(inp
, (struct sockaddr
*)0, p
);
934 if (!lck_rw_try_lock_exclusive(inp
->inp_pcbinfo
->mtx
)) {
935 /*lock inversion issue, mostly with udp multicast packets */
936 socket_unlock(inp
->inp_socket
, 0);
937 lck_rw_lock_exclusive(inp
->inp_pcbinfo
->mtx
);
938 socket_lock(inp
->inp_socket
, 0);
940 inp
->inp_laddr
= ifaddr
.sin_addr
;
941 inp
->inp_last_outifp
= (outif
!= NULL
) ? *outif
: NULL
;
942 inp
->inp_flags
|= INP_INADDR_ANY
;
945 if (!lck_rw_try_lock_exclusive(inp
->inp_pcbinfo
->mtx
)) {
946 /*lock inversion issue, mostly with udp multicast packets */
947 socket_unlock(inp
->inp_socket
, 0);
948 lck_rw_lock_exclusive(inp
->inp_pcbinfo
->mtx
);
949 socket_lock(inp
->inp_socket
, 0);
952 inp
->inp_faddr
= sin
->sin_addr
;
953 inp
->inp_fport
= sin
->sin_port
;
955 lck_rw_done(inp
->inp_pcbinfo
->mtx
);
960 in_pcbdisconnect(struct inpcb
*inp
)
963 inp
->inp_faddr
.s_addr
= INADDR_ANY
;
966 if (!lck_rw_try_lock_exclusive(inp
->inp_pcbinfo
->mtx
)) {
967 /*lock inversion issue, mostly with udp multicast packets */
968 socket_unlock(inp
->inp_socket
, 0);
969 lck_rw_lock_exclusive(inp
->inp_pcbinfo
->mtx
);
970 socket_lock(inp
->inp_socket
, 0);
974 lck_rw_done(inp
->inp_pcbinfo
->mtx
);
976 if (inp
->inp_socket
->so_state
& SS_NOFDREF
)
981 in_pcbdetach(struct inpcb
*inp
)
983 struct socket
*so
= inp
->inp_socket
;
985 if (so
->so_pcb
== 0) { /* we've been called twice */
986 panic("in_pcbdetach: inp=%p so=%p proto=%d so_pcb is null!\n",
987 inp
, so
, so
->so_proto
->pr_protocol
);
991 if (ipsec_bypass
== 0) {
992 ipsec4_delete_pcbpolicy(inp
);
996 /* mark socket state as dead */
997 if (in_pcb_checkstate(inp
, WNT_STOPUSING
, 1) != WNT_STOPUSING
)
998 panic("in_pcbdetach so=%p prot=%x couldn't set to STOPUSING\n", so
, so
->so_proto
->pr_protocol
);
1001 if (so
->cached_in_sock_layer
)
1002 printf("in_pcbdetach for cached socket %x flags=%x\n", so
, so
->so_flags
);
1004 printf("in_pcbdetach for allocated socket %x flags=%x\n", so
, so
->so_flags
);
1006 if ((so
->so_flags
& SOF_PCBCLEARING
) == 0) {
1008 struct ip_moptions
*imo
;
1011 if (inp
->inp_options
)
1012 (void)m_free(inp
->inp_options
);
1013 if ((rt
= inp
->inp_route
.ro_rt
) != NULL
) {
1014 inp
->inp_route
.ro_rt
= NULL
;
1017 imo
= inp
->inp_moptions
;
1018 inp
->inp_moptions
= NULL
;
1021 sofreelastref(so
, 0);
1022 inp
->inp_state
= INPCB_STATE_DEAD
;
1023 so
->so_flags
|= SOF_PCBCLEARING
; /* makes sure we're not called twice from so_close */
1029 in_pcbdispose(struct inpcb
*inp
)
1031 struct socket
*so
= inp
->inp_socket
;
1032 struct inpcbinfo
*ipi
= inp
->inp_pcbinfo
;
1035 if (inp
->inp_state
!= INPCB_STATE_DEAD
) {
1036 printf("in_pcbdispose: not dead yet? so=%p\n", so
);
1039 if (so
&& so
->so_usecount
!= 0)
1040 panic("%s: so %p so_usecount %d so_lockhistory %s\n",
1041 __func__
, so
, so
->so_usecount
,
1042 (so
!= NULL
) ? solockhistory_nr(so
) : "--");
1044 lck_rw_assert(ipi
->mtx
, LCK_RW_ASSERT_EXCLUSIVE
);
1046 inp
->inp_gencnt
= ++ipi
->ipi_gencnt
;
1047 /* access ipi in in_pcbremlists */
1048 in_pcbremlists(inp
);
1051 if (so
->so_proto
->pr_flags
& PR_PCBLOCK
) {
1052 sofreelastref(so
, 0);
1053 if (so
->so_rcv
.sb_cc
|| so
->so_snd
.sb_cc
) {
1055 printf("in_pcbdispose sb not cleaned up so=%p rc_cci=%x snd_cc=%x\n",
1056 so
, so
->so_rcv
.sb_cc
, so
->so_snd
.sb_cc
);
1058 sbrelease(&so
->so_rcv
);
1059 sbrelease(&so
->so_snd
);
1061 if (so
->so_head
!= NULL
)
1062 panic("in_pcbdispose, so=%p head still exist\n", so
);
1063 lck_mtx_unlock(&inp
->inpcb_mtx
);
1064 lck_mtx_destroy(&inp
->inpcb_mtx
, ipi
->mtx_grp
);
1066 so
->so_flags
|= SOF_PCBCLEARING
; /* makes sure we're not called twice from so_close */
1067 so
->so_saved_pcb
= (caddr_t
) inp
;
1069 inp
->inp_socket
= 0;
1071 mac_inpcb_label_destroy(inp
);
1074 * In case there a route cached after a detach (possible
1075 * in the tcp case), make sure that it is freed before
1076 * we deallocate the structure.
1078 if (inp
->inp_route
.ro_rt
!= NULL
) {
1079 rtfree(inp
->inp_route
.ro_rt
);
1080 inp
->inp_route
.ro_rt
= NULL
;
1082 if (so
->cached_in_sock_layer
== 0) {
1083 zfree(ipi
->ipi_zone
, inp
);
1089 printf("in_pcbdispose: no socket for inp=%p\n", inp
);
1094 * The calling convention of in_setsockaddr() and in_setpeeraddr() was
1095 * modified to match the pru_sockaddr() and pru_peeraddr() entry points
1096 * in struct pr_usrreqs, so that protocols can just reference then directly
1097 * without the need for a wrapper function. The socket must have a valid
1098 * (i.e., non-nil) PCB, but it should be impossible to get an invalid one
1099 * except through a kernel programming error, so it is acceptable to panic
1100 * (or in this case trap) if the PCB is invalid. (Actually, we don't trap
1101 * because there actually /is/ a programming error somewhere... XXX)
1103 * Returns: 0 Success
1104 * ENOBUFS No buffer space available
1105 * ECONNRESET Connection reset
1108 in_setsockaddr(struct socket
*so
, struct sockaddr
**nam
)
1111 struct sockaddr_in
*sin
;
1114 * Do the malloc first in case it blocks.
1116 MALLOC(sin
, struct sockaddr_in
*, sizeof *sin
, M_SONAME
, M_WAITOK
);
1119 bzero(sin
, sizeof *sin
);
1120 sin
->sin_family
= AF_INET
;
1121 sin
->sin_len
= sizeof(*sin
);
1123 inp
= sotoinpcb(so
);
1125 FREE(sin
, M_SONAME
);
1128 sin
->sin_port
= inp
->inp_lport
;
1129 sin
->sin_addr
= inp
->inp_laddr
;
1131 *nam
= (struct sockaddr
*)sin
;
1136 in_setpeeraddr(struct socket
*so
, struct sockaddr
**nam
)
1139 struct sockaddr_in
*sin
;
1142 * Do the malloc first in case it blocks.
1144 MALLOC(sin
, struct sockaddr_in
*, sizeof *sin
, M_SONAME
, M_WAITOK
);
1147 bzero((caddr_t
)sin
, sizeof (*sin
));
1148 sin
->sin_family
= AF_INET
;
1149 sin
->sin_len
= sizeof(*sin
);
1151 inp
= sotoinpcb(so
);
1153 FREE(sin
, M_SONAME
);
1156 sin
->sin_port
= inp
->inp_fport
;
1157 sin
->sin_addr
= inp
->inp_faddr
;
1159 *nam
= (struct sockaddr
*)sin
;
1164 in_pcbnotifyall(struct inpcbinfo
*pcbinfo
, struct in_addr faddr
,
1165 int errno
, void (*notify
)(struct inpcb
*, int))
1169 lck_rw_lock_shared(pcbinfo
->mtx
);
1171 LIST_FOREACH(inp
, pcbinfo
->listhead
, inp_list
) {
1173 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
1176 if (inp
->inp_faddr
.s_addr
!= faddr
.s_addr
||
1177 inp
->inp_socket
== NULL
)
1179 if (in_pcb_checkstate(inp
, WNT_ACQUIRE
, 0) == WNT_STOPUSING
)
1181 socket_lock(inp
->inp_socket
, 1);
1182 (*notify
)(inp
, errno
);
1183 (void)in_pcb_checkstate(inp
, WNT_RELEASE
, 1);
1184 socket_unlock(inp
->inp_socket
, 1);
1186 lck_rw_done(pcbinfo
->mtx
);
1190 * Check for alternatives when higher level complains
1191 * about service problems. For now, invalidate cached
1192 * routing information. If the route was created dynamically
1193 * (by a redirect), time to try a default gateway again.
1196 in_losing(struct inpcb
*inp
)
1199 struct rt_addrinfo info
;
1201 if ((rt
= inp
->inp_route
.ro_rt
) != NULL
) {
1202 struct in_ifaddr
*ia
;
1204 bzero((caddr_t
)&info
, sizeof(info
));
1206 info
.rti_info
[RTAX_DST
] =
1207 (struct sockaddr
*)&inp
->inp_route
.ro_dst
;
1208 info
.rti_info
[RTAX_GATEWAY
] = rt
->rt_gateway
;
1209 info
.rti_info
[RTAX_NETMASK
] = rt_mask(rt
);
1210 rt_missmsg(RTM_LOSING
, &info
, rt
->rt_flags
, 0);
1211 if (rt
->rt_flags
& RTF_DYNAMIC
) {
1213 * Prevent another thread from modifying rt_key,
1214 * rt_gateway via rt_setgate() after rt_lock is
1215 * dropped by marking the route as defunct.
1217 rt
->rt_flags
|= RTF_CONDEMNED
;
1219 (void) rtrequest(RTM_DELETE
, rt_key(rt
),
1220 rt
->rt_gateway
, rt_mask(rt
), rt
->rt_flags
,
1221 (struct rtentry
**)0);
1225 /* if the address is gone keep the old route in the pcb */
1226 if ((ia
= ifa_foraddr(inp
->inp_laddr
.s_addr
)) != NULL
) {
1227 inp
->inp_route
.ro_rt
= NULL
;
1229 IFA_REMREF(&ia
->ia_ifa
);
1232 * A new route can be allocated
1233 * the next time output is attempted.
1239 * After a routing change, flush old routing
1240 * and allocate a (hopefully) better one.
1243 in_rtchange(struct inpcb
*inp
, __unused
int errno
)
1247 if ((rt
= inp
->inp_route
.ro_rt
) != NULL
) {
1248 struct in_ifaddr
*ia
;
1250 if ((ia
= ifa_foraddr(inp
->inp_laddr
.s_addr
)) == NULL
) {
1251 return; /* we can't remove the route now. not sure if still ok to use src */
1253 IFA_REMREF(&ia
->ia_ifa
);
1255 inp
->inp_route
.ro_rt
= NULL
;
1257 * A new route can be allocated the next time
1258 * output is attempted.
1264 * Lookup a PCB based on the local address and port.
1267 in_pcblookup_local(struct inpcbinfo
*pcbinfo
, struct in_addr laddr
,
1268 unsigned int lport_arg
, int wild_okay
)
1271 int matchwild
= 3, wildcard
;
1272 u_short lport
= lport_arg
;
1274 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP
| DBG_FUNC_START
, 0,0,0,0,0);
1277 struct inpcbhead
*head
;
1279 * Look for an unconnected (wildcard foreign addr) PCB that
1280 * matches the local address and port we're looking for.
1282 head
= &pcbinfo
->hashbase
[INP_PCBHASH(INADDR_ANY
, lport
, 0, pcbinfo
->hashmask
)];
1283 LIST_FOREACH(inp
, head
, inp_hash
) {
1285 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
1288 if (inp
->inp_faddr
.s_addr
== INADDR_ANY
&&
1289 inp
->inp_laddr
.s_addr
== laddr
.s_addr
&&
1290 inp
->inp_lport
== lport
) {
1300 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP
| DBG_FUNC_END
, 0,0,0,0,0);
1303 struct inpcbporthead
*porthash
;
1304 struct inpcbport
*phd
;
1305 struct inpcb
*match
= NULL
;
1307 * Best fit PCB lookup.
1309 * First see if this local port is in use by looking on the
1312 porthash
= &pcbinfo
->porthashbase
[INP_PCBPORTHASH(lport
,
1313 pcbinfo
->porthashmask
)];
1314 LIST_FOREACH(phd
, porthash
, phd_hash
) {
1315 if (phd
->phd_port
== lport
)
1320 * Port is in use by one or more PCBs. Look for best
1323 LIST_FOREACH(inp
, &phd
->phd_pcblist
, inp_portlist
) {
1326 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
1329 if (inp
->inp_faddr
.s_addr
!= INADDR_ANY
)
1331 if (inp
->inp_laddr
.s_addr
!= INADDR_ANY
) {
1332 if (laddr
.s_addr
== INADDR_ANY
)
1334 else if (inp
->inp_laddr
.s_addr
!= laddr
.s_addr
)
1337 if (laddr
.s_addr
!= INADDR_ANY
)
1340 if (wildcard
< matchwild
) {
1342 matchwild
= wildcard
;
1343 if (matchwild
== 0) {
1349 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP
| DBG_FUNC_END
, match
,0,0,0,0);
1355 * Check if PCB exists in hash list.
1358 in_pcblookup_hash_exists(
1359 struct inpcbinfo
*pcbinfo
,
1360 struct in_addr faddr
,
1362 struct in_addr laddr
,
1369 struct inpcbhead
*head
;
1371 u_short fport
= fport_arg
, lport
= lport_arg
;
1378 * We may have found the pcb in the last lookup - check this first.
1381 lck_rw_lock_shared(pcbinfo
->mtx
);
1384 * First look for an exact match.
1386 head
= &pcbinfo
->hashbase
[INP_PCBHASH(faddr
.s_addr
, lport
, fport
,
1387 pcbinfo
->hashmask
)];
1388 LIST_FOREACH(inp
, head
, inp_hash
) {
1390 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
1393 if (ip_restrictrecvif
&& ifp
!= NULL
&&
1394 (ifp
->if_eflags
& IFEF_RESTRICTED_RECV
) &&
1395 !(inp
->inp_flags
& INP_RECV_ANYIF
))
1398 if (inp
->inp_faddr
.s_addr
== faddr
.s_addr
&&
1399 inp
->inp_laddr
.s_addr
== laddr
.s_addr
&&
1400 inp
->inp_fport
== fport
&&
1401 inp
->inp_lport
== lport
) {
1402 if ((found
= (inp
->inp_socket
!= NULL
))) {
1406 *uid
= kauth_cred_getuid(
1407 inp
->inp_socket
->so_cred
);
1408 *gid
= kauth_cred_getgid(
1409 inp
->inp_socket
->so_cred
);
1411 lck_rw_done(pcbinfo
->mtx
);
1416 struct inpcb
*local_wild
= NULL
;
1418 struct inpcb
*local_wild_mapped
= NULL
;
1421 head
= &pcbinfo
->hashbase
[INP_PCBHASH(INADDR_ANY
, lport
, 0,
1422 pcbinfo
->hashmask
)];
1423 LIST_FOREACH(inp
, head
, inp_hash
) {
1425 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
1428 if (ip_restrictrecvif
&& ifp
!= NULL
&&
1429 (ifp
->if_eflags
& IFEF_RESTRICTED_RECV
) &&
1430 !(inp
->inp_flags
& INP_RECV_ANYIF
))
1433 if (inp
->inp_faddr
.s_addr
== INADDR_ANY
&&
1434 inp
->inp_lport
== lport
) {
1435 if (inp
->inp_laddr
.s_addr
== laddr
.s_addr
) {
1436 if ((found
= (inp
->inp_socket
!= NULL
))) {
1437 *uid
= kauth_cred_getuid(
1438 inp
->inp_socket
->so_cred
);
1439 *gid
= kauth_cred_getgid(
1440 inp
->inp_socket
->so_cred
);
1442 lck_rw_done(pcbinfo
->mtx
);
1445 else if (inp
->inp_laddr
.s_addr
== INADDR_ANY
) {
1447 if (inp
->inp_socket
&&
1448 INP_CHECK_SOCKAF(inp
->inp_socket
,
1450 local_wild_mapped
= inp
;
1457 if (local_wild
== NULL
) {
1459 if (local_wild_mapped
!= NULL
) {
1460 if ((found
= (local_wild_mapped
->inp_socket
!= NULL
))) {
1461 *uid
= kauth_cred_getuid(
1462 local_wild_mapped
->inp_socket
->so_cred
);
1463 *gid
= kauth_cred_getgid(
1464 local_wild_mapped
->inp_socket
->so_cred
);
1466 lck_rw_done(pcbinfo
->mtx
);
1470 lck_rw_done(pcbinfo
->mtx
);
1473 if (local_wild
!= NULL
) {
1474 if ((found
= (local_wild
->inp_socket
!= NULL
))) {
1475 *uid
= kauth_cred_getuid(
1476 local_wild
->inp_socket
->so_cred
);
1477 *gid
= kauth_cred_getgid(
1478 local_wild
->inp_socket
->so_cred
);
1480 lck_rw_done(pcbinfo
->mtx
);
1488 lck_rw_done(pcbinfo
->mtx
);
1493 * Lookup PCB in hash list.
1497 struct inpcbinfo
*pcbinfo
,
1498 struct in_addr faddr
,
1500 struct in_addr laddr
,
1505 struct inpcbhead
*head
;
1507 u_short fport
= fport_arg
, lport
= lport_arg
;
1510 * We may have found the pcb in the last lookup - check this first.
1513 lck_rw_lock_shared(pcbinfo
->mtx
);
1516 * First look for an exact match.
1518 head
= &pcbinfo
->hashbase
[INP_PCBHASH(faddr
.s_addr
, lport
, fport
, pcbinfo
->hashmask
)];
1519 LIST_FOREACH(inp
, head
, inp_hash
) {
1521 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
1524 if (ip_restrictrecvif
&& ifp
!= NULL
&&
1525 (ifp
->if_eflags
& IFEF_RESTRICTED_RECV
) &&
1526 !(inp
->inp_flags
& INP_RECV_ANYIF
))
1529 if (inp
->inp_faddr
.s_addr
== faddr
.s_addr
&&
1530 inp
->inp_laddr
.s_addr
== laddr
.s_addr
&&
1531 inp
->inp_fport
== fport
&&
1532 inp
->inp_lport
== lport
) {
1536 if (in_pcb_checkstate(inp
, WNT_ACQUIRE
, 0) != WNT_STOPUSING
) {
1537 lck_rw_done(pcbinfo
->mtx
);
1540 else { /* it's there but dead, say it isn't found */
1541 lck_rw_done(pcbinfo
->mtx
);
1547 struct inpcb
*local_wild
= NULL
;
1549 struct inpcb
*local_wild_mapped
= NULL
;
1552 head
= &pcbinfo
->hashbase
[INP_PCBHASH(INADDR_ANY
, lport
, 0, pcbinfo
->hashmask
)];
1553 LIST_FOREACH(inp
, head
, inp_hash
) {
1555 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
1558 if (ip_restrictrecvif
&& ifp
!= NULL
&&
1559 (ifp
->if_eflags
& IFEF_RESTRICTED_RECV
) &&
1560 !(inp
->inp_flags
& INP_RECV_ANYIF
))
1563 if (inp
->inp_faddr
.s_addr
== INADDR_ANY
&&
1564 inp
->inp_lport
== lport
) {
1565 if (inp
->inp_laddr
.s_addr
== laddr
.s_addr
) {
1566 if (in_pcb_checkstate(inp
, WNT_ACQUIRE
, 0) != WNT_STOPUSING
) {
1567 lck_rw_done(pcbinfo
->mtx
);
1570 else { /* it's there but dead, say it isn't found */
1571 lck_rw_done(pcbinfo
->mtx
);
1575 else if (inp
->inp_laddr
.s_addr
== INADDR_ANY
) {
1577 if (INP_CHECK_SOCKAF(inp
->inp_socket
,
1579 local_wild_mapped
= inp
;
1586 if (local_wild
== NULL
) {
1588 if (local_wild_mapped
!= NULL
) {
1589 if (in_pcb_checkstate(local_wild_mapped
, WNT_ACQUIRE
, 0) != WNT_STOPUSING
) {
1590 lck_rw_done(pcbinfo
->mtx
);
1591 return (local_wild_mapped
);
1593 else { /* it's there but dead, say it isn't found */
1594 lck_rw_done(pcbinfo
->mtx
);
1599 lck_rw_done(pcbinfo
->mtx
);
1602 if (in_pcb_checkstate(local_wild
, WNT_ACQUIRE
, 0) != WNT_STOPUSING
) {
1603 lck_rw_done(pcbinfo
->mtx
);
1604 return (local_wild
);
1606 else { /* it's there but dead, say it isn't found */
1607 lck_rw_done(pcbinfo
->mtx
);
1615 lck_rw_done(pcbinfo
->mtx
);
1620 * Insert PCB onto various hash lists.
1623 in_pcbinshash(struct inpcb
*inp
, int locked
)
1625 struct inpcbhead
*pcbhash
;
1626 struct inpcbporthead
*pcbporthash
;
1627 struct inpcbinfo
*pcbinfo
= inp
->inp_pcbinfo
;
1628 struct inpcbport
*phd
;
1629 u_int32_t hashkey_faddr
;
1632 if (!lck_rw_try_lock_exclusive(pcbinfo
->mtx
)) {
1633 /*lock inversion issue, mostly with udp multicast packets */
1634 socket_unlock(inp
->inp_socket
, 0);
1635 lck_rw_lock_exclusive(pcbinfo
->mtx
);
1636 socket_lock(inp
->inp_socket
, 0);
1637 if (inp
->inp_state
== INPCB_STATE_DEAD
) {
1638 /* The socket got dropped when it was unlocked */
1639 lck_rw_done(pcbinfo
->mtx
);
1640 return(ECONNABORTED
);
1646 if (inp
->inp_vflag
& INP_IPV6
)
1647 hashkey_faddr
= inp
->in6p_faddr
.s6_addr32
[3] /* XXX */;
1650 hashkey_faddr
= inp
->inp_faddr
.s_addr
;
1652 inp
->hash_element
= INP_PCBHASH(hashkey_faddr
, inp
->inp_lport
, inp
->inp_fport
, pcbinfo
->hashmask
);
1654 pcbhash
= &pcbinfo
->hashbase
[inp
->hash_element
];
1656 pcbporthash
= &pcbinfo
->porthashbase
[INP_PCBPORTHASH(inp
->inp_lport
,
1657 pcbinfo
->porthashmask
)];
1660 * Go through port list and look for a head for this lport.
1662 LIST_FOREACH(phd
, pcbporthash
, phd_hash
) {
1663 if (phd
->phd_port
== inp
->inp_lport
)
1667 VERIFY(inp
->inp_state
!= INPCB_STATE_DEAD
);
1670 * If none exists, malloc one and tack it on.
1673 MALLOC(phd
, struct inpcbport
*, sizeof(struct inpcbport
), M_PCB
, M_WAITOK
);
1676 lck_rw_done(pcbinfo
->mtx
);
1677 return (ENOBUFS
); /* XXX */
1679 phd
->phd_port
= inp
->inp_lport
;
1680 LIST_INIT(&phd
->phd_pcblist
);
1681 LIST_INSERT_HEAD(pcbporthash
, phd
, phd_hash
);
1684 LIST_INSERT_HEAD(&phd
->phd_pcblist
, inp
, inp_portlist
);
1685 LIST_INSERT_HEAD(pcbhash
, inp
, inp_hash
);
1687 lck_rw_done(pcbinfo
->mtx
);
1692 * Move PCB to the proper hash bucket when { faddr, fport } have been
1693 * changed. NOTE: This does not handle the case of the lport changing (the
1694 * hashed port list would have to be updated as well), so the lport must
1695 * not change after in_pcbinshash() has been called.
1698 in_pcbrehash(struct inpcb
*inp
)
1700 struct inpcbhead
*head
;
1701 u_int32_t hashkey_faddr
;
1704 if (inp
->inp_vflag
& INP_IPV6
)
1705 hashkey_faddr
= inp
->in6p_faddr
.s6_addr32
[3] /* XXX */;
1708 hashkey_faddr
= inp
->inp_faddr
.s_addr
;
1709 inp
->hash_element
= INP_PCBHASH(hashkey_faddr
, inp
->inp_lport
,
1710 inp
->inp_fport
, inp
->inp_pcbinfo
->hashmask
);
1711 head
= &inp
->inp_pcbinfo
->hashbase
[inp
->hash_element
];
1713 LIST_REMOVE(inp
, inp_hash
);
1714 LIST_INSERT_HEAD(head
, inp
, inp_hash
);
1718 * Remove PCB from various lists.
1719 * Must be called pcbinfo lock is held in exclusive mode.
1722 in_pcbremlists(struct inpcb
*inp
)
1724 struct inp_fc_entry
*infce
;
1725 inp
->inp_gencnt
= ++inp
->inp_pcbinfo
->ipi_gencnt
;
1727 if (inp
->inp_lport
) {
1728 struct inpcbport
*phd
= inp
->inp_phd
;
1730 LIST_REMOVE(inp
, inp_hash
);
1731 LIST_REMOVE(inp
, inp_portlist
);
1732 if (phd
!= NULL
&& (LIST_FIRST(&phd
->phd_pcblist
) == NULL
)) {
1733 LIST_REMOVE(phd
, phd_hash
);
1737 LIST_REMOVE(inp
, inp_list
);
1739 infce
= inp_fc_getinp(inp
->inp_flowhash
);
1741 inp_fc_entry_free(infce
);
1743 inp
->inp_pcbinfo
->ipi_count
--;
1746 /* Mechanism used to defer the memory release of PCBs
1747 * The pcb list will contain the pcb until the ripper can clean it up if
1748 * the following conditions are met: 1) state "DEAD", 2) wantcnt is STOPUSING
1749 * 3) usecount is null
1750 * This function will be called to either mark the pcb as
1753 in_pcb_checkstate(struct inpcb
*pcb
, int mode
, int locked
)
1756 volatile UInt32
*wantcnt
= (volatile UInt32
*)&pcb
->inp_wantcnt
;
1762 case WNT_STOPUSING
: /* try to mark the pcb as ready for recycling */
1764 /* compareswap with STOPUSING, if success we're good, if it's in use, will be marked later */
1767 socket_lock(pcb
->inp_socket
, 1);
1768 pcb
->inp_state
= INPCB_STATE_DEAD
;
1771 if (pcb
->inp_socket
->so_usecount
< 0)
1772 panic("in_pcb_checkstate STOP pcb=%p so=%p usecount is negative\n", pcb
, pcb
->inp_socket
);
1774 socket_unlock(pcb
->inp_socket
, 1);
1776 origwant
= *wantcnt
;
1777 if ((UInt16
) origwant
== 0xffff ) /* should stop using */
1778 return (WNT_STOPUSING
);
1780 if ((UInt16
) origwant
== 0) {/* try to mark it as unsuable now */
1781 OSCompareAndSwap(origwant
, newwant
, wantcnt
) ;
1783 return (WNT_STOPUSING
);
1786 case WNT_ACQUIRE
: /* try to increase reference to pcb */
1787 /* if WNT_STOPUSING should bail out */
1789 * if socket state DEAD, try to set count to STOPUSING, return failed
1790 * otherwise increase cnt
1793 origwant
= *wantcnt
;
1794 if ((UInt16
) origwant
== 0xffff ) {/* should stop using */
1795 // printf("in_pcb_checkstate: ACQ PCB was STOPUSING while release. odd pcb=%p\n", pcb);
1796 return (WNT_STOPUSING
);
1798 newwant
= origwant
+ 1;
1799 } while (!OSCompareAndSwap(origwant
, newwant
, wantcnt
));
1800 return (WNT_ACQUIRE
);
1803 case WNT_RELEASE
: /* release reference. if result is null and pcb state is DEAD,
1804 set wanted bit to STOPUSING
1808 socket_lock(pcb
->inp_socket
, 1);
1811 origwant
= *wantcnt
;
1812 if ((UInt16
) origwant
== 0x0 )
1813 panic("in_pcb_checkstate pcb=%p release with zero count", pcb
);
1814 if ((UInt16
) origwant
== 0xffff ) {/* should stop using */
1816 printf("in_pcb_checkstate: REL PCB was STOPUSING while release. odd pcb=%p\n", pcb
);
1819 socket_unlock(pcb
->inp_socket
, 1);
1820 return (WNT_STOPUSING
);
1822 newwant
= origwant
- 1;
1823 } while (!OSCompareAndSwap(origwant
, newwant
, wantcnt
));
1825 if (pcb
->inp_state
== INPCB_STATE_DEAD
)
1827 if (pcb
->inp_socket
->so_usecount
< 0)
1828 panic("in_pcb_checkstate RELEASE pcb=%p so=%p usecount is negative\n", pcb
, pcb
->inp_socket
);
1831 socket_unlock(pcb
->inp_socket
, 1);
1832 return (WNT_RELEASE
);
1837 panic("in_pcb_checkstate: so=%p not a valid state =%x\n", pcb
->inp_socket
, mode
);
1845 * inpcb_to_compat copies specific bits of an inpcb to a inpcb_compat.
1846 * The inpcb_compat data structure is passed to user space and must
1847 * not change. We intentionally avoid copying pointers.
1852 struct inpcb_compat
*inp_compat
)
1854 bzero(inp_compat
, sizeof(*inp_compat
));
1855 inp_compat
->inp_fport
= inp
->inp_fport
;
1856 inp_compat
->inp_lport
= inp
->inp_lport
;
1857 inp_compat
->nat_owner
= 0;
1858 inp_compat
->nat_cookie
= inp
->nat_cookie
;
1859 inp_compat
->inp_gencnt
= inp
->inp_gencnt
;
1860 inp_compat
->inp_flags
= inp
->inp_flags
;
1861 inp_compat
->inp_flow
= inp
->inp_flow
;
1862 inp_compat
->inp_vflag
= inp
->inp_vflag
;
1863 inp_compat
->inp_ip_ttl
= inp
->inp_ip_ttl
;
1864 inp_compat
->inp_ip_p
= inp
->inp_ip_p
;
1865 inp_compat
->inp_dependfaddr
.inp6_foreign
= inp
->inp_dependfaddr
.inp6_foreign
;
1866 inp_compat
->inp_dependladdr
.inp6_local
= inp
->inp_dependladdr
.inp6_local
;
1867 inp_compat
->inp_depend4
.inp4_ip_tos
= inp
->inp_depend4
.inp4_ip_tos
;
1868 inp_compat
->inp_depend6
.inp6_hlim
= inp
->inp_depend6
.inp6_hlim
;
1869 inp_compat
->inp_depend6
.inp6_cksum
= inp
->inp_depend6
.inp6_cksum
;
1870 inp_compat
->inp_depend6
.inp6_ifindex
= inp
->inp_depend6
.inp6_ifindex
;
1871 inp_compat
->inp_depend6
.inp6_hops
= inp
->inp_depend6
.inp6_hops
;
1874 #if !CONFIG_EMBEDDED
1879 struct xinpcb64
*xinp
)
1881 xinp
->inp_fport
= inp
->inp_fport
;
1882 xinp
->inp_lport
= inp
->inp_lport
;
1883 xinp
->inp_gencnt
= inp
->inp_gencnt
;
1884 xinp
->inp_flags
= inp
->inp_flags
;
1885 xinp
->inp_flow
= inp
->inp_flow
;
1886 xinp
->inp_vflag
= inp
->inp_vflag
;
1887 xinp
->inp_ip_ttl
= inp
->inp_ip_ttl
;
1888 xinp
->inp_ip_p
= inp
->inp_ip_p
;
1889 xinp
->inp_dependfaddr
.inp6_foreign
= inp
->inp_dependfaddr
.inp6_foreign
;
1890 xinp
->inp_dependladdr
.inp6_local
= inp
->inp_dependladdr
.inp6_local
;
1891 xinp
->inp_depend4
.inp4_ip_tos
= inp
->inp_depend4
.inp4_ip_tos
;
1892 xinp
->inp_depend6
.inp6_hlim
= inp
->inp_depend6
.inp6_hlim
;
1893 xinp
->inp_depend6
.inp6_cksum
= inp
->inp_depend6
.inp6_cksum
;
1894 xinp
->inp_depend6
.inp6_ifindex
= inp
->inp_depend6
.inp6_ifindex
;
1895 xinp
->inp_depend6
.inp6_hops
= inp
->inp_depend6
.inp6_hops
;
1898 #endif /* !CONFIG_EMBEDDED */
1902 * The following routines implement this scheme:
1904 * Callers of ip_output() that intend to cache the route in the inpcb pass
1905 * a local copy of the struct route to ip_output(). Using a local copy of
1906 * the cached route significantly simplifies things as IP no longer has to
1907 * worry about having exclusive access to the passed in struct route, since
1908 * it's defined in the caller's stack; in essence, this allows for a lock-
1909 * less operation when updating the struct route at the IP level and below,
1910 * whenever necessary. The scheme works as follows:
1912 * Prior to dropping the socket's lock and calling ip_output(), the caller
1913 * copies the struct route from the inpcb into its stack, and adds a reference
1914 * to the cached route entry, if there was any. The socket's lock is then
1915 * dropped and ip_output() is called with a pointer to the copy of struct
1916 * route defined on the stack (not to the one in the inpcb.)
1918 * Upon returning from ip_output(), the caller then acquires the socket's
1919 * lock and synchronizes the cache; if there is no route cached in the inpcb,
1920 * it copies the local copy of struct route (which may or may not contain any
1921 * route) back into the cache; otherwise, if the inpcb has a route cached in
1922 * it, the one in the local copy will be freed, if there's any. Trashing the
1923 * cached route in the inpcb can be avoided because ip_output() is single-
1924 * threaded per-PCB (i.e. multiple transmits on a PCB are always serialized
1925 * by the socket/transport layer.)
1928 inp_route_copyout(struct inpcb
*inp
, struct route
*dst
)
1930 struct route
*src
= &inp
->inp_route
;
1932 lck_mtx_assert(&inp
->inpcb_mtx
, LCK_MTX_ASSERT_OWNED
);
1935 * If the route in the PCB is not for IPv4, blow it away;
1936 * this is possible in the case of IPv4-mapped address case.
1938 if (src
->ro_rt
!= NULL
&& rt_key(src
->ro_rt
)->sa_family
!= AF_INET
) {
1943 route_copyout(dst
, src
, sizeof(*dst
));
1947 inp_route_copyin(struct inpcb
*inp
, struct route
*src
)
1949 struct route
*dst
= &inp
->inp_route
;
1951 lck_mtx_assert(&inp
->inpcb_mtx
, LCK_MTX_ASSERT_OWNED
);
1953 /* Minor sanity check */
1954 if (src
->ro_rt
!= NULL
&& rt_key(src
->ro_rt
)->sa_family
!= AF_INET
)
1955 panic("%s: wrong or corrupted route: %p", __func__
, src
);
1957 route_copyin(src
, dst
, sizeof(*src
));
1961 * Handler for setting IP_FORCE_OUT_IFP/IP_BOUND_IF/IPV6_BOUND_IF socket option.
1964 inp_bindif(struct inpcb
*inp
, unsigned int ifscope
)
1966 struct ifnet
*ifp
= NULL
;
1968 ifnet_head_lock_shared();
1969 if ((ifscope
> (unsigned)if_index
) || (ifscope
!= IFSCOPE_NONE
&&
1970 (ifp
= ifindex2ifnet
[ifscope
]) == NULL
)) {
1976 VERIFY(ifp
!= NULL
|| ifscope
== IFSCOPE_NONE
);
1979 * A zero interface scope value indicates an "unbind".
1980 * Otherwise, take in whatever value the app desires;
1981 * the app may already know the scope (or force itself
1982 * to such a scope) ahead of time before the interface
1983 * gets attached. It doesn't matter either way; any
1984 * route lookup from this point on will require an
1985 * exact match for the embedded interface scope.
1987 inp
->inp_boundifp
= ifp
;
1988 if (inp
->inp_boundifp
== NULL
)
1989 inp
->inp_flags
&= ~INP_BOUND_IF
;
1991 inp
->inp_flags
|= INP_BOUND_IF
;
1993 /* Blow away any cached route in the PCB */
1994 if (inp
->inp_route
.ro_rt
!= NULL
) {
1995 rtfree(inp
->inp_route
.ro_rt
);
1996 inp
->inp_route
.ro_rt
= NULL
;
2003 * Handler for setting IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option.
2006 inp_nocellular(struct inpcb
*inp
, unsigned int val
)
2009 inp
->inp_flags
|= INP_NO_IFT_CELLULAR
;
2010 } else if (inp
->inp_flags
& INP_NO_IFT_CELLULAR
) {
2011 /* once set, it cannot be unset */
2015 /* Blow away any cached route in the PCB */
2016 if (inp
->inp_route
.ro_rt
!= NULL
) {
2017 rtfree(inp
->inp_route
.ro_rt
);
2018 inp
->inp_route
.ro_rt
= NULL
;
2025 * Calculate flow hash for an inp, used by an interface to identify a
2026 * flow. When an interface provides flow control advisory, this flow
2027 * hash is used as an identifier.
2030 inp_calc_flowhash(struct inpcb
*inp
)
2032 struct inp_flowhash_key fh
__attribute__((aligned(8)));
2033 u_int32_t flowhash
= 0;
2035 if (inp_hash_seed
== 0)
2036 inp_hash_seed
= RandomULong();
2038 bzero(&fh
, sizeof (fh
));
2040 bcopy(&inp
->inp_dependladdr
, &fh
.infh_laddr
, sizeof (fh
.infh_laddr
));
2041 bcopy(&inp
->inp_dependfaddr
, &fh
.infh_faddr
, sizeof (fh
.infh_faddr
));
2043 fh
.infh_lport
= inp
->inp_lport
;
2044 fh
.infh_fport
= inp
->inp_fport
;
2045 fh
.infh_af
= (inp
->inp_vflag
& INP_IPV6
) ? AF_INET6
: AF_INET
;
2046 fh
.infh_proto
= inp
->inp_ip_p
;
2047 fh
.infh_rand1
= RandomULong();
2048 fh
.infh_rand2
= RandomULong();
2051 flowhash
= net_flowhash(&fh
, sizeof (fh
), inp_hash_seed
);
2052 if (flowhash
== 0) {
2053 /* try to get a non-zero flowhash */
2054 inp_hash_seed
= RandomULong();
2062 * Function to compare inp_fc_entries in inp flow control tree
2065 infc_cmp(const struct inp_fc_entry
*fc1
, const struct inp_fc_entry
*fc2
)
2067 return (fc1
->infc_flowhash
- fc2
->infc_flowhash
);
2071 inp_fc_addinp(struct inpcb
*inp
)
2073 struct inp_fc_entry keyfc
, *infc
;
2074 u_int32_t flowhash
= inp
->inp_flowhash
;
2076 keyfc
.infc_flowhash
= flowhash
;
2078 lck_mtx_lock_spin(&inp_fc_lck
);
2079 infc
= RB_FIND(inp_fc_tree
, &inp_fc_tree
, &keyfc
);
2080 if (infc
!= NULL
&& infc
->infc_inp
== inp
) {
2081 /* Entry is already in inp_fc_tree, return */
2082 lck_mtx_unlock(&inp_fc_lck
);
2088 * There is a different fc entry with the same
2089 * flow hash but different inp pointer. There
2090 * can be a collision on flow hash but the
2091 * probability is low. Let's just avoid
2092 * adding a second one when there is a collision
2094 lck_mtx_unlock(&inp_fc_lck
);
2098 /* become regular mutex */
2099 lck_mtx_convert_spin(&inp_fc_lck
);
2101 infc
= zalloc_noblock(inp_fcezone
);
2103 /* memory allocation failed */
2104 lck_mtx_unlock(&inp_fc_lck
);
2107 bzero(infc
, sizeof (*infc
));
2109 infc
->infc_flowhash
= flowhash
;
2110 infc
->infc_inp
= inp
;
2112 RB_INSERT(inp_fc_tree
, &inp_fc_tree
, infc
);
2113 lck_mtx_unlock(&inp_fc_lck
);
2117 struct inp_fc_entry
*
2118 inp_fc_getinp(u_int32_t flowhash
)
2120 struct inp_fc_entry keyfc
, *infc
;
2122 keyfc
.infc_flowhash
= flowhash
;
2124 lck_mtx_lock_spin(&inp_fc_lck
);
2125 infc
= RB_FIND(inp_fc_tree
, &inp_fc_tree
, &keyfc
);
2127 /* inp is not present, return */
2128 lck_mtx_unlock(&inp_fc_lck
);
2132 RB_REMOVE(inp_fc_tree
, &inp_fc_tree
, infc
);
2134 if (in_pcb_checkstate(infc
->infc_inp
, WNT_ACQUIRE
, 0) ==
2136 /* become regular mutex */
2137 lck_mtx_convert_spin(&inp_fc_lck
);
2140 * This inp is going away, just don't process it.
2142 inp_fc_entry_free(infc
);
2145 lck_mtx_unlock(&inp_fc_lck
);
2151 inp_fc_entry_free(struct inp_fc_entry
*infc
)
2153 zfree(inp_fcezone
, infc
);
2157 inp_fc_feedback(struct inpcb
*inp
)
2159 struct socket
*so
= inp
->inp_socket
;
2161 /* we already hold a want_cnt on this inp, socket can't be null */
2162 VERIFY (so
!= NULL
);
2165 if (in_pcb_checkstate(inp
, WNT_RELEASE
, 1) == WNT_STOPUSING
) {
2166 socket_unlock(so
, 1);
2171 * Return if the connection is not in flow-controlled state.
2172 * This can happen if the connection experienced
2173 * loss while it was in flow controlled state
2175 if (!INP_WAIT_FOR_IF_FEEDBACK(inp
)) {
2176 socket_unlock(so
, 1);
2179 inp_reset_fc_state(inp
);
2181 if (so
->so_proto
->pr_type
== SOCK_STREAM
)
2182 inp_fc_unthrottle_tcp(inp
);
2184 socket_unlock(so
, 1);
2188 inp_reset_fc_state(struct inpcb
*inp
)
2190 struct socket
*so
= inp
->inp_socket
;
2191 int suspended
= (INP_IS_FLOW_SUSPENDED(inp
)) ? 1 : 0;
2192 int needwakeup
= (INP_WAIT_FOR_IF_FEEDBACK(inp
)) ? 1 : 0;
2194 inp
->inp_flags
&= ~(INP_FLOW_CONTROLLED
| INP_FLOW_SUSPENDED
);
2197 so
->so_flags
&= ~(SOF_SUSPENDED
);
2198 soevent(so
, (SO_FILT_HINT_LOCKED
| SO_FILT_HINT_RESUME
));
2201 if (inp
->inp_sndinprog_cnt
> 0)
2202 inp
->inp_flags
|= INP_FC_FEEDBACK
;
2204 /* Give a write wakeup to unblock the socket */
2210 inp_set_fc_state(struct inpcb
*inp
, int advcode
)
2213 * If there was a feedback from the interface when
2214 * send operation was in progress, we should ignore
2215 * this flow advisory to avoid a race between setting
2216 * flow controlled state and receiving feedback from
2219 if (inp
->inp_flags
& INP_FC_FEEDBACK
)
2222 inp
->inp_flags
&= ~(INP_FLOW_CONTROLLED
| INP_FLOW_SUSPENDED
);
2223 if (inp_fc_addinp(inp
)) {
2225 case FADV_FLOW_CONTROLLED
:
2226 inp
->inp_flags
|= INP_FLOW_CONTROLLED
;
2228 case FADV_SUSPENDED
:
2229 inp
->inp_flags
|= INP_FLOW_SUSPENDED
;
2230 soevent(inp
->inp_socket
,
2231 (SO_FILT_HINT_LOCKED
| SO_FILT_HINT_SUSPEND
));
2233 /* Record the fact that suspend event was sent */
2234 inp
->inp_socket
->so_flags
|= SOF_SUSPENDED
;
2242 * Handler for SO_FLUSH socket option.
2245 inp_flush(struct inpcb
*inp
, int optval
)
2247 u_int32_t flowhash
= inp
->inp_flowhash
;
2250 /* Either all classes or one of the valid ones */
2251 if (optval
!= SO_TC_ALL
&& !SO_VALID_TC(optval
))
2254 /* We need a flow hash for identification */
2258 /* We need a cached route for the interface */
2259 if ((rt
= inp
->inp_route
.ro_rt
) != NULL
) {
2260 struct ifnet
*ifp
= rt
->rt_ifp
;
2261 if_qflush_sc(ifp
, so_tc2msc(optval
), flowhash
, NULL
, NULL
, 0);
2268 * Clear the INP_INADDR_ANY flag (special case for PPP only)
2270 void inp_clear_INP_INADDR_ANY(struct socket
*so
)
2272 struct inpcb
*inp
= NULL
;
2275 inp
= sotoinpcb(so
);
2277 inp
->inp_flags
&= ~INP_INADDR_ANY
;
2279 socket_unlock(so
, 1);