]> git.saurik.com Git - apple/xnu.git/blame - bsd/netinet/in_pcb.c
xnu-6153.121.1.tar.gz
[apple/xnu.git] / bsd / netinet / in_pcb.c
CommitLineData
1c79356b 1/*
cb323159 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
39236c6e 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
39236c6e 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
39236c6e 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
39236c6e 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * Copyright (c) 1982, 1986, 1991, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)in_pcb.c 8.4 (Berkeley) 5/24/95
9bccf70c 61 * $FreeBSD: src/sys/netinet/in_pcb.c,v 1.59.2.17 2001/08/13 16:26:17 ume Exp $
1c79356b
A
62 */
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/malloc.h>
67#include <sys/mbuf.h>
1c79356b 68#include <sys/domain.h>
1c79356b
A
69#include <sys/protosw.h>
70#include <sys/socket.h>
71#include <sys/socketvar.h>
72#include <sys/proc.h>
73#include <sys/kernel.h>
74#include <sys/sysctl.h>
6d2010ae
A
75#include <sys/mcache.h>
76#include <sys/kauth.h>
77#include <sys/priv.h>
39236c6e
A
78#include <sys/proc_uuid_policy.h>
79#include <sys/syslog.h>
fe8ab488 80#include <sys/priv.h>
39037602 81#include <net/dlil.h>
39236c6e 82
91447636 83#include <libkern/OSAtomic.h>
316670eb 84#include <kern/locks.h>
1c79356b
A
85
86#include <machine/limits.h>
87
1c79356b 88#include <kern/zalloc.h>
1c79356b
A
89
90#include <net/if.h>
1c79356b 91#include <net/if_types.h>
9bccf70c 92#include <net/route.h>
316670eb
A
93#include <net/flowhash.h>
94#include <net/flowadv.h>
d9a64523 95#include <net/nat464_utils.h>
fe8ab488 96#include <net/ntstat.h>
cb323159 97#include <net/restricted_in_port.h>
1c79356b
A
98
99#include <netinet/in.h>
100#include <netinet/in_pcb.h>
101#include <netinet/in_var.h>
102#include <netinet/ip_var.h>
cb323159 103
1c79356b
A
104#if INET6
105#include <netinet/ip6.h>
106#include <netinet6/ip6_var.h>
107#endif /* INET6 */
108
1c79356b 109#include <sys/kdebug.h>
b0d623f7 110#include <sys/random.h>
39236c6e 111
316670eb 112#include <dev/random/randomdev.h>
39236c6e 113#include <mach/boolean.h>
1c79356b 114
39037602
A
115#include <pexpert/pexpert.h>
116
fe8ab488
A
117#if NECP
118#include <net/necp.h>
9bccf70c 119#endif
1c79356b 120
39037602
A
121#include <sys/stat.h>
122#include <sys/ubc.h>
123#include <sys/vnode.h>
124
cb323159
A
125#include <os/log.h>
126
extern const char *proc_name_address(struct proc *);

/* Lock group/attributes shared by every inpcb-related mutex in this file */
static lck_grp_t *inpcb_lock_grp;
static lck_attr_t *inpcb_lock_attr;
static lck_grp_attr_t *inpcb_lock_grp_attr;
decl_lck_mtx_data(static, inpcb_lock);          /* global INPCB lock */
decl_lck_mtx_data(static, inpcb_timeout_lock);  /* guards the timer state below */

/* All registered pcbinfo structures (one per protocol); guarded by inpcb_lock */
static TAILQ_HEAD(, inpcbinfo) inpcb_head = TAILQ_HEAD_INITIALIZER(inpcb_head);

/* Timer bookkeeping; all fields guarded by inpcb_timeout_lock */
static u_int16_t inpcb_timeout_run = 0;         /* INPCB timer is scheduled to run */
static boolean_t inpcb_garbage_collecting = FALSE; /* gc timer is scheduled */
static boolean_t inpcb_ticking = FALSE;         /* "slow" timer is scheduled */
static boolean_t inpcb_fast_timer_on = FALSE;

/* If pending gc requests exceed this, inpcb_gc_sched() forces a fast timer */
#define INPCB_GCREQ_THRESHOLD 50000

static thread_call_t inpcb_thread_call, inpcb_fast_thread_call;
static void inpcb_sched_timeout(void);
static void inpcb_sched_lazy_timeout(void);
static void _inpcb_sched_timeout(unsigned int);
static void inpcb_timeout(void *, void *);
const int inpcb_timeout_lazy = 10;      /* 10 seconds leeway for lazy timers */
extern int tvtohz(struct timeval *);

#if CONFIG_PROC_UUID_POLICY
static void inp_update_cellular_policy(struct inpcb *, boolean_t);
#if NECP
static void inp_update_necp_want_app_policy(struct inpcb *, boolean_t);
#endif /* NECP */
#endif /* CONFIG_PROC_UUID_POLICY */

/* kdebug trace codes for PCB lookups */
#define DBG_FNC_PCB_LOOKUP      NETDBG_CODE(DBG_NETTCP, (6 << 8))
#define DBG_FNC_PCB_HLOOKUP     NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))

/*
 * These configure the range of local port addresses assigned to
 * "unspecified" outgoing connections/packets/whatever.
 */
int ipport_lowfirstauto = IPPORT_RESERVED - 1;  /* 1023 */
int ipport_lowlastauto = IPPORT_RESERVEDSTART;  /* 600 */
int ipport_firstauto = IPPORT_HIFIRSTAUTO;      /* 49152 */
int ipport_lastauto = IPPORT_HILASTAUTO;        /* 65535 */
int ipport_hifirstauto = IPPORT_HIFIRSTAUTO;    /* 49152 */
int ipport_hilastauto = IPPORT_HILASTAUTO;      /* 65535 */

/* Clamp (var) into [min, max]; used only by sysctl_net_ipport_check() */
#define RANGECHK(var, min, max) \
	if ((var) < (min)) { (var) = (min); } \
	else if ((var) > (max)) { (var) = (max); }
176
1c79356b
A
177static int
178sysctl_net_ipport_check SYSCTL_HANDLER_ARGS
179{
2d21ac55 180#pragma unused(arg1, arg2)
39236c6e 181 int error;
cb323159
A
182#if (DEBUG | DEVELOPMENT)
183 int old_value = *(int *)oidp->oid_arg1;
184 /*
185 * For unit testing allow a non-superuser process with the
186 * proper entitlement to modify the variables
187 */
188 if (req->newptr) {
189 if (proc_suser(current_proc()) != 0 &&
190 (error = priv_check_cred(kauth_cred_get(),
191 PRIV_NETINET_RESERVEDPORT, 0))) {
192 return EPERM;
193 }
194 }
195#endif /* (DEBUG | DEVELOPMENT) */
39236c6e
A
196
197 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
1c79356b
A
198 if (!error) {
199 RANGECHK(ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
200 RANGECHK(ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
201 RANGECHK(ipport_firstauto, IPPORT_RESERVED, USHRT_MAX);
202 RANGECHK(ipport_lastauto, IPPORT_RESERVED, USHRT_MAX);
203 RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX);
204 RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX);
205 }
cb323159
A
206
207#if (DEBUG | DEVELOPMENT)
208 os_log(OS_LOG_DEFAULT,
209 "%s:%u sysctl net.restricted_port.verbose: %d -> %d)",
210 proc_best_name(current_proc()), proc_selfpid(),
211 old_value, *(int *)oidp->oid_arg1);
212#endif /* (DEBUG | DEVELOPMENT) */
213
0a7de745 214 return error;
1c79356b
A
215}
216
#undef RANGECHK

SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP Ports");

/*
 * On DEBUG/DEVELOPMENT kernels any process may issue the write
 * (CTLFLAG_ANYBODY); the handler itself still enforces the
 * superuser-or-entitlement check.
 * NOTE(review): "CTLFAGS" looks like a typo for "CTLFLAGS", but the name
 * is used consistently below, so it is left unchanged here.
 */
#if (DEBUG | DEVELOPMENT)
#define CTLFAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY)
#else
#define CTLFAGS_IP_PORTRANGE (CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED)
#endif /* (DEBUG | DEVELOPMENT) */

/* All six endpoints share sysctl_net_ipport_check() for range validation */
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
    CTLFAGS_IP_PORTRANGE,
    &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
    CTLFAGS_IP_PORTRANGE,
    &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first,
    CTLFAGS_IP_PORTRANGE,
    &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last,
    CTLFAGS_IP_PORTRANGE,
    &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
    CTLFAGS_IP_PORTRANGE,
    &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
    CTLFAGS_IP_PORTRANGE,
    &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");

/* APN fallback debug logging; the knobs are only exposed on embedded builds */
static uint32_t apn_fallbk_debug = 0;
#define apn_fallbk_log(x) do { if (apn_fallbk_debug >= 1) log x; } while (0)

#if CONFIG_EMBEDDED
static boolean_t apn_fallbk_enabled = TRUE;

SYSCTL_DECL(_net_inet);
SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "APN Fallback");
SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &apn_fallbk_enabled, 0, "APN fallback enable");
SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &apn_fallbk_debug, 0, "APN fallback debug enable");
#else
static boolean_t apn_fallbk_enabled = FALSE;
#endif /* CONFIG_EMBEDDED */
39037602 262
0a7de745
A
extern int udp_use_randomport;
extern int tcp_use_randomport;

/* Structs used for flowhash computation */
struct inp_flowhash_key_addr {
	union {
		struct in_addr v4;
		struct in6_addr v6;
		u_int8_t addr8[16];
		u_int16_t addr16[8];
		u_int32_t addr32[4];
	} infha;
};

/* Full key hashed to derive an inp's flowhash (addresses, ports, af, proto) */
struct inp_flowhash_key {
	struct inp_flowhash_key_addr infh_laddr;
	struct inp_flowhash_key_addr infh_faddr;
	u_int32_t infh_lport;
	u_int32_t infh_fport;
	u_int32_t infh_af;
	u_int32_t infh_proto;
	u_int32_t infh_rand1;
	u_int32_t infh_rand2;
};

/* Lazily-initialized seed mixed into every flowhash computation */
static u_int32_t inp_hash_seed = 0;

static int infc_cmp(const struct inpcb *, const struct inpcb *);

/* Flags used by inp_fc_getinp */
#define INPFC_SOLOCKED  0x1
#define INPFC_REMOVE    0x2
static struct inpcb *inp_fc_getinp(u_int32_t, u_int32_t);

static void inp_fc_feedback(struct inpcb *);
extern void tcp_remove_from_time_wait(struct inpcb *inp);

/* Protects the flow-control tree (inp_fc_tree) and key_inp below */
decl_lck_mtx_data(static, inp_fc_lck);

/* Red-black tree of inpcbs keyed by flowhash, for flow-advisory delivery */
RB_HEAD(inp_fc_tree, inpcb) inp_fc_tree;
RB_PROTOTYPE(inp_fc_tree, inpcb, infc_link, infc_cmp);
RB_GENERATE(inp_fc_tree, inpcb, infc_link, infc_cmp);

/*
 * Use this inp as a key to find an inp in the flowhash tree.
 * Accesses to it are protected by inp_fc_lck.
 */
struct inpcb key_inp;

/*
 * in_pcb.c: manage the Protocol Control Blocks.
 */
315
/*
 * One-time module initialization: create the global inpcb lock group and
 * mutexes, allocate the two timer thread calls (shared lazy + dedicated
 * fast), and set up the flow-control RB tree.  A second call panics via
 * VERIFY.
 */
void
in_pcbinit(void)
{
	static int inpcb_initialized = 0;

	VERIFY(!inpcb_initialized);
	inpcb_initialized = 1;

	inpcb_lock_grp_attr = lck_grp_attr_alloc_init();
	inpcb_lock_grp = lck_grp_alloc_init("inpcb", inpcb_lock_grp_attr);
	inpcb_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(&inpcb_lock, inpcb_lock_grp, inpcb_lock_attr);
	lck_mtx_init(&inpcb_timeout_lock, inpcb_lock_grp, inpcb_lock_attr);
	/*
	 * Both thread calls invoke inpcb_timeout(); the second one exists so
	 * a fast timer can be armed while a lazy call is already pending
	 * (see _inpcb_sched_timeout()).
	 */
	inpcb_thread_call = thread_call_allocate_with_priority(inpcb_timeout,
	    NULL, THREAD_CALL_PRIORITY_KERNEL);
	inpcb_fast_thread_call = thread_call_allocate_with_priority(
	    inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL);
	if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL) {
		panic("unable to alloc the inpcb thread call");
	}

	/*
	 * Initialize data structures required to deliver
	 * flow advisories.
	 */
	lck_mtx_init(&inp_fc_lck, inpcb_lock_grp, inpcb_lock_attr);
	lck_mtx_lock(&inp_fc_lck);
	RB_INIT(&inp_fc_tree);
	bzero(&key_inp, sizeof(key_inp));
	lck_mtx_unlock(&inp_fc_lck);
}
347
/* True if any timer request (lazy, fast, or nodelay) is pending in `req' */
#define INPCB_HAVE_TIMER_REQ(req) (((req).intimer_lazy > 0) || \
	((req).intimer_fast > 0) || ((req).intimer_nodelay > 0))

/*
 * Thread-call work function for both inpcb timers.  Runs each registered
 * pcbinfo's gc and/or slow-timer callback (when one was requested), tallies
 * the requests those callbacks re-posted, and re-arms a fast or lazy timer
 * accordingly.  Also piggy-backs the net_uptime() counter update.
 */
static void
inpcb_timeout(void *arg0, void *arg1)
{
#pragma unused(arg0, arg1)
	struct inpcbinfo *ipi;
	boolean_t t, gc;
	struct intimercount gccnt, tmcnt;

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	bzero(&gccnt, sizeof(gccnt));
	bzero(&tmcnt, sizeof(tmcnt));

	/* Consume the pending gc/ticking flags under the timeout lock */
	lck_mtx_lock_spin(&inpcb_timeout_lock);
	gc = inpcb_garbage_collecting;
	inpcb_garbage_collecting = FALSE;

	t = inpcb_ticking;
	inpcb_ticking = FALSE;

	if (gc || t) {
		/* Drop the spin lock before walking the list with callbacks */
		lck_mtx_unlock(&inpcb_timeout_lock);

		lck_mtx_lock(&inpcb_lock);
		TAILQ_FOREACH(ipi, &inpcb_head, ipi_entry) {
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_gc_req)) {
				/*
				 * Clear the request counters first; the gc
				 * callback may re-post requests, which are
				 * then folded into gccnt below.
				 */
				bzero(&ipi->ipi_gc_req,
				    sizeof(ipi->ipi_gc_req));
				if (gc && ipi->ipi_gc != NULL) {
					ipi->ipi_gc(ipi);
					gccnt.intimer_lazy +=
					    ipi->ipi_gc_req.intimer_lazy;
					gccnt.intimer_fast +=
					    ipi->ipi_gc_req.intimer_fast;
					gccnt.intimer_nodelay +=
					    ipi->ipi_gc_req.intimer_nodelay;
				}
			}
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_timer_req)) {
				/* Same reset-then-recount dance for the slow timer */
				bzero(&ipi->ipi_timer_req,
				    sizeof(ipi->ipi_timer_req));
				if (t && ipi->ipi_timer != NULL) {
					ipi->ipi_timer(ipi);
					tmcnt.intimer_lazy +=
					    ipi->ipi_timer_req.intimer_lazy;
					tmcnt.intimer_fast +=
					    ipi->ipi_timer_req.intimer_fast;
					tmcnt.intimer_nodelay +=
					    ipi->ipi_timer_req.intimer_nodelay;
				}
			}
		}
		lck_mtx_unlock(&inpcb_lock);
		lck_mtx_lock_spin(&inpcb_timeout_lock);
	}

	/* lock was dropped above, so check first before overriding */
	if (!inpcb_garbage_collecting) {
		inpcb_garbage_collecting = INPCB_HAVE_TIMER_REQ(gccnt);
	}
	if (!inpcb_ticking) {
		inpcb_ticking = INPCB_HAVE_TIMER_REQ(tmcnt);
	}

	/* re-arm the timer if there's work to do */
	inpcb_timeout_run--;
	/*
	 * NOTE(review): inpcb_timeout_run is unsigned (u_int16_t), so the
	 * ">= 0" half of this VERIFY is vacuously true; the "< 2" half still
	 * catches underflow/over-scheduling.
	 */
	VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2);

	if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0) {
		inpcb_sched_timeout();
	} else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5) {
		/* be lazy when idle with little activity */
		inpcb_sched_lazy_timeout();
	} else {
		inpcb_sched_timeout();
	}

	lck_mtx_unlock(&inpcb_timeout_lock);
}
434
/* Arm the inpcb timer with no leeway (a "fast" request). */
static void
inpcb_sched_timeout(void)
{
	_inpcb_sched_timeout(0);
}

/* Arm the inpcb timer with inpcb_timeout_lazy seconds of leeway. */
static void
inpcb_sched_lazy_timeout(void)
{
	_inpcb_sched_timeout(inpcb_timeout_lazy);
}
39236c6e 446
39037602
A
/*
 * Arm the inpcb timeout thread call (deadline is always ~1s out).
 * offset == 0 requests a fast (no-leeway) timer; otherwise a lazy timer
 * with `offset' seconds of leeway is scheduled.  Caller must hold
 * inpcb_timeout_lock (spin mode); it is converted to a full mutex before
 * entering the thread call.
 */
static void
_inpcb_sched_timeout(unsigned int offset)
{
	uint64_t deadline, leeway;

	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	LCK_MTX_ASSERT(&inpcb_timeout_lock, LCK_MTX_ASSERT_OWNED);
	if (inpcb_timeout_run == 0 &&
	    (inpcb_garbage_collecting || inpcb_ticking)) {
		/* No timer pending: arm the shared thread call */
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		if (offset == 0) {
			inpcb_fast_timer_on = TRUE;
			thread_call_enter_delayed(inpcb_thread_call,
			    deadline);
		} else {
			inpcb_fast_timer_on = FALSE;
			clock_interval_to_absolutetime_interval(offset,
			    NSEC_PER_SEC, &leeway);
			thread_call_enter_delayed_with_leeway(
				inpcb_thread_call, NULL, deadline, leeway,
				THREAD_CALL_DELAY_LEEWAY);
		}
	} else if (inpcb_timeout_run == 1 &&
	    offset == 0 && !inpcb_fast_timer_on) {
		/*
		 * Since the request was for a fast timer but the
		 * scheduled timer is a lazy timer, try to schedule
		 * another instance of fast timer also.
		 */
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		inpcb_fast_timer_on = TRUE;
		thread_call_enter_delayed(inpcb_fast_thread_call, deadline);
	}
}
483
/*
 * Record a garbage-collection request of the given urgency (`type') for a
 * pcbinfo and arm a matching timer.  If the backlog of fast+nodelay gc
 * requests exceeds INPCB_GCREQ_THRESHOLD, the request is promoted to
 * INPCB_TIMER_FAST to drain it sooner.
 */
void
inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type)
{
	u_int32_t gccnt;

	lck_mtx_lock_spin(&inpcb_timeout_lock);
	inpcb_garbage_collecting = TRUE;
	gccnt = ipi->ipi_gc_req.intimer_nodelay +
	    ipi->ipi_gc_req.intimer_fast;

	if (gccnt > INPCB_GCREQ_THRESHOLD) {
		type = INPCB_TIMER_FAST;
	}

	switch (type) {
	case INPCB_TIMER_NODELAY:
		atomic_add_32(&ipi->ipi_gc_req.intimer_nodelay, 1);
		inpcb_sched_timeout();
		break;
	case INPCB_TIMER_FAST:
		atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
		inpcb_sched_timeout();
		break;
	default:
		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
		inpcb_sched_lazy_timeout();
		break;
	}
	lck_mtx_unlock(&inpcb_timeout_lock);
}
514
515void
516inpcb_timer_sched(struct inpcbinfo *ipi, u_int32_t type)
517{
39236c6e
A
518 lck_mtx_lock_spin(&inpcb_timeout_lock);
519 inpcb_ticking = TRUE;
520 switch (type) {
521 case INPCB_TIMER_NODELAY:
522 atomic_add_32(&ipi->ipi_timer_req.intimer_nodelay, 1);
39037602 523 inpcb_sched_timeout();
39236c6e
A
524 break;
525 case INPCB_TIMER_FAST:
526 atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
39037602 527 inpcb_sched_timeout();
39236c6e
A
528 break;
529 default:
530 atomic_add_32(&ipi->ipi_timer_req.intimer_lazy, 1);
39037602 531 inpcb_sched_lazy_timeout();
39236c6e
A
532 break;
533 }
534 lck_mtx_unlock(&inpcb_timeout_lock);
535}
536
537void
538in_pcbinfo_attach(struct inpcbinfo *ipi)
539{
540 struct inpcbinfo *ipi0;
541
542 lck_mtx_lock(&inpcb_lock);
543 TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
544 if (ipi0 == ipi) {
545 panic("%s: ipi %p already in the list\n",
546 __func__, ipi);
547 /* NOTREACHED */
548 }
549 }
550 TAILQ_INSERT_TAIL(&inpcb_head, ipi, ipi_entry);
551 lck_mtx_unlock(&inpcb_lock);
552}
553
554int
555in_pcbinfo_detach(struct inpcbinfo *ipi)
556{
557 struct inpcbinfo *ipi0;
558 int error = 0;
559
560 lck_mtx_lock(&inpcb_lock);
561 TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
0a7de745 562 if (ipi0 == ipi) {
39236c6e 563 break;
0a7de745 564 }
39236c6e 565 }
0a7de745 566 if (ipi0 != NULL) {
39236c6e 567 TAILQ_REMOVE(&inpcb_head, ipi0, ipi_entry);
0a7de745 568 } else {
39236c6e 569 error = ENXIO;
0a7de745 570 }
39236c6e
A
571 lck_mtx_unlock(&inpcb_lock);
572
0a7de745 573 return error;
39236c6e
A
574}
575
1c79356b
A
/*
 * Allocate a PCB and associate it with the socket.
 *
 * Returns:	0			Success
 *	ENOBUFS
 *	ENOMEM
 */
int
in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p)
{
#pragma unused(p)
	struct inpcb *inp;
	caddr_t temp;
#if CONFIG_MACF_NET
	int mac_error;
#endif /* CONFIG_MACF_NET */

	if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
		/* Fresh allocation from the protocol's inpcb zone */
		inp = (struct inpcb *)zalloc(pcbinfo->ipi_zone);
		if (inp == NULL) {
			return ENOBUFS;
		}
		bzero((caddr_t)inp, sizeof(*inp));
	} else {
		/*
		 * Reuse the pcb cached by the socket layer; preserve the
		 * saved per-protocol pcb pointer across the bzero.
		 */
		inp = (struct inpcb *)(void *)so->so_saved_pcb;
		temp = inp->inp_saved_ppcb;
		bzero((caddr_t)inp, sizeof(*inp));
		inp->inp_saved_ppcb = temp;
	}

	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	inp->inp_pcbinfo = pcbinfo;
	inp->inp_socket = so;
#if CONFIG_MACF_NET
	/* MAC label setup; undo the zone allocation on failure */
	mac_error = mac_inpcb_label_init(inp, M_WAITOK);
	if (mac_error != 0) {
		if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
			zfree(pcbinfo->ipi_zone, inp);
		}
		return mac_error;
	}
	mac_inpcb_label_associate(so, inp);
#endif /* CONFIG_MACF_NET */
	/* make sure inp_stat is always 64-bit aligned */
	inp->inp_stat = (struct inp_stat *)P2ROUNDUP(inp->inp_stat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store) +
	    sizeof(*inp->inp_stat) > sizeof(inp->inp_stat_store)) {
		panic("%s: insufficient space to align inp_stat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_cstat is always 64-bit aligned */
	inp->inp_cstat = (struct inp_stat *)P2ROUNDUP(inp->inp_cstat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_cstat - (uintptr_t)inp->inp_cstat_store) +
	    sizeof(*inp->inp_cstat) > sizeof(inp->inp_cstat_store)) {
		panic("%s: insufficient space to align inp_cstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_wstat is always 64-bit aligned */
	inp->inp_wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_wstat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_wstat - (uintptr_t)inp->inp_wstat_store) +
	    sizeof(*inp->inp_wstat) > sizeof(inp->inp_wstat_store)) {
		panic("%s: insufficient space to align inp_wstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_Wstat is always 64-bit aligned */
	inp->inp_Wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_Wstat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_Wstat - (uintptr_t)inp->inp_Wstat_store) +
	    sizeof(*inp->inp_Wstat) > sizeof(inp->inp_Wstat_store)) {
		panic("%s: insufficient space to align inp_Wstat", __func__);
		/* NOTREACHED */
	}

	so->so_pcb = (caddr_t)inp;

	if (so->so_proto->pr_flags & PR_PCBLOCK) {
		/* Per-pcb mutex for protocols that lock at pcb granularity */
		lck_mtx_init(&inp->inpcb_mtx, pcbinfo->ipi_lock_grp,
		    pcbinfo->ipi_lock_attr);
	}

#if INET6
	if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) {
		inp->inp_flags |= IN6P_IPV6_V6ONLY;
	}

	if (ip6_auto_flowlabel) {
		inp->inp_flags |= IN6P_AUTOFLOWLABEL;
	}
#endif /* INET6 */
	if (intcoproc_unrestricted) {
		inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
	}

	(void) inp_update_policy(inp);

	/* Publish the new pcb on the pcbinfo's global list */
	lck_rw_lock_exclusive(pcbinfo->ipi_lock);
	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
	pcbinfo->ipi_count++;
	lck_rw_done(pcbinfo->ipi_lock);
	return 0;
}
684
2d21ac55 685/*
39236c6e
A
686 * in_pcblookup_local_and_cleanup does everything
687 * in_pcblookup_local does but it checks for a socket
688 * that's going away. Since we know that the lock is
cb323159 689 * held read+write when this function is called, we
39236c6e
A
690 * can safely dispose of this socket like the slow
691 * timer would usually do and return NULL. This is
692 * great for bind.
693 */
694struct inpcb *
695in_pcblookup_local_and_cleanup(struct inpcbinfo *pcbinfo, struct in_addr laddr,
696 u_int lport_arg, int wild_okay)
2d21ac55
A
697{
698 struct inpcb *inp;
39236c6e 699
2d21ac55
A
700 /* Perform normal lookup */
701 inp = in_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay);
39236c6e 702
2d21ac55 703 /* Check if we found a match but it's waiting to be disposed */
39236c6e 704 if (inp != NULL && inp->inp_wantcnt == WNT_STOPUSING) {
2d21ac55 705 struct socket *so = inp->inp_socket;
39236c6e 706
5ba3f43e 707 socket_lock(so, 0);
39236c6e 708
2d21ac55 709 if (so->so_usecount == 0) {
0a7de745 710 if (inp->inp_state != INPCB_STATE_DEAD) {
b0d623f7 711 in_pcbdetach(inp);
0a7de745
A
712 }
713 in_pcbdispose(inp); /* will unlock & destroy */
2d21ac55 714 inp = NULL;
39236c6e 715 } else {
5ba3f43e 716 socket_unlock(so, 0);
2d21ac55
A
717 }
718 }
39236c6e 719
0a7de745 720 return inp;
2d21ac55
A
721}
722
c910b4d9 723static void
2d21ac55
A
724in_pcb_conflict_post_msg(u_int16_t port)
725{
39236c6e
A
726 /*
727 * Radar 5523020 send a kernel event notification if a
728 * non-participating socket tries to bind the port a socket
729 * who has set SOF_NOTIFYCONFLICT owns.
2d21ac55 730 */
39236c6e 731 struct kev_msg ev_msg;
0a7de745 732 struct kev_in_portinuse in_portinuse;
2d21ac55 733
0a7de745
A
734 bzero(&in_portinuse, sizeof(struct kev_in_portinuse));
735 bzero(&ev_msg, sizeof(struct kev_msg));
736 in_portinuse.port = ntohs(port); /* port in host order */
2d21ac55
A
737 in_portinuse.req_pid = proc_selfpid();
738 ev_msg.vendor_code = KEV_VENDOR_APPLE;
739 ev_msg.kev_class = KEV_NETWORK_CLASS;
740 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
741 ev_msg.event_code = KEV_INET_PORTINUSE;
742 ev_msg.dv[0].data_ptr = &in_portinuse;
0a7de745 743 ev_msg.dv[0].data_length = sizeof(struct kev_in_portinuse);
2d21ac55 744 ev_msg.dv[1].data_length = 0;
39037602 745 dlil_post_complete_msg(NULL, &ev_msg);
2d21ac55 746}
39236c6e 747
2d21ac55 748/*
39236c6e
A
749 * Bind an INPCB to an address and/or port. This routine should not alter
750 * the caller-supplied local address "nam".
751 *
2d21ac55
A
752 * Returns: 0 Success
753 * EADDRNOTAVAIL Address not available.
754 * EINVAL Invalid argument
755 * EAFNOSUPPORT Address family not supported [notdef]
756 * EACCES Permission denied
757 * EADDRINUSE Address in use
758 * EAGAIN Resource unavailable, try again
6d2010ae 759 * priv_check_cred:EPERM Operation not permitted
2d21ac55 760 */
1c79356b 761int
2d21ac55 762in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p)
1c79356b 763{
2d21ac55 764 struct socket *so = inp->inp_socket;
9bccf70c 765 unsigned short *lastport;
1c79356b 766 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
b0d623f7 767 u_short lport = 0, rand_port = 0;
1c79356b 768 int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
b0d623f7 769 int error, randomport, conflict = 0;
fe8ab488 770 boolean_t anonport = FALSE;
6d2010ae 771 kauth_cred_t cred;
fe8ab488
A
772 struct in_addr laddr;
773 struct ifnet *outif = NULL;
1c79356b 774
0a7de745
A
775 if (TAILQ_EMPTY(&in_ifaddrhead)) { /* XXX broken! */
776 return EADDRNOTAVAIL;
777 }
778 if (!(so->so_options & (SO_REUSEADDR | SO_REUSEPORT))) {
1c79356b 779 wild = 1;
0a7de745 780 }
fe8ab488
A
781
782 bzero(&laddr, sizeof(laddr));
783
4bd07ac2
A
784 socket_unlock(so, 0); /* keep reference on socket */
785 lck_rw_lock_exclusive(pcbinfo->ipi_lock);
a39ff7e2
A
786 if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
787 /* another thread completed the bind */
788 lck_rw_done(pcbinfo->ipi_lock);
789 socket_lock(so, 0);
0a7de745 790 return EINVAL;
a39ff7e2 791 }
4bd07ac2 792
39236c6e 793 if (nam != NULL) {
0a7de745 794 if (nam->sa_len != sizeof(struct sockaddr_in)) {
39236c6e 795 lck_rw_done(pcbinfo->ipi_lock);
91447636 796 socket_lock(so, 0);
0a7de745 797 return EINVAL;
91447636 798 }
39236c6e 799#if 0
1c79356b
A
800 /*
801 * We should check the family, but old programs
802 * incorrectly fail to initialize it.
803 */
39236c6e
A
804 if (nam->sa_family != AF_INET) {
805 lck_rw_done(pcbinfo->ipi_lock);
91447636 806 socket_lock(so, 0);
0a7de745 807 return EAFNOSUPPORT;
91447636 808 }
39236c6e
A
809#endif /* 0 */
810 lport = SIN(nam)->sin_port;
811
812 if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr))) {
1c79356b
A
813 /*
814 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
815 * allow complete duplication of binding if
816 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
817 * and a multicast address is bound on both
818 * new and duplicated sockets.
819 */
0a7de745
A
820 if (so->so_options & SO_REUSEADDR) {
821 reuseport = SO_REUSEADDR | SO_REUSEPORT;
822 }
39236c6e
A
823 } else if (SIN(nam)->sin_addr.s_addr != INADDR_ANY) {
824 struct sockaddr_in sin;
91447636 825 struct ifaddr *ifa;
39236c6e
A
826
827 /* Sanitized for interface address searches */
0a7de745 828 bzero(&sin, sizeof(sin));
39236c6e 829 sin.sin_family = AF_INET;
0a7de745 830 sin.sin_len = sizeof(struct sockaddr_in);
39236c6e
A
831 sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
832
833 ifa = ifa_ifwithaddr(SA(&sin));
834 if (ifa == NULL) {
835 lck_rw_done(pcbinfo->ipi_lock);
91447636 836 socket_lock(so, 0);
0a7de745 837 return EADDRNOTAVAIL;
39236c6e
A
838 } else {
839 /*
840 * Opportunistically determine the outbound
841 * interface that may be used; this may not
842 * hold true if we end up using a route
843 * going over a different interface, e.g.
844 * when sending to a local address. This
845 * will get updated again after sending.
846 */
6d2010ae 847 IFA_LOCK(ifa);
316670eb 848 outif = ifa->ifa_ifp;
6d2010ae
A
849 IFA_UNLOCK(ifa);
850 IFA_REMREF(ifa);
91447636 851 }
1c79356b 852 }
cb323159
A
853
854
39236c6e 855 if (lport != 0) {
1c79356b 856 struct inpcb *t;
39236c6e 857 uid_t u;
1c79356b 858
5ba3f43e 859#if !CONFIG_EMBEDDED
d9a64523 860 if (ntohs(lport) < IPPORT_RESERVED &&
cb323159
A
861 SIN(nam)->sin_addr.s_addr != 0 &&
862 !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
6d2010ae 863 cred = kauth_cred_proc_ref(p);
39236c6e
A
864 error = priv_check_cred(cred,
865 PRIV_NETINET_RESERVEDPORT, 0);
6d2010ae
A
866 kauth_cred_unref(&cred);
867 if (error != 0) {
39236c6e 868 lck_rw_done(pcbinfo->ipi_lock);
6d2010ae 869 socket_lock(so, 0);
0a7de745 870 return EACCES;
6d2010ae 871 }
91447636 872 }
5ba3f43e 873#endif /* !CONFIG_EMBEDDED */
cb323159
A
874 /*
875 * Check wether the process is allowed to bind to a restricted port
876 */
877 if (!current_task_can_use_restricted_in_port(lport,
878 so->so_proto->pr_protocol, PORT_FLAGS_BSD)) {
879 lck_rw_done(pcbinfo->ipi_lock);
880 socket_lock(so, 0);
881 return EADDRINUSE;
882 }
883
39236c6e
A
884 if (!IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
885 (u = kauth_cred_getuid(so->so_cred)) != 0 &&
886 (t = in_pcblookup_local_and_cleanup(
0a7de745
A
887 inp->inp_pcbinfo, SIN(nam)->sin_addr, lport,
888 INPLOOKUP_WILDCARD)) != NULL &&
39236c6e
A
889 (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
890 t->inp_laddr.s_addr != INADDR_ANY ||
891 !(t->inp_socket->so_options & SO_REUSEPORT)) &&
892 (u != kauth_cred_getuid(t->inp_socket->so_cred)) &&
893 !(t->inp_socket->so_flags & SOF_REUSESHAREUID) &&
894 (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
cb323159
A
895 t->inp_laddr.s_addr != INADDR_ANY) &&
896 (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
897 !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
898 uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
39236c6e
A
899 if ((t->inp_socket->so_flags &
900 SOF_NOTIFYCONFLICT) &&
0a7de745 901 !(so->so_flags & SOF_NOTIFYCONFLICT)) {
39236c6e 902 conflict = 1;
0a7de745 903 }
39236c6e
A
904
905 lck_rw_done(pcbinfo->ipi_lock);
906
0a7de745 907 if (conflict) {
39236c6e 908 in_pcb_conflict_post_msg(lport);
0a7de745 909 }
2d21ac55 910
39236c6e 911 socket_lock(so, 0);
0a7de745 912 return EADDRINUSE;
1c79356b 913 }
39236c6e
A
914 t = in_pcblookup_local_and_cleanup(pcbinfo,
915 SIN(nam)->sin_addr, lport, wild);
916 if (t != NULL &&
cb323159
A
917 (reuseport & t->inp_socket->so_options) == 0 &&
918 (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
919 !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
920 uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
1c79356b 921#if INET6
39236c6e
A
922 if (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
923 t->inp_laddr.s_addr != INADDR_ANY ||
924 SOCK_DOM(so) != PF_INET6 ||
925 SOCK_DOM(t->inp_socket) != PF_INET6)
2d21ac55
A
926#endif /* INET6 */
927 {
39236c6e
A
928 if ((t->inp_socket->so_flags &
929 SOF_NOTIFYCONFLICT) &&
0a7de745 930 !(so->so_flags & SOF_NOTIFYCONFLICT)) {
2d21ac55 931 conflict = 1;
0a7de745 932 }
2d21ac55 933
39236c6e 934 lck_rw_done(pcbinfo->ipi_lock);
2d21ac55 935
0a7de745 936 if (conflict) {
2d21ac55 937 in_pcb_conflict_post_msg(lport);
0a7de745 938 }
91447636 939 socket_lock(so, 0);
0a7de745 940 return EADDRINUSE;
91447636 941 }
1c79356b
A
942 }
943 }
fe8ab488 944 laddr = SIN(nam)->sin_addr;
1c79356b
A
945 }
946 if (lport == 0) {
947 u_short first, last;
948 int count;
5ba3f43e 949 bool found;
1c79356b 950
cb323159
A
951 /*
952 * Override wild = 1 for implicit bind (mainly used by connect)
953 * For implicit bind (lport == 0), we always use an unused port,
954 * so REUSEADDR|REUSEPORT don't apply
955 */
956 wild = 1;
957
39236c6e
A
958 randomport = (so->so_flags & SOF_BINDRANDOMPORT) ||
959 (so->so_type == SOCK_STREAM ? tcp_use_randomport :
960 udp_use_randomport);
961
962 /*
fe8ab488
A
963 * Even though this looks similar to the code in
964 * in6_pcbsetport, the v6 vs v4 checks are different.
39236c6e 965 */
fe8ab488 966 anonport = TRUE;
1c79356b 967 if (inp->inp_flags & INP_HIGHPORT) {
0a7de745 968 first = ipport_hifirstauto; /* sysctl */
1c79356b 969 last = ipport_hilastauto;
39236c6e 970 lastport = &pcbinfo->ipi_lasthi;
1c79356b 971 } else if (inp->inp_flags & INP_LOWPORT) {
6d2010ae 972 cred = kauth_cred_proc_ref(p);
39236c6e
A
973 error = priv_check_cred(cred,
974 PRIV_NETINET_RESERVEDPORT, 0);
6d2010ae
A
975 kauth_cred_unref(&cred);
976 if (error != 0) {
39236c6e 977 lck_rw_done(pcbinfo->ipi_lock);
91447636 978 socket_lock(so, 0);
0a7de745 979 return error;
91447636 980 }
0a7de745
A
981 first = ipport_lowfirstauto; /* 1023 */
982 last = ipport_lowlastauto; /* 600 */
39236c6e 983 lastport = &pcbinfo->ipi_lastlow;
1c79356b 984 } else {
0a7de745 985 first = ipport_firstauto; /* sysctl */
1c79356b 986 last = ipport_lastauto;
39236c6e 987 lastport = &pcbinfo->ipi_lastport;
1c79356b 988 }
b0d623f7
A
989 /* No point in randomizing if only one port is available */
990
0a7de745 991 if (first == last) {
39236c6e 992 randomport = 0;
0a7de745 993 }
1c79356b
A
994 /*
995 * Simple check to ensure all ports are not used up causing
996 * a deadlock here.
997 *
998 * We split the two cases (up and down) so that the direction
999 * is not being tested on each round of the loop.
1000 */
1001 if (first > last) {
5ba3f43e
A
1002 struct in_addr lookup_addr;
1003
1c79356b
A
1004 /*
1005 * counting down
1006 */
b0d623f7 1007 if (randomport) {
0a7de745 1008 read_frandom(&rand_port, sizeof(rand_port));
39236c6e
A
1009 *lastport =
1010 first - (rand_port % (first - last));
b0d623f7 1011 }
1c79356b
A
1012 count = first - last;
1013
5ba3f43e
A
1014 lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
1015 inp->inp_laddr;
1016
1017 found = false;
1c79356b 1018 do {
0a7de745 1019 if (count-- < 0) { /* completely used? */
39236c6e 1020 lck_rw_done(pcbinfo->ipi_lock);
91447636 1021 socket_lock(so, 0);
0a7de745 1022 return EADDRNOTAVAIL;
1c79356b
A
1023 }
1024 --*lastport;
0a7de745 1025 if (*lastport > first || *lastport < last) {
1c79356b 1026 *lastport = first;
0a7de745 1027 }
1c79356b 1028 lport = htons(*lastport);
5ba3f43e 1029
cb323159
A
1030 /*
1031 * Skip if this is a restricted port as we do not want to
1032 * restricted ports as ephemeral
1033 */
1034 if (IS_RESTRICTED_IN_PORT(lport)) {
1035 continue;
1036 }
1037
5ba3f43e
A
1038 found = in_pcblookup_local_and_cleanup(pcbinfo,
1039 lookup_addr, lport, wild) == NULL;
1040 } while (!found);
1c79356b 1041 } else {
5ba3f43e
A
1042 struct in_addr lookup_addr;
1043
1c79356b
A
1044 /*
1045 * counting up
1046 */
b0d623f7 1047 if (randomport) {
0a7de745 1048 read_frandom(&rand_port, sizeof(rand_port));
39236c6e
A
1049 *lastport =
1050 first + (rand_port % (first - last));
b0d623f7 1051 }
1c79356b
A
1052 count = last - first;
1053
5ba3f43e
A
1054 lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
1055 inp->inp_laddr;
1056
1057 found = false;
1c79356b 1058 do {
0a7de745 1059 if (count-- < 0) { /* completely used? */
39236c6e 1060 lck_rw_done(pcbinfo->ipi_lock);
91447636 1061 socket_lock(so, 0);
0a7de745 1062 return EADDRNOTAVAIL;
1c79356b
A
1063 }
1064 ++*lastport;
0a7de745 1065 if (*lastport < first || *lastport > last) {
1c79356b 1066 *lastport = first;
0a7de745 1067 }
1c79356b 1068 lport = htons(*lastport);
5ba3f43e 1069
cb323159
A
1070 /*
1071 * Skip if this is a restricted port as we do not want to
1072 * restricted ports as ephemeral
1073 */
1074 if (IS_RESTRICTED_IN_PORT(lport)) {
1075 continue;
1076 }
1077
5ba3f43e
A
1078 found = in_pcblookup_local_and_cleanup(pcbinfo,
1079 lookup_addr, lport, wild) == NULL;
1080 } while (!found);
1c79356b
A
1081 }
1082 }
91447636 1083 socket_lock(so, 0);
4bd07ac2
A
1084
1085 /*
1086 * We unlocked socket's protocol lock for a long time.
1087 * The socket might have been dropped/defuncted.
1088 * Checking if world has changed since.
1089 */
1090 if (inp->inp_state == INPCB_STATE_DEAD) {
1091 lck_rw_done(pcbinfo->ipi_lock);
0a7de745 1092 return ECONNABORTED;
4bd07ac2
A
1093 }
1094
fe8ab488
A
1095 if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
1096 lck_rw_done(pcbinfo->ipi_lock);
0a7de745 1097 return EINVAL;
fe8ab488
A
1098 }
1099
1100 if (laddr.s_addr != INADDR_ANY) {
1101 inp->inp_laddr = laddr;
1102 inp->inp_last_outifp = outif;
1103 }
1c79356b 1104 inp->inp_lport = lport;
0a7de745 1105 if (anonport) {
fe8ab488 1106 inp->inp_flags |= INP_ANONPORT;
0a7de745 1107 }
fe8ab488 1108
91447636 1109 if (in_pcbinshash(inp, 1) != 0) {
1c79356b 1110 inp->inp_laddr.s_addr = INADDR_ANY;
316670eb 1111 inp->inp_last_outifp = NULL;
fe8ab488
A
1112
1113 inp->inp_lport = 0;
0a7de745 1114 if (anonport) {
fe8ab488 1115 inp->inp_flags &= ~INP_ANONPORT;
0a7de745 1116 }
39236c6e 1117 lck_rw_done(pcbinfo->ipi_lock);
0a7de745 1118 return EAGAIN;
1c79356b 1119 }
39236c6e 1120 lck_rw_done(pcbinfo->ipi_lock);
2d21ac55 1121 sflt_notify(so, sock_evt_bound, NULL);
0a7de745 1122 return 0;
1c79356b
A
1123}
1124
/*
 * True when the IPv4 destination is link-local, loopback, zeronet,
 * multicast, or RFC 1918 private — destinations for which an APN
 * fallback notification must never be posted (only global unicast
 * destinations qualify).
 */
#define APN_FALLBACK_IP_FILTER(a) \
	(IN_LINKLOCAL(ntohl((a)->sin_addr.s_addr)) || \
	IN_LOOPBACK(ntohl((a)->sin_addr.s_addr)) || \
	IN_ZERONET(ntohl((a)->sin_addr.s_addr)) || \
	IN_MULTICAST(ntohl((a)->sin_addr.s_addr)) || \
	IN_PRIVATE(ntohl((a)->sin_addr.s_addr)))

/* Minimum spacing (in net_uptime seconds) between fallback notifications */
#define APN_FALLBACK_NOTIF_INTERVAL 2 /* Magic Number */

/* net_uptime() timestamp of the last notification, used for throttling */
static uint64_t last_apn_fallback = 0;
1134
/*
 * Decide whether an APN fallback notification should be posted for a
 * connect attempt that could not find a route.  Returns TRUE only when
 * ALL of the following hold:
 *  - the feature is enabled and the caller is not the kernel task,
 *  - the socket has not opted out via SO_NOAPNFALLBK,
 *  - we are outside the throttling interval since the last notification,
 *  - the destination (if supplied) is not filtered by
 *    APN_FALLBACK_IP_FILTER (i.e. it is a global unicast address),
 *  - an unscoped default IPv6 route exists through a cellular interface
 *    while no unscoped default IPv4 route exists,
 *  - the calling binary looks like a third-party app (bundle ID contains
 *    a dot and is not "com.apple.*") built before the App Store IPv6
 *    requirement date.
 */
static boolean_t
apn_fallback_required(proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4)
{
	uint64_t timenow;
	struct sockaddr_storage lookup_default_addr;
	struct rtentry *rt = NULL;

	VERIFY(proc != NULL);

	if (apn_fallbk_enabled == FALSE) {
		return FALSE;
	}

	if (proc == kernproc) {
		return FALSE;
	}

	if (so && (so->so_options & SO_NOAPNFALLBK)) {
		return FALSE;
	}

	/* Throttle: at most one notification per APN_FALLBACK_NOTIF_INTERVAL */
	timenow = net_uptime();
	if ((timenow - last_apn_fallback) < APN_FALLBACK_NOTIF_INTERVAL) {
		apn_fallbk_log((LOG_INFO, "APN fallback notification throttled.\n"));
		return FALSE;
	}

	if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4)) {
		return FALSE;
	}

	/* Check if we have unscoped IPv6 default route through cellular */
	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
	lookup_default_addr.ss_family = AF_INET6;
	lookup_default_addr.ss_len = sizeof(struct sockaddr_in6);

	rt = rtalloc1((struct sockaddr *)&lookup_default_addr, 0, 0);
	if (NULL == rt) {
		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
		    "unscoped default IPv6 route.\n"));
		return FALSE;
	}

	if (!IFNET_IS_CELLULAR(rt->rt_ifp)) {
		rtfree(rt);
		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
		    "unscoped default IPv6 route through cellular interface.\n"));
		return FALSE;
	}

	/*
	 * We have a default IPv6 route, ensure that
	 * we do not have IPv4 default route before triggering
	 * the event
	 */
	rtfree(rt);
	rt = NULL;

	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
	lookup_default_addr.ss_family = AF_INET;
	lookup_default_addr.ss_len = sizeof(struct sockaddr_in);

	rt = rtalloc1((struct sockaddr *)&lookup_default_addr, 0, 0);

	if (rt) {
		rtfree(rt);
		rt = NULL;
		apn_fallbk_log((LOG_INFO, "APN fallback notification found unscoped "
		    "IPv4 default route!\n"));
		return FALSE;
	}

	{
		/*
		 * We disable APN fallback if the binary is not a third-party app.
		 * Note that platform daemons use their process name as a
		 * bundle ID so we filter out bundle IDs without dots.
		 */
		const char *bundle_id = cs_identity_get(proc);
		if (bundle_id == NULL ||
		    bundle_id[0] == '\0' ||
		    strchr(bundle_id, '.') == NULL ||
		    strncmp(bundle_id, "com.apple.", sizeof("com.apple.") - 1) == 0) {
			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found first-"
			    "party bundle ID \"%s\"!\n", (bundle_id ? bundle_id : "NULL")));
			return FALSE;
		}
	}

	{
		/*
		 * The Apple App Store IPv6 requirement started on
		 * June 1st, 2016 at 12:00:00 AM PDT.
		 * We disable APN fallback if the binary is more recent than that.
		 * We check both atime and birthtime since birthtime is not always supported.
		 */
		static const long ipv6_start_date = 1464764400L;
		vfs_context_t context;
		struct stat64 sb;
		int vn_stat_error;

		bzero(&sb, sizeof(struct stat64));
		context = vfs_context_create(NULL);
		/* stat the process's executable vnode to read its timestamps */
		vn_stat_error = vn_stat(proc->p_textvp, &sb, NULL, 1, 0, context);
		(void)vfs_context_rele(context);

		if (vn_stat_error != 0 ||
		    sb.st_atimespec.tv_sec >= ipv6_start_date ||
		    sb.st_birthtimespec.tv_sec >= ipv6_start_date) {
			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found binary "
			    "too recent! (err %d atime %ld mtime %ld ctime %ld birthtime %ld)\n",
			    vn_stat_error, sb.st_atimespec.tv_sec, sb.st_mtimespec.tv_sec,
			    sb.st_ctimespec.tv_sec, sb.st_birthtimespec.tv_sec));
			return FALSE;
		}
	}
	return TRUE;
}
1253
/*
 * Post a KEV_NETEVENT_APNFALLBACK kernel event for the given process and
 * socket, and record the current uptime in last_apn_fallback so further
 * notifications are throttled.  The effective pid/uuid reported in the
 * event come from the delegated identity when SOF_DELEGATED is set,
 * otherwise from the last process that used the socket.
 */
static void
apn_fallback_trigger(proc_t proc, struct socket *so)
{
	pid_t pid = 0;
	struct kev_msg ev_msg;
	struct kev_netevent_apnfallbk_data apnfallbk_data;

	last_apn_fallback = net_uptime();
	pid = proc_pid(proc);
	uuid_t application_uuid;
	uuid_clear(application_uuid);
	proc_getexecutableuuid(proc, application_uuid,
	    sizeof(application_uuid));

	bzero(&ev_msg, sizeof(struct kev_msg));
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NETEVENT_SUBCLASS;
	ev_msg.event_code = KEV_NETEVENT_APNFALLBACK;

	bzero(&apnfallbk_data, sizeof(apnfallbk_data));

	/* Report the delegated identity when the socket acts on behalf of another process */
	if (so->so_flags & SOF_DELEGATED) {
		apnfallbk_data.epid = so->e_pid;
		uuid_copy(apnfallbk_data.euuid, so->e_uuid);
	} else {
		apnfallbk_data.epid = so->last_pid;
		uuid_copy(apnfallbk_data.euuid, so->last_uuid);
	}

	ev_msg.dv[0].data_ptr = &apnfallbk_data;
	ev_msg.dv[0].data_length = sizeof(apnfallbk_data);
	kev_post_msg(&ev_msg);
	apn_fallbk_log((LOG_INFO, "APN fallback notification issued.\n"));
}
1289
1c79356b 1290/*
39236c6e
A
1291 * Transform old in_pcbconnect() into an inner subroutine for new
1292 * in_pcbconnect(); do some validity-checking on the remote address
1293 * (in "nam") and then determine local host address (i.e., which
1294 * interface) to use to access that remote host.
1295 *
1296 * This routine may alter the caller-supplied remote address "nam".
1c79356b 1297 *
39236c6e
A
1298 * The caller may override the bound-to-interface setting of the socket
1299 * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
1300 *
1301 * This routine might return an ifp with a reference held if the caller
1302 * provides a non-NULL outif, even in the error case. The caller is
1303 * responsible for releasing its reference.
2d21ac55
A
1304 *
1305 * Returns: 0 Success
1306 * EINVAL Invalid argument
1307 * EAFNOSUPPORT Address family not supported
1308 * EADDRNOTAVAIL Address not available
1c79356b 1309 */
1c79356b 1310int
39236c6e 1311in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr,
39037602 1312 unsigned int ifscope, struct ifnet **outif, int raw)
1c79356b 1313{
39236c6e
A
1314 struct route *ro = &inp->inp_route;
1315 struct in_ifaddr *ia = NULL;
1316 struct sockaddr_in sin;
1317 int error = 0;
fe8ab488 1318 boolean_t restricted = FALSE;
39236c6e 1319
0a7de745 1320 if (outif != NULL) {
39236c6e 1321 *outif = NULL;
0a7de745
A
1322 }
1323 if (nam->sa_len != sizeof(struct sockaddr_in)) {
1324 return EINVAL;
1325 }
1326 if (SIN(nam)->sin_family != AF_INET) {
1327 return EAFNOSUPPORT;
1328 }
1329 if (raw == 0 && SIN(nam)->sin_port == 0) {
1330 return EADDRNOTAVAIL;
1331 }
b0d623f7 1332
39236c6e
A
1333 /*
1334 * If the destination address is INADDR_ANY,
1335 * use the primary local address.
1336 * If the supplied address is INADDR_BROADCAST,
1337 * and the primary interface supports broadcast,
1338 * choose the broadcast address for that interface.
1339 */
39037602
A
1340 if (raw == 0 && (SIN(nam)->sin_addr.s_addr == INADDR_ANY ||
1341 SIN(nam)->sin_addr.s_addr == (u_int32_t)INADDR_BROADCAST)) {
39236c6e
A
1342 lck_rw_lock_shared(in_ifaddr_rwlock);
1343 if (!TAILQ_EMPTY(&in_ifaddrhead)) {
1344 ia = TAILQ_FIRST(&in_ifaddrhead);
1345 IFA_LOCK_SPIN(&ia->ia_ifa);
1346 if (SIN(nam)->sin_addr.s_addr == INADDR_ANY) {
1347 SIN(nam)->sin_addr = IA_SIN(ia)->sin_addr;
1348 } else if (ia->ia_ifp->if_flags & IFF_BROADCAST) {
1349 SIN(nam)->sin_addr =
1350 SIN(&ia->ia_broadaddr)->sin_addr;
1351 }
1352 IFA_UNLOCK(&ia->ia_ifa);
1353 ia = NULL;
1354 }
1355 lck_rw_done(in_ifaddr_rwlock);
1356 }
1357 /*
1358 * Otherwise, if the socket has already bound the source, just use it.
1359 */
1360 if (inp->inp_laddr.s_addr != INADDR_ANY) {
1361 VERIFY(ia == NULL);
1362 *laddr = inp->inp_laddr;
0a7de745 1363 return 0;
1c79356b 1364 }
6d2010ae 1365
39236c6e
A
1366 /*
1367 * If the ifscope is specified by the caller (e.g. IP_PKTINFO)
1368 * then it overrides the sticky ifscope set for the socket.
1369 */
0a7de745 1370 if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) {
39236c6e 1371 ifscope = inp->inp_boundifp->if_index;
0a7de745 1372 }
6d2010ae 1373
39236c6e
A
1374 /*
1375 * If route is known or can be allocated now,
1376 * our src addr is taken from the i/f, else punt.
1377 * Note that we should check the address family of the cached
1378 * destination, in case of sharing the cache with IPv6.
1379 */
0a7de745 1380 if (ro->ro_rt != NULL) {
39236c6e 1381 RT_LOCK_SPIN(ro->ro_rt);
0a7de745 1382 }
39236c6e
A
1383 if (ROUTE_UNUSABLE(ro) || ro->ro_dst.sa_family != AF_INET ||
1384 SIN(&ro->ro_dst)->sin_addr.s_addr != SIN(nam)->sin_addr.s_addr ||
1385 (inp->inp_socket->so_options & SO_DONTROUTE)) {
0a7de745 1386 if (ro->ro_rt != NULL) {
b0d623f7 1387 RT_UNLOCK(ro->ro_rt);
0a7de745 1388 }
39236c6e
A
1389 ROUTE_RELEASE(ro);
1390 }
1391 if (!(inp->inp_socket->so_options & SO_DONTROUTE) &&
1392 (ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) {
0a7de745 1393 if (ro->ro_rt != NULL) {
39236c6e 1394 RT_UNLOCK(ro->ro_rt);
0a7de745 1395 }
39236c6e
A
1396 ROUTE_RELEASE(ro);
1397 /* No route yet, so try to acquire one */
0a7de745 1398 bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
39236c6e 1399 ro->ro_dst.sa_family = AF_INET;
0a7de745 1400 ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
39236c6e
A
1401 SIN(&ro->ro_dst)->sin_addr = SIN(nam)->sin_addr;
1402 rtalloc_scoped(ro, ifscope);
0a7de745 1403 if (ro->ro_rt != NULL) {
39236c6e 1404 RT_LOCK_SPIN(ro->ro_rt);
0a7de745 1405 }
39236c6e
A
1406 }
1407 /* Sanitized local copy for interface address searches */
0a7de745 1408 bzero(&sin, sizeof(sin));
39236c6e 1409 sin.sin_family = AF_INET;
0a7de745 1410 sin.sin_len = sizeof(struct sockaddr_in);
39236c6e
A
1411 sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
1412 /*
1413 * If we did not find (or use) a route, assume dest is reachable
1414 * on a directly connected network and try to find a corresponding
1415 * interface to take the source address from.
1416 */
1417 if (ro->ro_rt == NULL) {
39037602
A
1418 proc_t proc = current_proc();
1419
39236c6e
A
1420 VERIFY(ia == NULL);
1421 ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
0a7de745 1422 if (ia == NULL) {
39236c6e 1423 ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
0a7de745 1424 }
39236c6e 1425 error = ((ia == NULL) ? ENETUNREACH : 0);
743345f9 1426
39037602 1427 if (apn_fallback_required(proc, inp->inp_socket,
0a7de745 1428 (void *)nam)) {
d9a64523 1429 apn_fallback_trigger(proc, inp->inp_socket);
0a7de745 1430 }
39037602 1431
39236c6e
A
1432 goto done;
1433 }
1434 RT_LOCK_ASSERT_HELD(ro->ro_rt);
1435 /*
1436 * If the outgoing interface on the route found is not
1437 * a loopback interface, use the address from that interface.
1438 */
1439 if (!(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)) {
1440 VERIFY(ia == NULL);
6d2010ae
A
1441 /*
1442 * If the route points to a cellular interface and the
1443 * caller forbids our using interfaces of such type,
1444 * pretend that there is no route.
fe8ab488 1445 * Apply the same logic for expensive interfaces.
6d2010ae 1446 */
fe8ab488 1447 if (inp_restricted_send(inp, ro->ro_rt->rt_ifp)) {
39236c6e
A
1448 RT_UNLOCK(ro->ro_rt);
1449 ROUTE_RELEASE(ro);
1450 error = EHOSTUNREACH;
fe8ab488 1451 restricted = TRUE;
39236c6e 1452 } else {
6d2010ae
A
1453 /* Become a regular mutex */
1454 RT_CONVERT_LOCK(ro->ro_rt);
39236c6e
A
1455 ia = ifatoia(ro->ro_rt->rt_ifa);
1456 IFA_ADDREF(&ia->ia_ifa);
d9a64523
A
1457
1458 /*
1459 * Mark the control block for notification of
1460 * a possible flow that might undergo clat46
1461 * translation.
1462 *
1463 * We defer the decision to a later point when
1464 * inpcb is being disposed off.
1465 * The reason is that we only want to send notification
1466 * if the flow was ever used to send data.
1467 */
0a7de745 1468 if (IS_INTF_CLAT46(ro->ro_rt->rt_ifp)) {
d9a64523 1469 inp->inp_flags2 |= INP2_CLAT46_FLOW;
0a7de745 1470 }
d9a64523 1471
b0d623f7 1472 RT_UNLOCK(ro->ro_rt);
39236c6e 1473 error = 0;
91447636 1474 }
39236c6e
A
1475 goto done;
1476 }
1477 VERIFY(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK);
1478 RT_UNLOCK(ro->ro_rt);
1479 /*
1480 * The outgoing interface is marked with 'loopback net', so a route
1481 * to ourselves is here.
1482 * Try to find the interface of the destination address and then
1483 * take the address from there. That interface is not necessarily
1484 * a loopback interface.
1485 */
1486 VERIFY(ia == NULL);
1487 ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
0a7de745 1488 if (ia == NULL) {
39236c6e 1489 ia = ifatoia(ifa_ifwithaddr_scoped(SA(&sin), ifscope));
0a7de745
A
1490 }
1491 if (ia == NULL) {
39236c6e 1492 ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
0a7de745 1493 }
39236c6e
A
1494 if (ia == NULL) {
1495 RT_LOCK(ro->ro_rt);
1496 ia = ifatoia(ro->ro_rt->rt_ifa);
0a7de745 1497 if (ia != NULL) {
39236c6e 1498 IFA_ADDREF(&ia->ia_ifa);
0a7de745 1499 }
39236c6e
A
1500 RT_UNLOCK(ro->ro_rt);
1501 }
1502 error = ((ia == NULL) ? ENETUNREACH : 0);
1503
1504done:
1505 /*
1506 * If the destination address is multicast and an outgoing
1507 * interface has been set as a multicast option, use the
1508 * address of that interface as our source address.
1509 */
15129b1c 1510 if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
39236c6e
A
1511 inp->inp_moptions != NULL) {
1512 struct ip_moptions *imo;
1513 struct ifnet *ifp;
1514
1515 imo = inp->inp_moptions;
1516 IMO_LOCK(imo);
1517 if (imo->imo_multicast_ifp != NULL && (ia == NULL ||
1518 ia->ia_ifp != imo->imo_multicast_ifp)) {
1519 ifp = imo->imo_multicast_ifp;
0a7de745 1520 if (ia != NULL) {
6d2010ae 1521 IFA_REMREF(&ia->ia_ifa);
0a7de745 1522 }
39236c6e
A
1523 lck_rw_lock_shared(in_ifaddr_rwlock);
1524 TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
0a7de745 1525 if (ia->ia_ifp == ifp) {
39236c6e 1526 break;
0a7de745 1527 }
6d2010ae 1528 }
0a7de745 1529 if (ia != NULL) {
39236c6e 1530 IFA_ADDREF(&ia->ia_ifa);
0a7de745 1531 }
39236c6e 1532 lck_rw_done(in_ifaddr_rwlock);
0a7de745 1533 if (ia == NULL) {
39236c6e 1534 error = EADDRNOTAVAIL;
0a7de745 1535 } else {
15129b1c 1536 error = 0;
0a7de745 1537 }
1c79356b 1538 }
39236c6e
A
1539 IMO_UNLOCK(imo);
1540 }
1541 /*
1542 * Don't do pcblookup call here; return interface in laddr
1543 * and exit to caller, that will do the lookup.
1544 */
1545 if (ia != NULL) {
1c79356b 1546 /*
39236c6e
A
1547 * If the source address belongs to a cellular interface
1548 * and the socket forbids our using interfaces of such
1549 * type, pretend that there is no source address.
fe8ab488 1550 * Apply the same logic for expensive interfaces.
1c79356b 1551 */
39236c6e 1552 IFA_LOCK_SPIN(&ia->ia_ifa);
fe8ab488 1553 if (inp_restricted_send(inp, ia->ia_ifa.ifa_ifp)) {
39236c6e
A
1554 IFA_UNLOCK(&ia->ia_ifa);
1555 error = EHOSTUNREACH;
fe8ab488 1556 restricted = TRUE;
39236c6e
A
1557 } else if (error == 0) {
1558 *laddr = ia->ia_addr.sin_addr;
1559 if (outif != NULL) {
1560 struct ifnet *ifp;
1561
0a7de745 1562 if (ro->ro_rt != NULL) {
39236c6e 1563 ifp = ro->ro_rt->rt_ifp;
0a7de745 1564 } else {
39236c6e 1565 ifp = ia->ia_ifp;
0a7de745 1566 }
39236c6e
A
1567
1568 VERIFY(ifp != NULL);
1569 IFA_CONVERT_LOCK(&ia->ia_ifa);
0a7de745
A
1570 ifnet_reference(ifp); /* for caller */
1571 if (*outif != NULL) {
39236c6e 1572 ifnet_release(*outif);
0a7de745 1573 }
39236c6e 1574 *outif = ifp;
1c79356b 1575 }
39236c6e
A
1576 IFA_UNLOCK(&ia->ia_ifa);
1577 } else {
1578 IFA_UNLOCK(&ia->ia_ifa);
1c79356b 1579 }
6d2010ae 1580 IFA_REMREF(&ia->ia_ifa);
39236c6e
A
1581 ia = NULL;
1582 }
1583
fe8ab488 1584 if (restricted && error == EHOSTUNREACH) {
39236c6e
A
1585 soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED |
1586 SO_FILT_HINT_IFDENIED));
1c79356b 1587 }
39236c6e 1588
0a7de745 1589 return error;
1c79356b
A
1590}
1591
1592/*
1593 * Outer subroutine:
1594 * Connect from a socket to a specified address.
1595 * Both address and port must be specified in argument sin.
1596 * If don't have a local address for this socket yet,
1597 * then pick one.
39236c6e
A
1598 *
1599 * The caller may override the bound-to-interface setting of the socket
1600 * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
1c79356b
A
1601 */
int
in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p,
    unsigned int ifscope, struct ifnet **outif)
{
	struct in_addr laddr;
	struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;
	struct inpcb *pcb;
	int error;
	struct socket *so = inp->inp_socket;

#if CONTENT_FILTER
	if (so) {
		so->so_state_change_cnt++;
	}
#endif

	/*
	 * Call inner routine, to assign local interface address.
	 */
	if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0) {
		return error;
	}

	/* Duplicate-connection check; the hash lookup takes the pcbinfo
	 * lock, so the socket lock must be dropped around it. */
	socket_unlock(so, 0);
	pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port,
	    inp->inp_laddr.s_addr ? inp->inp_laddr : laddr,
	    inp->inp_lport, 0, NULL);
	socket_lock(so, 0);

	/*
	 * Check if the socket is still in a valid state. When we unlock this
	 * embryonic socket, it can get aborted if another thread is closing
	 * the listener (radar 7947600).
	 */
	if ((so->so_flags & SOF_ABORTED) != 0) {
		return ECONNREFUSED;
	}

	if (pcb != NULL) {
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return EADDRINUSE;
	}
	if (inp->inp_laddr.s_addr == INADDR_ANY) {
		if (inp->inp_lport == 0) {
			/* Implicit bind: pick an ephemeral local port */
			error = in_pcbbind(inp, NULL, p);
			if (error) {
				return error;
			}
		}
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
		inp->inp_laddr = laddr;
		/* no reference needed */
		inp->inp_last_outifp = (outif != NULL) ? *outif : NULL;
		inp->inp_flags |= INP_INADDR_ANY;
	} else {
		/*
		 * Usage of IP_PKTINFO, without a local port already
		 * specified, will cause the kernel to panic,
		 * see rdar://problem/18508185.
		 * For now return an error to avoid a kernel panic.
		 * This routine could be refactored to handle this better
		 * in the future.
		 */
		if (inp->inp_lport == 0) {
			return EINVAL;
		}
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
	}
	/* Commit the foreign endpoint and rehash under the pcbinfo lock */
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
		nstat_pcb_invalidate_cache(inp);
	}
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);
	return 0;
}
1695
/*
 * Disassociate the PCB from its foreign address/port and rehash it.
 * May drop and re-acquire the socket lock to take the pcbinfo lock in
 * the correct order.  If the socket no longer holds a file reference
 * (and is not an MPTCP subflow), the PCB is detached as well.
 */
void
in_pcbdisconnect(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
		nstat_pcb_cache(inp);
	}

	inp->inp_faddr.s_addr = INADDR_ANY;
	inp->inp_fport = 0;

#if CONTENT_FILTER
	if (so) {
		so->so_state_change_cnt++;
	}
#endif

	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(so, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
		socket_lock(so, 0);
	}

	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);
	/*
	 * A multipath subflow socket would have its SS_NOFDREF set by default,
	 * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB;
	 * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared.
	 */
	if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) {
		in_pcbdetach(inp);
	}
}
1732
/*
 * Detach the PCB from its socket: tear down IPsec policy, report the
 * PCB's demise to NetworkStatistics, free the keepalive buffer, mark
 * the PCB WNT_STOPUSING, and (on first call) release options, cached
 * route and multicast state, mark the state DEAD, and schedule garbage
 * collection.  SOF_PCBCLEARING guards against running the teardown
 * twice from so_close.
 */
void
in_pcbdetach(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (so->so_pcb == NULL) {
		/* PCB has been disposed */
		panic("%s: inp=%p so=%p proto=%d so_pcb is null!\n", __func__,
		    inp, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

#if IPSEC
	if (inp->inp_sp != NULL) {
		(void) ipsec4_delete_pcbpolicy(inp);
	}
#endif /* IPSEC */

	/* Account UDP sockets that never moved any data */
	if (inp->inp_stat != NULL && SOCK_PROTO(so) == IPPROTO_UDP) {
		if (inp->inp_stat->rxpackets == 0 && inp->inp_stat->txpackets == 0) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_no_data);
		}
	}

	/*
	 * Let NetworkStatistics know this PCB is going away
	 * before we detach it.
	 */
	if (nstat_collect &&
	    (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) {
		nstat_pcb_detach(inp);
	}

	/* Free memory buffer held for generating keep alives */
	if (inp->inp_keepalive_data != NULL) {
		FREE(inp->inp_keepalive_data, M_TEMP);
		inp->inp_keepalive_data = NULL;
	}

	/* mark socket state as dead */
	if (in_pcb_checkstate(inp, WNT_STOPUSING, 1) != WNT_STOPUSING) {
		panic("%s: so=%p proto=%d couldn't set to STOPUSING\n",
		    __func__, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

	if (!(so->so_flags & SOF_PCBCLEARING)) {
		struct ip_moptions *imo;

		inp->inp_vflag = 0;
		if (inp->inp_options != NULL) {
			(void) m_free(inp->inp_options);
			inp->inp_options = NULL;
		}
		ROUTE_RELEASE(&inp->inp_route);
		imo = inp->inp_moptions;
		inp->inp_moptions = NULL;
		sofreelastref(so, 0);
		inp->inp_state = INPCB_STATE_DEAD;

		/*
		 * Enqueue an event to send kernel event notification
		 * if the flow has to CLAT46 for data packets
		 */
		if (inp->inp_flags2 & INP2_CLAT46_FLOW) {
			/*
			 * If there has been any exchange of data bytes
			 * over this flow.
			 * Schedule a notification to report that flow is
			 * using client side translation.
			 */
			if (inp->inp_stat != NULL &&
			    (inp->inp_stat->txbytes != 0 ||
			    inp->inp_stat->rxbytes != 0)) {
				if (so->so_flags & SOF_DELEGATED) {
					in6_clat46_event_enqueue_nwk_wq_entry(
						IN6_CLAT46_EVENT_V4_FLOW,
						so->e_pid,
						so->e_uuid);
				} else {
					in6_clat46_event_enqueue_nwk_wq_entry(
						IN6_CLAT46_EVENT_V4_FLOW,
						so->last_pid,
						so->last_uuid);
				}
			}
		}

		/* makes sure we're not called twice from so_close */
		so->so_flags |= SOF_PCBCLEARING;

		inpcb_gc_sched(inp->inp_pcbinfo, INPCB_TIMER_FAST);

		/*
		 * See inp_join_group() for why we need to unlock
		 */
		if (imo != NULL) {
			socket_unlock(so, 0);
			IMO_REMREF(imo);
			socket_lock(so, 0);
		}
	}
}
1c79356b 1836
1c79356b 1837
39236c6e
A
/*
 * Final stage of PCB teardown: bump the generation count, remove the PCB
 * from the hash/port/global lists, sever the socket<->PCB linkage, and
 * free both objects.
 *
 * Preconditions (enforced by the panics below): the socket's use count
 * must already be zero, the PCB must be marked WNT_STOPUSING, and the
 * caller must hold the pcbinfo lock exclusively (asserted).
 */
void
in_pcbdispose(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;
	struct inpcbinfo *ipi = inp->inp_pcbinfo;

	/* A live socket reference at this point is a refcounting bug. */
	if (so != NULL && so->so_usecount != 0) {
		panic("%s: so %p [%d,%d] usecount %d lockhistory %s\n",
		    __func__, so, SOCK_DOM(so), SOCK_TYPE(so), so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
		if (so != NULL) {
			panic_plain("%s: inp %p invalid wantcnt %d, so %p "
			    "[%d,%d] usecount %d retaincnt %d state 0x%x "
			    "flags 0x%x lockhistory %s\n", __func__, inp,
			    inp->inp_wantcnt, so, SOCK_DOM(so), SOCK_TYPE(so),
			    so->so_usecount, so->so_retaincnt, so->so_state,
			    so->so_flags, solockhistory_nr(so));
			/* NOTREACHED */
		} else {
			panic("%s: inp %p invalid wantcnt %d no socket\n",
			    __func__, inp, inp->inp_wantcnt);
			/* NOTREACHED */
		}
	}

	LCK_RW_ASSERT(ipi->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

	inp->inp_gencnt = ++ipi->ipi_gencnt;
	/* access ipi in in_pcbremlists */
	in_pcbremlists(inp);

	if (so != NULL) {
		if (so->so_proto->pr_flags & PR_PCBLOCK) {
			sofreelastref(so, 0);
			if (so->so_rcv.sb_cc > 0 || so->so_snd.sb_cc > 0) {
				/*
				 * selthreadclear() already called
				 * during sofreelastref() above.
				 */
				sbrelease(&so->so_rcv);
				sbrelease(&so->so_snd);
			}
			if (so->so_head != NULL) {
				panic("%s: so=%p head still exist\n",
				    __func__, so);
				/* NOTREACHED */
			}
			/* must drop the per-PCB mutex before destroying it */
			lck_mtx_unlock(&inp->inpcb_mtx);

#if NECP
			necp_inpcb_remove_cb(inp);
#endif /* NECP */

			lck_mtx_destroy(&inp->inpcb_mtx, ipi->ipi_lock_grp);
		}
		/* makes sure we're not called twice from so_close */
		so->so_flags |= SOF_PCBCLEARING;
		so->so_saved_pcb = (caddr_t)inp;
		/* break the socket<->PCB linkage in both directions */
		so->so_pcb = NULL;
		inp->inp_socket = NULL;
#if CONFIG_MACF_NET
		mac_inpcb_label_destroy(inp);
#endif /* CONFIG_MACF_NET */
#if NECP
		necp_inpcb_dispose(inp);
#endif /* NECP */
		/*
		 * In case there is a route cached after a detach (possible
		 * in the tcp case), make sure that it is freed before
		 * we deallocate the structure.
		 */
		ROUTE_RELEASE(&inp->inp_route);
		/*
		 * If SOF1_CACHED_IN_SOCK_LAYER is set the PCB storage is
		 * presumably freed along with the socket itself — only
		 * zfree() it when not cached. TODO confirm against
		 * sodealloc().
		 */
		if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
			zfree(ipi->ipi_zone, inp);
		}
		sodealloc(so);
	}
}
1918
1919/*
39236c6e 1920 * The calling convention of in_getsockaddr() and in_getpeeraddr() was
1c79356b
A
1921 * modified to match the pru_sockaddr() and pru_peeraddr() entry points
1922 * in struct pr_usrreqs, so that protocols can just reference then directly
39236c6e 1923 * without the need for a wrapper function.
1c79356b
A
1924 */
1925int
39236c6e 1926in_getsockaddr(struct socket *so, struct sockaddr **nam)
1c79356b 1927{
2d21ac55
A
1928 struct inpcb *inp;
1929 struct sockaddr_in *sin;
1c79356b
A
1930
1931 /*
1932 * Do the malloc first in case it blocks.
1933 */
0a7de745
A
1934 MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK);
1935 if (sin == NULL) {
1936 return ENOBUFS;
1937 }
1938 bzero(sin, sizeof(*sin));
1c79356b 1939 sin->sin_family = AF_INET;
0a7de745 1940 sin->sin_len = sizeof(*sin);
1c79356b 1941
39236c6e 1942 if ((inp = sotoinpcb(so)) == NULL) {
1c79356b 1943 FREE(sin, M_SONAME);
0a7de745 1944 return EINVAL;
1c79356b
A
1945 }
1946 sin->sin_port = inp->inp_lport;
1947 sin->sin_addr = inp->inp_laddr;
1c79356b
A
1948
1949 *nam = (struct sockaddr *)sin;
0a7de745 1950 return 0;
1c79356b
A
1951}
1952
1953int
5ba3f43e 1954in_getsockaddr_s(struct socket *so, struct sockaddr_in *ss)
1c79356b 1955{
5ba3f43e 1956 struct sockaddr_in *sin = ss;
1c79356b 1957 struct inpcb *inp;
1c79356b 1958
39236c6e 1959 VERIFY(ss != NULL);
0a7de745 1960 bzero(ss, sizeof(*ss));
39236c6e 1961
1c79356b 1962 sin->sin_family = AF_INET;
0a7de745 1963 sin->sin_len = sizeof(*sin);
1c79356b 1964
0a7de745
A
1965 if ((inp = sotoinpcb(so)) == NULL) {
1966 return EINVAL;
1967 }
39236c6e
A
1968
1969 sin->sin_port = inp->inp_lport;
1970 sin->sin_addr = inp->inp_laddr;
0a7de745 1971 return 0;
39236c6e
A
1972}
1973
1974int
1975in_getpeeraddr(struct socket *so, struct sockaddr **nam)
1976{
1977 struct inpcb *inp;
1978 struct sockaddr_in *sin;
1979
1980 /*
1981 * Do the malloc first in case it blocks.
1982 */
0a7de745
A
1983 MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK);
1984 if (sin == NULL) {
1985 return ENOBUFS;
1986 }
1987 bzero((caddr_t)sin, sizeof(*sin));
39236c6e 1988 sin->sin_family = AF_INET;
0a7de745 1989 sin->sin_len = sizeof(*sin);
39236c6e
A
1990
1991 if ((inp = sotoinpcb(so)) == NULL) {
1c79356b 1992 FREE(sin, M_SONAME);
0a7de745 1993 return EINVAL;
1c79356b
A
1994 }
1995 sin->sin_port = inp->inp_fport;
1996 sin->sin_addr = inp->inp_faddr;
1c79356b
A
1997
1998 *nam = (struct sockaddr *)sin;
0a7de745 1999 return 0;
39236c6e
A
2000}
2001
/*
 * Invoke "notify" with "errno" on every IPv4 PCB in "pcbinfo" whose
 * foreign address equals "faddr" and which still has a socket.  Used to
 * propagate errors (e.g. from ICMP) to all matching connections.
 * Takes the pcbinfo list lock shared; each matching socket is locked
 * around its callback.
 */
void
in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    int errno, void (*notify)(struct inpcb *, int))
{
	struct inpcb *inp;

	lck_rw_lock_shared(pcbinfo->ipi_lock);

	LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
#if INET6
		/* skip PCBs that are not carrying IPv4 */
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
#endif /* INET6 */
		if (inp->inp_faddr.s_addr != faddr.s_addr ||
		    inp->inp_socket == NULL) {
			continue;
		}
		/* take a want ref so the PCB can't be reclaimed under us */
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
			continue;
		}
		socket_lock(inp->inp_socket, 1);
		(*notify)(inp, errno);
		(void) in_pcb_checkstate(inp, WNT_RELEASE, 1);
		socket_unlock(inp->inp_socket, 1);
	}
	lck_rw_done(pcbinfo->ipi_lock);
}
2030
2031/*
2032 * Check for alternatives when higher level complains
2033 * about service problems. For now, invalidate cached
2034 * routing information. If the route was created dynamically
2035 * (by a redirect), time to try a default gateway again.
2036 */
void
in_losing(struct inpcb *inp)
{
	boolean_t release = FALSE;
	struct rtentry *rt;

	if ((rt = inp->inp_route.ro_rt) != NULL) {
		struct in_ifaddr *ia = NULL;

		RT_LOCK(rt);
		/* Routes created by a redirect are deleted outright. */
		if (rt->rt_flags & RTF_DYNAMIC) {
			/*
			 * Prevent another thread from modifying rt_key,
			 * rt_gateway via rt_setgate() after rt_lock is
			 * dropped by marking the route as defunct.
			 */
			rt->rt_flags |= RTF_CONDEMNED;
			RT_UNLOCK(rt);
			(void) rtrequest(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
		} else {
			RT_UNLOCK(rt);
		}
		/* if the address is gone keep the old route in the pcb */
		if (inp->inp_laddr.s_addr != INADDR_ANY &&
		    (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
			/*
			 * Address is around; ditch the route. A new route
			 * can be allocated the next time output is attempted.
			 */
			release = TRUE;
		}
		/* ifa_foraddr() returned a held reference; drop it */
		if (ia != NULL) {
			IFA_REMREF(&ia->ia_ifa);
		}
	}
	/* no cached route, or local address still valid: flush the cache */
	if (rt == NULL || release) {
		ROUTE_RELEASE(&inp->inp_route);
	}
}
2077
2078/*
2079 * After a routing change, flush old routing
2080 * and allocate a (hopefully) better one.
2081 */
9bccf70c 2082void
39236c6e 2083in_rtchange(struct inpcb *inp, int errno)
1c79356b 2084{
39236c6e
A
2085#pragma unused(errno)
2086 boolean_t release = FALSE;
2d21ac55
A
2087 struct rtentry *rt;
2088
2089 if ((rt = inp->inp_route.ro_rt) != NULL) {
39236c6e 2090 struct in_ifaddr *ia = NULL;
b0d623f7 2091
39236c6e
A
2092 /* if address is gone, keep the old route */
2093 if (inp->inp_laddr.s_addr != INADDR_ANY &&
2094 (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
2095 /*
2096 * Address is around; ditch the route. A new route
2097 * can be allocated the next time output is attempted.
2098 */
2099 release = TRUE;
2d21ac55 2100 }
0a7de745 2101 if (ia != NULL) {
39236c6e 2102 IFA_REMREF(&ia->ia_ifa);
0a7de745 2103 }
1c79356b 2104 }
0a7de745 2105 if (rt == NULL || release) {
39236c6e 2106 ROUTE_RELEASE(&inp->inp_route);
0a7de745 2107 }
1c79356b
A
2108}
2109
2110/*
2111 * Lookup a PCB based on the local address and port.
2112 */
/*
 * With "wild_okay" clear, only an unconnected PCB bound exactly to
 * (laddr, lport) matches.  With it set, a best-fit search is performed
 * over all PCBs on the port hash chain: each wildcard mismatch
 * (connected foreign addr, or laddr vs. bound addr disagreement on
 * INADDR_ANY) costs one point, and the lowest score wins.
 * NOTE(review): caller is presumed to hold the pcbinfo lock — no
 * locking is done here; confirm against callers.
 */
struct inpcb *
in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
    unsigned int lport_arg, int wild_okay)
{
	struct inpcb *inp;
	int matchwild = 3, wildcard;	/* 3 exceeds any possible score */
	u_short lport = lport_arg;

	KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0, 0, 0, 0, 0);

	if (!wild_okay) {
		struct inpcbhead *head;
		/*
		 * Look for an unconnected (wildcard foreign addr) PCB that
		 * matches the local address and port we're looking for.
		 */
		head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
		    pcbinfo->ipi_hashmask)];
		LIST_FOREACH(inp, head, inp_hash) {
#if INET6
			if (!(inp->inp_vflag & INP_IPV4)) {
				continue;
			}
#endif /* INET6 */
			if (inp->inp_faddr.s_addr == INADDR_ANY &&
			    inp->inp_laddr.s_addr == laddr.s_addr &&
			    inp->inp_lport == lport) {
				/*
				 * Found.  (Note: this path returns without
				 * emitting the DBG_FUNC_END kdebug event.)
				 */
				return inp;
			}
		}
		/*
		 * Not found.
		 */
		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return NULL;
	} else {
		struct inpcbporthead *porthash;
		struct inpcbport *phd;
		struct inpcb *match = NULL;
		/*
		 * Best fit PCB lookup.
		 *
		 * First see if this local port is in use by looking on the
		 * port hash list.
		 */
		porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
		    pcbinfo->ipi_porthashmask)];
		LIST_FOREACH(phd, porthash, phd_hash) {
			if (phd->phd_port == lport) {
				break;
			}
		}
		if (phd != NULL) {
			/*
			 * Port is in use by one or more PCBs. Look for best
			 * fit.
			 */
			LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
				wildcard = 0;
#if INET6
				if (!(inp->inp_vflag & INP_IPV4)) {
					continue;
				}
#endif /* INET6 */
				/* connected PCBs are a weaker match */
				if (inp->inp_faddr.s_addr != INADDR_ANY) {
					wildcard++;
				}
				if (inp->inp_laddr.s_addr != INADDR_ANY) {
					if (laddr.s_addr == INADDR_ANY) {
						wildcard++;
					} else if (inp->inp_laddr.s_addr !=
					    laddr.s_addr) {
						/* bound elsewhere: no match */
						continue;
					}
				} else {
					if (laddr.s_addr != INADDR_ANY) {
						wildcard++;
					}
				}
				/* keep the candidate with fewest wildcards */
				if (wildcard < matchwild) {
					match = inp;
					matchwild = wildcard;
					if (matchwild == 0) {
						/* exact match; stop early */
						break;
					}
				}
			}
		}
		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,
		    0, 0, 0, 0);
		return match;
	}
}
2209
6d2010ae
A
2210/*
2211 * Check if PCB exists in hash list.
2212 */
/*
 * Like in_pcblookup_hash(), but instead of returning the PCB it only
 * reports whether a matching PCB with a socket exists, filling in the
 * owning socket's credentials (*uid, *gid) on success.  "ifp" is used
 * to filter out PCBs restricted from receiving on that interface.
 * Takes the pcbinfo lock shared for the duration of the search.
 * Returns 1 if found, 0 otherwise; *uid/*gid default to UID_MAX/GID_MAX.
 */
int
in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
    uid_t *uid, gid_t *gid, struct ifnet *ifp)
{
	struct inpcbhead *head;
	struct inpcb *inp;
	u_short fport = fport_arg, lport = lport_arg;
	int found = 0;
	struct inpcb *local_wild = NULL;
#if INET6
	struct inpcb *local_wild_mapped = NULL;
#endif /* INET6 */

	*uid = UID_MAX;
	*gid = GID_MAX;

	/*
	 * We may have found the pcb in the last lookup - check this first.
	 */

	lck_rw_lock_shared(pcbinfo->ipi_lock);

	/*
	 * First look for an exact match.
	 */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
#if INET6
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
#endif /* INET6 */
		/* skip PCBs not allowed to receive on this interface */
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == faddr.s_addr &&
		    inp->inp_laddr.s_addr == laddr.s_addr &&
		    inp->inp_fport == fport &&
		    inp->inp_lport == lport) {
			if ((found = (inp->inp_socket != NULL))) {
				/*
				 * Found.
				 */
				*uid = kauth_cred_getuid(
					inp->inp_socket->so_cred);
				*gid = kauth_cred_getgid(
					inp->inp_socket->so_cred);
			}
			lck_rw_done(pcbinfo->ipi_lock);
			return found;
		}
	}

	if (!wildcard) {
		/*
		 * Not found.
		 */
		lck_rw_done(pcbinfo->ipi_lock);
		return 0;
	}

	/*
	 * Second pass: wildcard (unconnected) PCBs bound to lport.
	 */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
#if INET6
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
#endif /* INET6 */
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == INADDR_ANY &&
		    inp->inp_lport == lport) {
			if (inp->inp_laddr.s_addr == laddr.s_addr) {
				if ((found = (inp->inp_socket != NULL))) {
					*uid = kauth_cred_getuid(
						inp->inp_socket->so_cred);
					*gid = kauth_cred_getgid(
						inp->inp_socket->so_cred);
				}
				lck_rw_done(pcbinfo->ipi_lock);
				return found;
			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
#if INET6
				/* v4-mapped listener is a weaker candidate */
				if (inp->inp_socket &&
				    SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
					local_wild_mapped = inp;
				} else
#endif /* INET6 */
				local_wild = inp;
			}
		}
	}
	if (local_wild == NULL) {
#if INET6
		if (local_wild_mapped != NULL) {
			if ((found = (local_wild_mapped->inp_socket != NULL))) {
				*uid = kauth_cred_getuid(
					local_wild_mapped->inp_socket->so_cred);
				*gid = kauth_cred_getgid(
					local_wild_mapped->inp_socket->so_cred);
			}
			lck_rw_done(pcbinfo->ipi_lock);
			return found;
		}
#endif /* INET6 */
		lck_rw_done(pcbinfo->ipi_lock);
		return 0;
	}
	if ((found = (local_wild->inp_socket != NULL))) {
		*uid = kauth_cred_getuid(
			local_wild->inp_socket->so_cred);
		*gid = kauth_cred_getgid(
			local_wild->inp_socket->so_cred);
	}
	lck_rw_done(pcbinfo->ipi_lock);
	return found;
}
2348
1c79356b
A
2349/*
2350 * Lookup PCB in hash list.
2351 */
/*
 * Find the PCB matching the 4-tuple (faddr:fport, laddr:lport); with
 * "wildcard" set, fall back to unconnected listeners bound to lport
 * (exact laddr preferred over INADDR_ANY, native over v4-mapped v6).
 * "ifp" filters out PCBs restricted from receiving on that interface.
 * On success the PCB is returned with a want reference held via
 * WNT_ACQUIRE (caller releases with WNT_RELEASE); PCBs already in
 * WNT_STOPUSING are treated as not found.  Takes ipi_lock shared.
 */
struct inpcb *
in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
    struct ifnet *ifp)
{
	struct inpcbhead *head;
	struct inpcb *inp;
	u_short fport = fport_arg, lport = lport_arg;
	struct inpcb *local_wild = NULL;
#if INET6
	struct inpcb *local_wild_mapped = NULL;
#endif /* INET6 */

	/*
	 * We may have found the pcb in the last lookup - check this first.
	 */

	lck_rw_lock_shared(pcbinfo->ipi_lock);

	/*
	 * First look for an exact match.
	 */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
#if INET6
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
#endif /* INET6 */
		/* skip PCBs not allowed to receive on this interface */
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == faddr.s_addr &&
		    inp->inp_laddr.s_addr == laddr.s_addr &&
		    inp->inp_fport == fport &&
		    inp->inp_lport == lport) {
			/*
			 * Found.
			 */
			if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
			    WNT_STOPUSING) {
				lck_rw_done(pcbinfo->ipi_lock);
				return inp;
			} else {
				/* it's there but dead, say it isn't found */
				lck_rw_done(pcbinfo->ipi_lock);
				return NULL;
			}
		}
	}

	if (!wildcard) {
		/*
		 * Not found.
		 */
		lck_rw_done(pcbinfo->ipi_lock);
		return NULL;
	}

	/*
	 * Second pass: wildcard (unconnected) PCBs bound to lport.
	 */
	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
	    pcbinfo->ipi_hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
#if INET6
		if (!(inp->inp_vflag & INP_IPV4)) {
			continue;
		}
#endif /* INET6 */
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}

#if NECP
		if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
			continue;
		}
#endif /* NECP */

		if (inp->inp_faddr.s_addr == INADDR_ANY &&
		    inp->inp_lport == lport) {
			if (inp->inp_laddr.s_addr == laddr.s_addr) {
				if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
				    WNT_STOPUSING) {
					lck_rw_done(pcbinfo->ipi_lock);
					return inp;
				} else {
					/* it's dead; say it isn't found */
					lck_rw_done(pcbinfo->ipi_lock);
					return NULL;
				}
			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
#if INET6
				/* v4-mapped listener is a weaker candidate */
				if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
					local_wild_mapped = inp;
				} else
#endif /* INET6 */
				local_wild = inp;
			}
		}
	}
	if (local_wild == NULL) {
#if INET6
		if (local_wild_mapped != NULL) {
			if (in_pcb_checkstate(local_wild_mapped,
			    WNT_ACQUIRE, 0) != WNT_STOPUSING) {
				lck_rw_done(pcbinfo->ipi_lock);
				return local_wild_mapped;
			} else {
				/* it's dead; say it isn't found */
				lck_rw_done(pcbinfo->ipi_lock);
				return NULL;
			}
		}
#endif /* INET6 */
		lck_rw_done(pcbinfo->ipi_lock);
		return NULL;
	}
	if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
		lck_rw_done(pcbinfo->ipi_lock);
		return local_wild;
	}
	/*
	 * It's either not found or is already dead.
	 */
	lck_rw_done(pcbinfo->ipi_lock);
	return NULL;
}
2486
2487/*
4bd07ac2
A
2488 * @brief Insert PCB onto various hash lists.
2489 *
2490 * @param inp Pointer to internet protocol control block
2491 * @param locked Implies if ipi_lock (protecting pcb list)
0a7de745 2492 * is already locked or not.
4bd07ac2
A
2493 *
2494 * @return int error on failure and 0 on success
1c79356b
A
2495 */
int
in_pcbinshash(struct inpcb *inp, int locked)
{
	struct inpcbhead *pcbhash;
	struct inpcbporthead *pcbporthash;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbport *phd;
	u_int32_t hashkey_faddr;

	if (!locked) {
		if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets: drop the socket lock
			 * before blocking on ipi_lock, then retake it.
			 */
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(pcbinfo->ipi_lock);
			socket_lock(inp->inp_socket, 0);
		}
	}

	/*
	 * This routine or its caller may have given up
	 * socket's protocol lock briefly.
	 * During that time the socket may have been dropped.
	 * Safe-guarding against that.
	 */
	if (inp->inp_state == INPCB_STATE_DEAD) {
		if (!locked) {
			lck_rw_done(pcbinfo->ipi_lock);
		}
		return ECONNABORTED;
	}


#if INET6
	if (inp->inp_vflag & INP_IPV6) {
		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
	} else
#endif /* INET6 */
	hashkey_faddr = inp->inp_faddr.s_addr;

	inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
	    inp->inp_fport, pcbinfo->ipi_hashmask);

	pcbhash = &pcbinfo->ipi_hashbase[inp->inp_hash_element];

	pcbporthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(inp->inp_lport,
	    pcbinfo->ipi_porthashmask)];

	/*
	 * Go through port list and look for a head for this lport.
	 */
	LIST_FOREACH(phd, pcbporthash, phd_hash) {
		if (phd->phd_port == inp->inp_lport) {
			break;
		}
	}

	/*
	 * If none exists, malloc one and tack it on.
	 */
	if (phd == NULL) {
		MALLOC(phd, struct inpcbport *, sizeof(struct inpcbport),
		    M_PCB, M_WAITOK);
		if (phd == NULL) {
			if (!locked) {
				lck_rw_done(pcbinfo->ipi_lock);
			}
			return ENOBUFS; /* XXX */
		}
		phd->phd_port = inp->inp_lport;
		LIST_INIT(&phd->phd_pcblist);
		LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
	}

	/* must not already be hashed */
	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));


	inp->inp_phd = phd;
	LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
	inp->inp_flags2 |= INP2_INHASHLIST;

	if (!locked) {
		lck_rw_done(pcbinfo->ipi_lock);
	}

#if NECP
	// This call catches the original setting of the local address
	inp_update_necp_policy(inp, NULL, NULL, 0);
#endif /* NECP */

	return 0;
}
2591
2592/*
2593 * Move PCB to the proper hash bucket when { faddr, fport } have been
2594 * changed. NOTE: This does not handle the case of the lport changing (the
2595 * hashed port list would have to be updated as well), so the lport must
2596 * not change after in_pcbinshash() has been called.
2597 */
void
in_pcbrehash(struct inpcb *inp)
{
	struct inpcbhead *head;
	u_int32_t hashkey_faddr;

	/* recompute the hash key from the (possibly new) foreign address */
#if INET6
	if (inp->inp_vflag & INP_IPV6) {
		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
	} else
#endif /* INET6 */
	hashkey_faddr = inp->inp_faddr.s_addr;

	inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
	    inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask);
	head = &inp->inp_pcbinfo->ipi_hashbase[inp->inp_hash_element];

	/* unhook from the old bucket first, if currently hashed */
	if (inp->inp_flags2 & INP2_INHASHLIST) {
		LIST_REMOVE(inp, inp_hash);
		inp->inp_flags2 &= ~INP2_INHASHLIST;
	}

	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
	LIST_INSERT_HEAD(head, inp, inp_hash);
	inp->inp_flags2 |= INP2_INHASHLIST;

#if NECP
	// This call catches updates to the remote addresses
	inp_update_necp_policy(inp, NULL, NULL, 0);
#endif /* NECP */
}
2629
2630/*
2631 * Remove PCB from various lists.
316670eb 2632 * Must be called pcbinfo lock is held in exclusive mode.
1c79356b
A
2633 */
void
in_pcbremlists(struct inpcb *inp)
{
	/* bump the generation count so cached references invalidate */
	inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt;

	/*
	 * Check if it's in hashlist -- an inp is placed in hashlist when
	 * it's local port gets assigned. So it should also be present
	 * in the port list.
	 */
	if (inp->inp_flags2 & INP2_INHASHLIST) {
		struct inpcbport *phd = inp->inp_phd;

		VERIFY(phd != NULL && inp->inp_lport > 0);

		LIST_REMOVE(inp, inp_hash);
		/* poison the links so a stale use is detectable */
		inp->inp_hash.le_next = NULL;
		inp->inp_hash.le_prev = NULL;

		LIST_REMOVE(inp, inp_portlist);
		inp->inp_portlist.le_next = NULL;
		inp->inp_portlist.le_prev = NULL;
		/* free the port head once its last PCB is gone */
		if (LIST_EMPTY(&phd->phd_pcblist)) {
			LIST_REMOVE(phd, phd_hash);
			FREE(phd, M_PCB);
		}
		inp->inp_phd = NULL;
		inp->inp_flags2 &= ~INP2_INHASHLIST;
	}
	VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));

	if (inp->inp_flags2 & INP2_TIMEWAIT) {
		/* Remove from time-wait queue */
		tcp_remove_from_time_wait(inp);
		inp->inp_flags2 &= ~INP2_TIMEWAIT;
		VERIFY(inp->inp_pcbinfo->ipi_twcount != 0);
		inp->inp_pcbinfo->ipi_twcount--;
	} else {
		/* Remove from global inp list if it is not time-wait */
		LIST_REMOVE(inp, inp_list);
	}

	/* drop this PCB's entry in the flow-control tree, if any */
	if (inp->inp_flags2 & INP2_IN_FCTREE) {
		inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED | INPFC_REMOVE));
		VERIFY(!(inp->inp_flags2 & INP2_IN_FCTREE));
	}

	inp->inp_pcbinfo->ipi_count--;
}
2683
39236c6e
A
2684/*
2685 * Mechanism used to defer the memory release of PCBs
2686 * The pcb list will contain the pcb until the reaper can clean it up if
2687 * the following conditions are met:
2688 * 1) state "DEAD",
2689 * 2) wantcnt is STOPUSING
2690 * 3) usecount is 0
91447636 2691 * This function will be called to either mark the pcb as
39236c6e 2692 */
91447636
A
/*
 * Lock-free management of the inp_wantcnt use/recycle counter.
 * The low 16 bits hold either a reference count or the sentinel 0xffff
 * (WNT_STOPUSING, i.e. the PCB is ready for reclaim); all updates go
 * through compare-and-swap.  Modes:
 *   WNT_STOPUSING - mark the PCB dead and, if unreferenced, set the
 *                   sentinel; always returns WNT_STOPUSING.
 *   WNT_ACQUIRE   - take a reference; fails (returns WNT_STOPUSING)
 *                   if the sentinel is already set.
 *   WNT_RELEASE   - drop a reference; if the PCB state is DEAD, fall
 *                   through to the STOPUSING path afterwards.
 * "locked" indicates whether the caller already holds the socket lock.
 */
int
in_pcb_checkstate(struct inpcb *pcb, int mode, int locked)
{
	volatile UInt32 *wantcnt = (volatile UInt32 *)&pcb->inp_wantcnt;
	UInt32 origwant;
	UInt32 newwant;

	switch (mode) {
	case WNT_STOPUSING:
		/*
		 * Try to mark the pcb as ready for recycling. CAS with
		 * STOPUSING, if success we're good, if it's in use, will
		 * be marked later
		 */
		if (locked == 0) {
			socket_lock(pcb->inp_socket, 1);
		}
		pcb->inp_state = INPCB_STATE_DEAD;

stopusing:
		if (pcb->inp_socket->so_usecount < 0) {
			panic("%s: pcb=%p so=%p usecount is negative\n",
			    __func__, pcb, pcb->inp_socket);
			/* NOTREACHED */
		}
		if (locked == 0) {
			socket_unlock(pcb->inp_socket, 1);
		}

		/* ask the PCB garbage collector to run soon */
		inpcb_gc_sched(pcb->inp_pcbinfo, INPCB_TIMER_FAST);

		origwant = *wantcnt;
		if ((UInt16) origwant == 0xffff) { /* should stop using */
			return WNT_STOPUSING;
		}
		newwant = 0xffff;
		if ((UInt16) origwant == 0) {
			/* no holders: try to mark it as unusable now */
			OSCompareAndSwap(origwant, newwant, wantcnt);
		}
		return WNT_STOPUSING;

	case WNT_ACQUIRE:
		/*
		 * Try to increase reference to pcb. If WNT_STOPUSING
		 * should bail out. If socket state DEAD, try to set count
		 * to STOPUSING, return failed otherwise increase cnt.
		 */
		do {
			origwant = *wantcnt;
			if ((UInt16) origwant == 0xffff) {
				/* should stop using */
				return WNT_STOPUSING;
			}
			newwant = origwant + 1;
		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));
		return WNT_ACQUIRE;

	case WNT_RELEASE:
		/*
		 * Release reference. If result is null and pcb state
		 * is DEAD, set wanted bit to STOPUSING
		 */
		if (locked == 0) {
			socket_lock(pcb->inp_socket, 1);
		}

		do {
			origwant = *wantcnt;
			if ((UInt16) origwant == 0x0) {
				/* releasing with no reference held is a bug */
				panic("%s: pcb=%p release with zero count",
				    __func__, pcb);
				/* NOTREACHED */
			}
			if ((UInt16) origwant == 0xffff) {
				/* should stop using */
				if (locked == 0) {
					socket_unlock(pcb->inp_socket, 1);
				}
				return WNT_STOPUSING;
			}
			newwant = origwant - 1;
		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));

		if (pcb->inp_state == INPCB_STATE_DEAD) {
			goto stopusing;
		}
		if (pcb->inp_socket->so_usecount < 0) {
			panic("%s: RELEASE pcb=%p so=%p usecount is negative\n",
			    __func__, pcb, pcb->inp_socket);
			/* NOTREACHED */
		}

		if (locked == 0) {
			socket_unlock(pcb->inp_socket, 1);
		}
		return WNT_RELEASE;

	default:
		panic("%s: so=%p not a valid state =%x\n", __func__,
		    pcb->inp_socket, mode);
		/* NOTREACHED */
	}

	/* NOTREACHED */
	return mode;
}
2800
2801/*
2802 * inpcb_to_compat copies specific bits of an inpcb to a inpcb_compat.
2803 * The inpcb_compat data structure is passed to user space and must
b0d623f7 2804 * not change. We intentionally avoid copying pointers.
91447636
A
2805 */
2806void
39236c6e 2807inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat)
91447636 2808{
0a7de745 2809 bzero(inp_compat, sizeof(*inp_compat));
91447636
A
2810 inp_compat->inp_fport = inp->inp_fport;
2811 inp_compat->inp_lport = inp->inp_lport;
316670eb 2812 inp_compat->nat_owner = 0;
39236c6e 2813 inp_compat->nat_cookie = 0;
91447636
A
2814 inp_compat->inp_gencnt = inp->inp_gencnt;
2815 inp_compat->inp_flags = inp->inp_flags;
2816 inp_compat->inp_flow = inp->inp_flow;
2817 inp_compat->inp_vflag = inp->inp_vflag;
2818 inp_compat->inp_ip_ttl = inp->inp_ip_ttl;
2819 inp_compat->inp_ip_p = inp->inp_ip_p;
39236c6e
A
2820 inp_compat->inp_dependfaddr.inp6_foreign =
2821 inp->inp_dependfaddr.inp6_foreign;
2822 inp_compat->inp_dependladdr.inp6_local =
2823 inp->inp_dependladdr.inp6_local;
91447636 2824 inp_compat->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
39236c6e 2825 inp_compat->inp_depend6.inp6_hlim = 0;
91447636 2826 inp_compat->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
39236c6e 2827 inp_compat->inp_depend6.inp6_ifindex = 0;
91447636
A
2828 inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
2829}
9bccf70c 2830
5ba3f43e 2831#if !CONFIG_EMBEDDED
b0d623f7 2832void
39236c6e 2833inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp)
b0d623f7 2834{
6d2010ae
A
2835 xinp->inp_fport = inp->inp_fport;
2836 xinp->inp_lport = inp->inp_lport;
2837 xinp->inp_gencnt = inp->inp_gencnt;
2838 xinp->inp_flags = inp->inp_flags;
2839 xinp->inp_flow = inp->inp_flow;
2840 xinp->inp_vflag = inp->inp_vflag;
2841 xinp->inp_ip_ttl = inp->inp_ip_ttl;
2842 xinp->inp_ip_p = inp->inp_ip_p;
2843 xinp->inp_dependfaddr.inp6_foreign = inp->inp_dependfaddr.inp6_foreign;
2844 xinp->inp_dependladdr.inp6_local = inp->inp_dependladdr.inp6_local;
2845 xinp->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
39236c6e 2846 xinp->inp_depend6.inp6_hlim = 0;
6d2010ae 2847 xinp->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
39236c6e 2848 xinp->inp_depend6.inp6_ifindex = 0;
6d2010ae 2849 xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
b0d623f7 2850}
5ba3f43e 2851#endif /* !CONFIG_EMBEDDED */
b0d623f7 2852
b0d623f7
A
2853/*
2854 * The following routines implement this scheme:
2855 *
2856 * Callers of ip_output() that intend to cache the route in the inpcb pass
2857 * a local copy of the struct route to ip_output(). Using a local copy of
2858 * the cached route significantly simplifies things as IP no longer has to
2859 * worry about having exclusive access to the passed in struct route, since
2860 * it's defined in the caller's stack; in essence, this allows for a lock-
2861 * less operation when updating the struct route at the IP level and below,
2862 * whenever necessary. The scheme works as follows:
2863 *
2864 * Prior to dropping the socket's lock and calling ip_output(), the caller
2865 * copies the struct route from the inpcb into its stack, and adds a reference
2866 * to the cached route entry, if there was any. The socket's lock is then
2867 * dropped and ip_output() is called with a pointer to the copy of struct
2868 * route defined on the stack (not to the one in the inpcb.)
2869 *
2870 * Upon returning from ip_output(), the caller then acquires the socket's
2871 * lock and synchronizes the cache; if there is no route cached in the inpcb,
2872 * it copies the local copy of struct route (which may or may not contain any
2873 * route) back into the cache; otherwise, if the inpcb has a route cached in
2874 * it, the one in the local copy will be freed, if there's any. Trashing the
2875 * cached route in the inpcb can be avoided because ip_output() is single-
2876 * threaded per-PCB (i.e. multiple transmits on a PCB are always serialized
2877 * by the socket/transport layer.)
2878 */
2879void
2880inp_route_copyout(struct inpcb *inp, struct route *dst)
2881{
2882 struct route *src = &inp->inp_route;
2883
5ba3f43e 2884 socket_lock_assert_owned(inp->inp_socket);
b0d623f7 2885
0b4c1975 2886 /*
39236c6e 2887 * If the route in the PCB is stale or not for IPv4, blow it away;
0b4c1975
A
2888 * this is possible in the case of IPv4-mapped address case.
2889 */
0a7de745 2890 if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET) {
39236c6e 2891 ROUTE_RELEASE(src);
0a7de745 2892 }
316670eb 2893
0a7de745 2894 route_copyout(dst, src, sizeof(*dst));
b0d623f7
A
2895}
2896
2897void
2898inp_route_copyin(struct inpcb *inp, struct route *src)
2899{
2900 struct route *dst = &inp->inp_route;
2901
5ba3f43e 2902 socket_lock_assert_owned(inp->inp_socket);
b0d623f7
A
2903
2904 /* Minor sanity check */
0a7de745 2905 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
b0d623f7 2906 panic("%s: wrong or corrupted route: %p", __func__, src);
0a7de745 2907 }
b0d623f7 2908
0a7de745 2909 route_copyin(src, dst, sizeof(*src));
6d2010ae
A
2910}
2911
2912/*
39037602 2913 * Handler for setting IP_BOUND_IF/IPV6_BOUND_IF socket option.
6d2010ae 2914 */
316670eb 2915int
39236c6e 2916inp_bindif(struct inpcb *inp, unsigned int ifscope, struct ifnet **pifp)
6d2010ae 2917{
316670eb
A
2918 struct ifnet *ifp = NULL;
2919
2920 ifnet_head_lock_shared();
2921 if ((ifscope > (unsigned)if_index) || (ifscope != IFSCOPE_NONE &&
2922 (ifp = ifindex2ifnet[ifscope]) == NULL)) {
2923 ifnet_head_done();
0a7de745 2924 return ENXIO;
316670eb
A
2925 }
2926 ifnet_head_done();
2927
2928 VERIFY(ifp != NULL || ifscope == IFSCOPE_NONE);
2929
6d2010ae
A
2930 /*
2931 * A zero interface scope value indicates an "unbind".
2932 * Otherwise, take in whatever value the app desires;
2933 * the app may already know the scope (or force itself
2934 * to such a scope) ahead of time before the interface
2935 * gets attached. It doesn't matter either way; any
2936 * route lookup from this point on will require an
2937 * exact match for the embedded interface scope.
2938 */
316670eb 2939 inp->inp_boundifp = ifp;
0a7de745 2940 if (inp->inp_boundifp == NULL) {
6d2010ae 2941 inp->inp_flags &= ~INP_BOUND_IF;
0a7de745 2942 } else {
6d2010ae 2943 inp->inp_flags |= INP_BOUND_IF;
0a7de745 2944 }
6d2010ae
A
2945
2946 /* Blow away any cached route in the PCB */
39236c6e
A
2947 ROUTE_RELEASE(&inp->inp_route);
2948
0a7de745 2949 if (pifp != NULL) {
39236c6e 2950 *pifp = ifp;
0a7de745 2951 }
316670eb 2952
0a7de745 2953 return 0;
6d2010ae
A
2954}
2955
2956/*
39236c6e
A
2957 * Handler for setting IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
2958 * as well as for setting PROC_UUID_NO_CELLULAR policy.
6d2010ae 2959 */
39236c6e
A
2960void
2961inp_set_nocellular(struct inpcb *inp)
6d2010ae 2962{
39236c6e 2963 inp->inp_flags |= INP_NO_IFT_CELLULAR;
6d2010ae
A
2964
2965 /* Blow away any cached route in the PCB */
39236c6e
A
2966 ROUTE_RELEASE(&inp->inp_route);
2967}
2968
2969/*
2970 * Handler for clearing IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
2971 * as well as for clearing PROC_UUID_NO_CELLULAR policy.
2972 */
2973void
2974inp_clear_nocellular(struct inpcb *inp)
2975{
2976 struct socket *so = inp->inp_socket;
2977
2978 /*
2979 * SO_RESTRICT_DENY_CELLULAR socket restriction issued on the socket
2980 * has a higher precendence than INP_NO_IFT_CELLULAR. Clear the flag
2981 * if and only if the socket is unrestricted.
2982 */
2983 if (so != NULL && !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
2984 inp->inp_flags &= ~INP_NO_IFT_CELLULAR;
2985
2986 /* Blow away any cached route in the PCB */
2987 ROUTE_RELEASE(&inp->inp_route);
6d2010ae 2988 }
39236c6e 2989}
6d2010ae 2990
fe8ab488
A
2991void
2992inp_set_noexpensive(struct inpcb *inp)
2993{
2994 inp->inp_flags2 |= INP2_NO_IFF_EXPENSIVE;
2995
2996 /* Blow away any cached route in the PCB */
2997 ROUTE_RELEASE(&inp->inp_route);
2998}
2999
cb323159
A
3000void
3001inp_set_noconstrained(struct inpcb *inp)
3002{
3003 inp->inp_flags2 |= INP2_NO_IFF_CONSTRAINED;
3004
3005 /* Blow away any cached route in the PCB */
3006 ROUTE_RELEASE(&inp->inp_route);
3007}
3008
fe8ab488
A
3009void
3010inp_set_awdl_unrestricted(struct inpcb *inp)
3011{
3012 inp->inp_flags2 |= INP2_AWDL_UNRESTRICTED;
3013
3014 /* Blow away any cached route in the PCB */
3015 ROUTE_RELEASE(&inp->inp_route);
3016}
3017
3018boolean_t
3019inp_get_awdl_unrestricted(struct inpcb *inp)
3020{
3021 return (inp->inp_flags2 & INP2_AWDL_UNRESTRICTED) ? TRUE : FALSE;
3022}
3023
3024void
3025inp_clear_awdl_unrestricted(struct inpcb *inp)
3026{
3027 inp->inp_flags2 &= ~INP2_AWDL_UNRESTRICTED;
3028
3029 /* Blow away any cached route in the PCB */
3030 ROUTE_RELEASE(&inp->inp_route);
3031}
3032
39037602
A
3033void
3034inp_set_intcoproc_allowed(struct inpcb *inp)
3035{
3036 inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
3037
3038 /* Blow away any cached route in the PCB */
3039 ROUTE_RELEASE(&inp->inp_route);
3040}
3041
3042boolean_t
3043inp_get_intcoproc_allowed(struct inpcb *inp)
3044{
3045 return (inp->inp_flags2 & INP2_INTCOPROC_ALLOWED) ? TRUE : FALSE;
3046}
3047
3048void
3049inp_clear_intcoproc_allowed(struct inpcb *inp)
3050{
3051 inp->inp_flags2 &= ~INP2_INTCOPROC_ALLOWED;
3052
3053 /* Blow away any cached route in the PCB */
3054 ROUTE_RELEASE(&inp->inp_route);
3055}
3056
fe8ab488 3057#if NECP
39236c6e 3058/*
fe8ab488 3059 * Called when PROC_UUID_NECP_APP_POLICY is set.
39236c6e
A
3060 */
3061void
fe8ab488 3062inp_set_want_app_policy(struct inpcb *inp)
39236c6e 3063{
fe8ab488 3064 inp->inp_flags2 |= INP2_WANT_APP_POLICY;
39236c6e
A
3065}
3066
3067/*
fe8ab488 3068 * Called when PROC_UUID_NECP_APP_POLICY is cleared.
39236c6e
A
3069 */
3070void
fe8ab488 3071inp_clear_want_app_policy(struct inpcb *inp)
39236c6e 3072{
fe8ab488 3073 inp->inp_flags2 &= ~INP2_WANT_APP_POLICY;
b0d623f7 3074}
fe8ab488 3075#endif /* NECP */
316670eb
A
3076
3077/*
3078 * Calculate flow hash for an inp, used by an interface to identify a
3079 * flow. When an interface provides flow control advisory, this flow
3080 * hash is used as an identifier.
3081 */
3082u_int32_t
3083inp_calc_flowhash(struct inpcb *inp)
3084{
3085 struct inp_flowhash_key fh __attribute__((aligned(8)));
3086 u_int32_t flowhash = 0;
bd504ef0 3087 struct inpcb *tmp_inp = NULL;
316670eb 3088
0a7de745 3089 if (inp_hash_seed == 0) {
316670eb 3090 inp_hash_seed = RandomULong();
0a7de745 3091 }
316670eb 3092
0a7de745 3093 bzero(&fh, sizeof(fh));
316670eb 3094
0a7de745
A
3095 bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof(fh.infh_laddr));
3096 bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof(fh.infh_faddr));
316670eb
A
3097
3098 fh.infh_lport = inp->inp_lport;
3099 fh.infh_fport = inp->inp_fport;
3100 fh.infh_af = (inp->inp_vflag & INP_IPV6) ? AF_INET6 : AF_INET;
3101 fh.infh_proto = inp->inp_ip_p;
3102 fh.infh_rand1 = RandomULong();
3103 fh.infh_rand2 = RandomULong();
3104
3105try_again:
0a7de745 3106 flowhash = net_flowhash(&fh, sizeof(fh), inp_hash_seed);
316670eb
A
3107 if (flowhash == 0) {
3108 /* try to get a non-zero flowhash */
3109 inp_hash_seed = RandomULong();
3110 goto try_again;
3111 }
3112
bd504ef0 3113 inp->inp_flowhash = flowhash;
316670eb 3114
bd504ef0 3115 /* Insert the inp into inp_fc_tree */
39236c6e 3116 lck_mtx_lock_spin(&inp_fc_lck);
bd504ef0
A
3117 tmp_inp = RB_FIND(inp_fc_tree, &inp_fc_tree, inp);
3118 if (tmp_inp != NULL) {
316670eb 3119 /*
bd504ef0
A
3120 * There is a different inp with the same flowhash.
3121 * There can be a collision on flow hash but the
39236c6e 3122 * probability is low. Let's recompute the
bd504ef0 3123 * flowhash.
316670eb
A
3124 */
3125 lck_mtx_unlock(&inp_fc_lck);
bd504ef0
A
3126 /* recompute hash seed */
3127 inp_hash_seed = RandomULong();
3128 goto try_again;
316670eb 3129 }
39236c6e 3130
bd504ef0
A
3131 RB_INSERT(inp_fc_tree, &inp_fc_tree, inp);
3132 inp->inp_flags2 |= INP2_IN_FCTREE;
316670eb 3133 lck_mtx_unlock(&inp_fc_lck);
bd504ef0 3134
0a7de745 3135 return flowhash;
39236c6e
A
3136}
3137
3138void
3139inp_flowadv(uint32_t flowhash)
3140{
3141 struct inpcb *inp;
3142
3143 inp = inp_fc_getinp(flowhash, 0);
3144
0a7de745 3145 if (inp == NULL) {
39236c6e 3146 return;
0a7de745 3147 }
39236c6e 3148 inp_fc_feedback(inp);
316670eb
A
3149}
3150
bd504ef0
A
3151/*
3152 * Function to compare inp_fc_entries in inp flow control tree
3153 */
3154static inline int
3155infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
316670eb 3156{
0a7de745
A
3157 return memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
3158 sizeof(inp1->inp_flowhash));
bd504ef0 3159}
316670eb 3160
39236c6e 3161static struct inpcb *
bd504ef0
A
3162inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
3163{
3164 struct inpcb *inp = NULL;
3165 int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;
316670eb
A
3166
3167 lck_mtx_lock_spin(&inp_fc_lck);
bd504ef0
A
3168 key_inp.inp_flowhash = flowhash;
3169 inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
3170 if (inp == NULL) {
316670eb
A
3171 /* inp is not present, return */
3172 lck_mtx_unlock(&inp_fc_lck);
0a7de745 3173 return NULL;
316670eb
A
3174 }
3175
bd504ef0
A
3176 if (flags & INPFC_REMOVE) {
3177 RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
3178 lck_mtx_unlock(&inp_fc_lck);
316670eb 3179
0a7de745 3180 bzero(&(inp->infc_link), sizeof(inp->infc_link));
bd504ef0 3181 inp->inp_flags2 &= ~INP2_IN_FCTREE;
0a7de745 3182 return NULL;
316670eb 3183 }
39236c6e 3184
0a7de745 3185 if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
bd504ef0 3186 inp = NULL;
0a7de745 3187 }
316670eb
A
3188 lck_mtx_unlock(&inp_fc_lck);
3189
0a7de745 3190 return inp;
316670eb
A
3191}
3192
39236c6e 3193static void
316670eb
A
3194inp_fc_feedback(struct inpcb *inp)
3195{
3196 struct socket *so = inp->inp_socket;
3197
3198 /* we already hold a want_cnt on this inp, socket can't be null */
39236c6e 3199 VERIFY(so != NULL);
316670eb
A
3200 socket_lock(so, 1);
3201
3202 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3203 socket_unlock(so, 1);
3204 return;
3205 }
3206
0a7de745 3207 if (inp->inp_sndinprog_cnt > 0) {
fe8ab488 3208 inp->inp_flags |= INP_FC_FEEDBACK;
0a7de745 3209 }
fe8ab488 3210
316670eb
A
3211 /*
3212 * Return if the connection is not in flow-controlled state.
3213 * This can happen if the connection experienced
3214 * loss while it was in flow controlled state
3215 */
3216 if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
3217 socket_unlock(so, 1);
3218 return;
3219 }
3220 inp_reset_fc_state(inp);
3221
0a7de745 3222 if (SOCK_TYPE(so) == SOCK_STREAM) {
316670eb 3223 inp_fc_unthrottle_tcp(inp);
0a7de745 3224 }
316670eb
A
3225
3226 socket_unlock(so, 1);
3227}
3228
3229void
3230inp_reset_fc_state(struct inpcb *inp)
3231{
3232 struct socket *so = inp->inp_socket;
3233 int suspended = (INP_IS_FLOW_SUSPENDED(inp)) ? 1 : 0;
3234 int needwakeup = (INP_WAIT_FOR_IF_FEEDBACK(inp)) ? 1 : 0;
3235
3236 inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
3237
3238 if (suspended) {
3239 so->so_flags &= ~(SOF_SUSPENDED);
3240 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
3241 }
3242
316670eb 3243 /* Give a write wakeup to unblock the socket */
0a7de745 3244 if (needwakeup) {
316670eb 3245 sowwakeup(so);
0a7de745 3246 }
316670eb
A
3247}
3248
3249int
3250inp_set_fc_state(struct inpcb *inp, int advcode)
3251{
bd504ef0 3252 struct inpcb *tmp_inp = NULL;
316670eb 3253 /*
39236c6e 3254 * If there was a feedback from the interface when
316670eb
A
3255 * send operation was in progress, we should ignore
3256 * this flow advisory to avoid a race between setting
3257 * flow controlled state and receiving feedback from
3258 * the interface
3259 */
0a7de745
A
3260 if (inp->inp_flags & INP_FC_FEEDBACK) {
3261 return 0;
3262 }
316670eb
A
3263
3264 inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
39236c6e
A
3265 if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
3266 INPFC_SOLOCKED)) != NULL) {
0a7de745
A
3267 if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3268 return 0;
3269 }
bd504ef0 3270 VERIFY(tmp_inp == inp);
316670eb
A
3271 switch (advcode) {
3272 case FADV_FLOW_CONTROLLED:
3273 inp->inp_flags |= INP_FLOW_CONTROLLED;
3274 break;
3275 case FADV_SUSPENDED:
3276 inp->inp_flags |= INP_FLOW_SUSPENDED;
3277 soevent(inp->inp_socket,
3278 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));
3279
3280 /* Record the fact that suspend event was sent */
3281 inp->inp_socket->so_flags |= SOF_SUSPENDED;
3282 break;
3283 }
0a7de745 3284 return 1;
316670eb 3285 }
0a7de745 3286 return 0;
316670eb
A
3287}
3288
3289/*
3290 * Handler for SO_FLUSH socket option.
3291 */
3292int
3293inp_flush(struct inpcb *inp, int optval)
3294{
3295 u_int32_t flowhash = inp->inp_flowhash;
39236c6e 3296 struct ifnet *rtifp, *oifp;
316670eb
A
3297
3298 /* Either all classes or one of the valid ones */
0a7de745
A
3299 if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) {
3300 return EINVAL;
3301 }
316670eb
A
3302
3303 /* We need a flow hash for identification */
0a7de745
A
3304 if (flowhash == 0) {
3305 return 0;
3306 }
316670eb 3307
39236c6e
A
3308 /* Grab the interfaces from the route and pcb */
3309 rtifp = ((inp->inp_route.ro_rt != NULL) ?
3310 inp->inp_route.ro_rt->rt_ifp : NULL);
3311 oifp = inp->inp_last_outifp;
3312
0a7de745 3313 if (rtifp != NULL) {
39236c6e 3314 if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
0a7de745
A
3315 }
3316 if (oifp != NULL && oifp != rtifp) {
39236c6e 3317 if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
0a7de745 3318 }
316670eb 3319
0a7de745 3320 return 0;
316670eb
A
3321}
3322
3323/*
3324 * Clear the INP_INADDR_ANY flag (special case for PPP only)
3325 */
39236c6e
A
3326void
3327inp_clear_INP_INADDR_ANY(struct socket *so)
316670eb
A
3328{
3329 struct inpcb *inp = NULL;
3330
3331 socket_lock(so, 1);
3332 inp = sotoinpcb(so);
3333 if (inp) {
3334 inp->inp_flags &= ~INP_INADDR_ANY;
3335 }
3336 socket_unlock(so, 1);
3337}
3338
39236c6e
A
3339void
3340inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
3341{
3342 struct socket *so = inp->inp_socket;
3343
3344 soprocinfo->spi_pid = so->last_pid;
cb323159
A
3345 strlcpy(&soprocinfo->spi_proc_name[0], &inp->inp_last_proc_name[0],
3346 sizeof(soprocinfo->spi_proc_name));
0a7de745 3347 if (so->last_pid != 0) {
fe8ab488 3348 uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
0a7de745 3349 }
39236c6e
A
3350 /*
3351 * When not delegated, the effective pid is the same as the real pid
3352 */
fe8ab488 3353 if (so->so_flags & SOF_DELEGATED) {
3e170ce0 3354 soprocinfo->spi_delegated = 1;
39236c6e 3355 soprocinfo->spi_epid = so->e_pid;
3e170ce0 3356 uuid_copy(soprocinfo->spi_euuid, so->e_uuid);
fe8ab488 3357 } else {
3e170ce0 3358 soprocinfo->spi_delegated = 0;
39236c6e 3359 soprocinfo->spi_epid = so->last_pid;
fe8ab488 3360 }
cb323159
A
3361 strlcpy(&soprocinfo->spi_e_proc_name[0], &inp->inp_e_proc_name[0],
3362 sizeof(soprocinfo->spi_e_proc_name));
39236c6e
A
3363}
3364
3365int
3366inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
3367 struct so_procinfo *soprocinfo)
3368{
3369 struct inpcb *inp = NULL;
3370 int found = 0;
3371
0a7de745 3372 bzero(soprocinfo, sizeof(struct so_procinfo));
39236c6e 3373
0a7de745
A
3374 if (!flowhash) {
3375 return -1;
3376 }
39236c6e
A
3377
3378 lck_rw_lock_shared(pcbinfo->ipi_lock);
3379 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
3380 if (inp->inp_state != INPCB_STATE_DEAD &&
3381 inp->inp_socket != NULL &&
3382 inp->inp_flowhash == flowhash) {
3383 found = 1;
3384 inp_get_soprocinfo(inp, soprocinfo);
3385 break;
3386 }
3387 }
3388 lck_rw_done(pcbinfo->ipi_lock);
3389
0a7de745 3390 return found;
39236c6e
A
3391}
3392
3393#if CONFIG_PROC_UUID_POLICY
3394static void
3395inp_update_cellular_policy(struct inpcb *inp, boolean_t set)
3396{
3397 struct socket *so = inp->inp_socket;
3398 int before, after;
3399
3400 VERIFY(so != NULL);
3401 VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3402
fe8ab488 3403 before = INP_NO_CELLULAR(inp);
39236c6e
A
3404 if (set) {
3405 inp_set_nocellular(inp);
3406 } else {
3407 inp_clear_nocellular(inp);
3408 }
fe8ab488 3409 after = INP_NO_CELLULAR(inp);
39236c6e
A
3410 if (net_io_policy_log && (before != after)) {
3411 static const char *ok = "OK";
3412 static const char *nok = "NOACCESS";
3413 uuid_string_t euuid_buf;
3414 pid_t epid;
3415
3416 if (so->so_flags & SOF_DELEGATED) {
3417 uuid_unparse(so->e_uuid, euuid_buf);
3418 epid = so->e_pid;
3419 } else {
3420 uuid_unparse(so->last_uuid, euuid_buf);
3421 epid = so->last_pid;
3422 }
3423
3424 /* allow this socket to generate another notification event */
3425 so->so_ifdenied_notifies = 0;
3426
3427 log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
3428 "euuid %s%s %s->%s\n", __func__,
3429 (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
3430 SOCK_TYPE(so), epid, euuid_buf,
3431 (so->so_flags & SOF_DELEGATED) ?
3432 " [delegated]" : "",
3433 ((before < after) ? ok : nok),
3434 ((before < after) ? nok : ok));
3435 }
3436}
3437
fe8ab488 3438#if NECP
39236c6e 3439static void
fe8ab488 3440inp_update_necp_want_app_policy(struct inpcb *inp, boolean_t set)
39236c6e
A
3441{
3442 struct socket *so = inp->inp_socket;
3443 int before, after;
3444
3445 VERIFY(so != NULL);
3446 VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3447
fe8ab488 3448 before = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
39236c6e 3449 if (set) {
fe8ab488 3450 inp_set_want_app_policy(inp);
39236c6e 3451 } else {
fe8ab488 3452 inp_clear_want_app_policy(inp);
39236c6e 3453 }
fe8ab488 3454 after = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
39236c6e
A
3455 if (net_io_policy_log && (before != after)) {
3456 static const char *wanted = "WANTED";
3457 static const char *unwanted = "UNWANTED";
3458 uuid_string_t euuid_buf;
3459 pid_t epid;
3460
3461 if (so->so_flags & SOF_DELEGATED) {
3462 uuid_unparse(so->e_uuid, euuid_buf);
3463 epid = so->e_pid;
3464 } else {
3465 uuid_unparse(so->last_uuid, euuid_buf);
3466 epid = so->last_pid;
3467 }
3468
3469 log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
3470 "euuid %s%s %s->%s\n", __func__,
3471 (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
3472 SOCK_TYPE(so), epid, euuid_buf,
3473 (so->so_flags & SOF_DELEGATED) ?
3474 " [delegated]" : "",
3475 ((before < after) ? unwanted : wanted),
3476 ((before < after) ? wanted : unwanted));
3477 }
3478}
fe8ab488 3479#endif /* NECP */
39236c6e
A
3480#endif /* !CONFIG_PROC_UUID_POLICY */
3481
fe8ab488
A
3482#if NECP
3483void
3484inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int override_bound_interface)
3485{
3486 necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface);
3487 if (necp_socket_should_rescope(inp) &&
0a7de745
A
3488 inp->inp_lport == 0 &&
3489 inp->inp_laddr.s_addr == INADDR_ANY &&
3490 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
fe8ab488
A
3491 // If we should rescope, and the socket is not yet bound
3492 inp_bindif(inp, necp_socket_get_rescope_if_index(inp), NULL);
3493 }
3494}
3495#endif /* NECP */
3496
39236c6e
A
int
inp_update_policy(struct inpcb *inp)
{
#if CONFIG_PROC_UUID_POLICY
	struct socket *so = inp->inp_socket;
	uint32_t pflags = 0;
	int32_t prev_gencnt;
	int err = 0;
	uint8_t *lookup_uuid = NULL;

	/* Nothing to do without the feature, a socket, or a live PCB */
	if (!net_io_policy_uuid ||
	    so == NULL || inp->inp_state == INPCB_STATE_DEAD) {
		return 0;
	}

	/*
	 * Kernel-created sockets that aren't delegating other sockets
	 * are currently exempted from UUID policy checks.
	 */
	if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) {
		return 0;
	}

#if defined(XNU_TARGET_OS_OSX)
	/* Prefer the responsible process UUID when one is recorded */
	if (so->so_rpid > 0) {
		lookup_uuid = so->so_ruuid;
		prev_gencnt = so->so_policy_gencnt;
		err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
	}
#endif
	/* Fall back to the delegated/last process UUID */
	if (lookup_uuid == NULL || err == ENOENT) {
		lookup_uuid = ((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid);
		prev_gencnt = so->so_policy_gencnt;
		err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
	}

	/*
	 * Discard cached generation count if the entry is gone (ENOENT),
	 * so that we go thru the checks below.
	 */
	if (err == ENOENT && prev_gencnt != 0) {
		so->so_policy_gencnt = 0;
	}

	/*
	 * If the generation count has changed, inspect the policy flags
	 * and act accordingly.  If a policy flag was previously set and
	 * the UUID is no longer present in the table (ENOENT), treat it
	 * as if the flag has been cleared.
	 */
	if ((err == 0 || err == ENOENT) && prev_gencnt != so->so_policy_gencnt) {
		/* update cellular policy for this socket */
		if (err == 0 && (pflags & PROC_UUID_NO_CELLULAR)) {
			inp_update_cellular_policy(inp, TRUE);
		} else if (!(pflags & PROC_UUID_NO_CELLULAR)) {
			inp_update_cellular_policy(inp, FALSE);
		}
#if NECP
		/* update necp want app policy for this socket */
		if (err == 0 && (pflags & PROC_UUID_NECP_APP_POLICY)) {
			inp_update_necp_want_app_policy(inp, TRUE);
		} else if (!(pflags & PROC_UUID_NECP_APP_POLICY)) {
			inp_update_necp_want_app_policy(inp, FALSE);
		}
#endif /* NECP */
	}

	return (err == ENOENT) ? 0 : err;
#else /* !CONFIG_PROC_UUID_POLICY */
#pragma unused(inp)
	return 0;
#endif /* !CONFIG_PROC_UUID_POLICY */
}
39037602
A
3570
3571static unsigned int log_restricted;
3572SYSCTL_DECL(_net_inet);
3573SYSCTL_INT(_net_inet, OID_AUTO, log_restricted,
3574 CTLFLAG_RW | CTLFLAG_LOCKED, &log_restricted, 0,
3575 "Log network restrictions");
fe8ab488
A
3576/*
3577 * Called when we need to enforce policy restrictions in the input path.
3578 *
3579 * Returns TRUE if we're not allowed to receive data, otherwise FALSE.
3580 */
39037602
A
3581static boolean_t
3582_inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
39236c6e
A
3583{
3584 VERIFY(inp != NULL);
3585
fe8ab488
A
3586 /*
3587 * Inbound restrictions.
3588 */
0a7de745
A
3589 if (!sorestrictrecv) {
3590 return FALSE;
3591 }
39236c6e 3592
0a7de745
A
3593 if (ifp == NULL) {
3594 return FALSE;
3595 }
fe8ab488 3596
0a7de745
A
3597 if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
3598 return TRUE;
3599 }
fe8ab488 3600
0a7de745
A
3601 if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
3602 return TRUE;
3603 }
fe8ab488 3604
cb323159
A
3605 if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
3606 return TRUE;
3607 }
3608
0a7de745
A
3609 if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
3610 return TRUE;
3611 }
39037602 3612
0a7de745
A
3613 if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) {
3614 return FALSE;
3615 }
39236c6e 3616
0a7de745
A
3617 if (inp->inp_flags & INP_RECV_ANYIF) {
3618 return FALSE;
3619 }
39236c6e 3620
0a7de745
A
3621 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) {
3622 return FALSE;
3623 }
39236c6e 3624
0a7de745
A
3625 if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
3626 return TRUE;
3627 }
39037602 3628
0a7de745 3629 return TRUE;
39236c6e 3630}
fe8ab488 3631
39037602
A
3632boolean_t
3633inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
3634{
3635 boolean_t ret;
3636
3637 ret = _inp_restricted_recv(inp, ifp);
3638 if (ret == TRUE && log_restricted) {
743345f9
A
3639 printf("pid %d (%s) is unable to receive packets on %s\n",
3640 current_proc()->p_pid, proc_best_name(current_proc()),
3641 ifp->if_xname);
39037602 3642 }
0a7de745 3643 return ret;
39037602
A
3644}
3645
fe8ab488
A
3646/*
3647 * Called when we need to enforce policy restrictions in the output path.
3648 *
3649 * Returns TRUE if we're not allowed to send data out, otherwise FALSE.
3650 */
39037602
A
3651static boolean_t
3652_inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
fe8ab488
A
3653{
3654 VERIFY(inp != NULL);
3655
3656 /*
3657 * Outbound restrictions.
3658 */
0a7de745
A
3659 if (!sorestrictsend) {
3660 return FALSE;
3661 }
fe8ab488 3662
0a7de745
A
3663 if (ifp == NULL) {
3664 return FALSE;
3665 }
fe8ab488 3666
0a7de745
A
3667 if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
3668 return TRUE;
3669 }
fe8ab488 3670
0a7de745
A
3671 if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
3672 return TRUE;
3673 }
fe8ab488 3674
cb323159
A
3675 if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
3676 return TRUE;
3677 }
3678
0a7de745
A
3679 if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
3680 return TRUE;
3681 }
fe8ab488 3682
0a7de745
A
3683 if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
3684 return TRUE;
3685 }
39037602 3686
0a7de745 3687 return FALSE;
fe8ab488 3688}
39037602
A
3689
3690boolean_t
3691inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
3692{
3693 boolean_t ret;
3694
3695 ret = _inp_restricted_send(inp, ifp);
3696 if (ret == TRUE && log_restricted) {
743345f9
A
3697 printf("pid %d (%s) is unable to transmit packets on %s\n",
3698 current_proc()->p_pid, proc_best_name(current_proc()),
3699 ifp->if_xname);
39037602 3700 }
0a7de745 3701 return ret;
39037602
A
3702}
3703
3704inline void
3705inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack)
3706{
3707 struct ifnet *ifp = inp->inp_last_outifp;
3708 struct socket *so = inp->inp_socket;
3709 if (ifp != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
cb323159 3710 (ifp->if_type == IFT_CELLULAR || IFNET_IS_WIFI(ifp))) {
39037602
A
3711 int32_t unsent;
3712
3713 so->so_snd.sb_flags |= SB_SNDBYTE_CNT;
3714
3715 /*
3716 * There can be data outstanding before the connection
3717 * becomes established -- TFO case
3718 */
0a7de745 3719 if (so->so_snd.sb_cc > 0) {
39037602 3720 inp_incr_sndbytes_total(so, so->so_snd.sb_cc);
0a7de745 3721 }
39037602
A
3722
3723 unsent = inp_get_sndbytes_allunsent(so, th_ack);
0a7de745 3724 if (unsent > 0) {
39037602 3725 inp_incr_sndbytes_unsent(so, unsent);
0a7de745 3726 }
39037602
A
3727 }
3728}
3729
3730inline void
3731inp_incr_sndbytes_total(struct socket *so, int32_t len)
3732{
3733 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3734 struct ifnet *ifp = inp->inp_last_outifp;
3735
3736 if (ifp != NULL) {
3737 VERIFY(ifp->if_sndbyte_total >= 0);
3738 OSAddAtomic64(len, &ifp->if_sndbyte_total);
3739 }
3740}
3741
3742inline void
3743inp_decr_sndbytes_total(struct socket *so, int32_t len)
3744{
3745 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3746 struct ifnet *ifp = inp->inp_last_outifp;
3747
3748 if (ifp != NULL) {
3749 VERIFY(ifp->if_sndbyte_total >= len);
3750 OSAddAtomic64(-len, &ifp->if_sndbyte_total);
3751 }
3752}
3753
3754inline void
3755inp_incr_sndbytes_unsent(struct socket *so, int32_t len)
3756{
3757 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3758 struct ifnet *ifp = inp->inp_last_outifp;
3759
3760 if (ifp != NULL) {
3761 VERIFY(ifp->if_sndbyte_unsent >= 0);
3762 OSAddAtomic64(len, &ifp->if_sndbyte_unsent);
3763 }
3764}
3765
3766inline void
3767inp_decr_sndbytes_unsent(struct socket *so, int32_t len)
3768{
0a7de745 3769 if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
39037602 3770 return;
0a7de745 3771 }
39037602 3772
cb323159
A
3773 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3774 struct ifnet *ifp = inp->inp_last_outifp;
3775
39037602 3776 if (ifp != NULL) {
0a7de745 3777 if (ifp->if_sndbyte_unsent >= len) {
39037602 3778 OSAddAtomic64(-len, &ifp->if_sndbyte_unsent);
0a7de745 3779 } else {
39037602 3780 ifp->if_sndbyte_unsent = 0;
0a7de745 3781 }
39037602
A
3782 }
3783}
3784
3785inline void
3786inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
3787{
3788 int32_t len;
3789
0a7de745 3790 if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
39037602 3791 return;
0a7de745 3792 }
39037602
A
3793
3794 len = inp_get_sndbytes_allunsent(so, th_ack);
3795 inp_decr_sndbytes_unsent(so, len);
3796}
5ba3f43e
A
3797
3798
3799inline void
3800inp_set_activity_bitmap(struct inpcb *inp)
3801{
3802 in_stat_set_activity_bitmap(&inp->inp_nw_activity, net_uptime());
3803}
3804
3805inline void
3806inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab)
3807{
0a7de745 3808 bcopy(&inp->inp_nw_activity, ab, sizeof(*ab));
5ba3f43e 3809}
cb323159
A
3810
3811void
3812inp_update_last_owner(struct socket *so, struct proc *p, struct proc *ep)
3813{
3814 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3815
3816 if (inp == NULL) {
3817 return;
3818 }
3819
3820 if (p != NULL) {
3821 strlcpy(&inp->inp_last_proc_name[0], proc_name_address(p), sizeof(inp->inp_last_proc_name));
3822 }
3823 if (so->so_flags & SOF_DELEGATED) {
3824 if (ep != NULL) {
3825 strlcpy(&inp->inp_e_proc_name[0], proc_name_address(ep), sizeof(inp->inp_e_proc_name));
3826 } else {
3827 inp->inp_e_proc_name[0] = 0;
3828 }
3829 } else {
3830 inp->inp_e_proc_name[0] = 0;
3831 }
3832}
3833
3834void
3835inp_copy_last_owner(struct socket *so, struct socket *head)
3836{
3837 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3838 struct inpcb *head_inp = (struct inpcb *)head->so_pcb;
3839
3840 if (inp == NULL || head_inp == NULL) {
3841 return;
3842 }
3843
3844 strlcpy(&inp->inp_last_proc_name[0], &head_inp->inp_last_proc_name[0], sizeof(inp->inp_last_proc_name));
3845 strlcpy(&inp->inp_e_proc_name[0], &head_inp->inp_e_proc_name[0], sizeof(inp->inp_e_proc_name));
3846}