[apple/xnu.git] bsd/netinet/in_pcb.c (xnu-6153.121.1)
/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)in_pcb.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/in_pcb.c,v 1.59.2.17 2001/08/13 16:26:17 ume Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/kauth.h>
#include <sys/priv.h>
#include <sys/proc_uuid_policy.h>
#include <sys/syslog.h>
#include <net/dlil.h>

#include <libkern/OSAtomic.h>
#include <kern/locks.h>

#include <machine/limits.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/flowhash.h>
#include <net/flowadv.h>
#include <net/nat464_utils.h>
#include <net/ntstat.h>
#include <net/restricted_in_port.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#if INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif /* INET6 */

#include <sys/kdebug.h>
#include <sys/random.h>

#include <dev/random/randomdev.h>
#include <mach/boolean.h>

#include <pexpert/pexpert.h>

#if NECP
#include <net/necp.h>
#endif

#include <sys/stat.h>
#include <sys/ubc.h>
#include <sys/vnode.h>

#include <os/log.h>

extern const char *proc_name_address(struct proc *);

static lck_grp_t *inpcb_lock_grp;
static lck_attr_t *inpcb_lock_attr;
static lck_grp_attr_t *inpcb_lock_grp_attr;
decl_lck_mtx_data(static, inpcb_lock);		/* global INPCB lock */
decl_lck_mtx_data(static, inpcb_timeout_lock);

static TAILQ_HEAD(, inpcbinfo) inpcb_head = TAILQ_HEAD_INITIALIZER(inpcb_head);

static u_int16_t inpcb_timeout_run = 0;	/* INPCB timer is scheduled to run */
static boolean_t inpcb_garbage_collecting = FALSE; /* gc timer is scheduled */
static boolean_t inpcb_ticking = FALSE;		/* "slow" timer is scheduled */
static boolean_t inpcb_fast_timer_on = FALSE;

#define	INPCB_GCREQ_THRESHOLD	50000

static thread_call_t inpcb_thread_call, inpcb_fast_thread_call;
static void inpcb_sched_timeout(void);
static void inpcb_sched_lazy_timeout(void);
static void _inpcb_sched_timeout(unsigned int);
static void inpcb_timeout(void *, void *);
const int inpcb_timeout_lazy = 10;	/* 10 seconds leeway for lazy timers */
extern int tvtohz(struct timeval *);

#if CONFIG_PROC_UUID_POLICY
static void inp_update_cellular_policy(struct inpcb *, boolean_t);
#if NECP
static void inp_update_necp_want_app_policy(struct inpcb *, boolean_t);
#endif /* NECP */
#endif /* CONFIG_PROC_UUID_POLICY */

#define	DBG_FNC_PCB_LOOKUP	NETDBG_CODE(DBG_NETTCP, (6 << 8))
#define	DBG_FNC_PCB_HLOOKUP	NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))

/*
 * These configure the range of local port addresses assigned to
 * "unspecified" outgoing connections/packets/whatever.
 */
int ipport_lowfirstauto = IPPORT_RESERVED - 1;	/* 1023 */
int ipport_lowlastauto = IPPORT_RESERVEDSTART;	/* 600 */
int ipport_firstauto = IPPORT_HIFIRSTAUTO;	/* 49152 */
int ipport_lastauto = IPPORT_HILASTAUTO;	/* 65535 */
int ipport_hifirstauto = IPPORT_HIFIRSTAUTO;	/* 49152 */
int ipport_hilastauto = IPPORT_HILASTAUTO;	/* 65535 */

#define	RANGECHK(var, min, max) \
	if ((var) < (min)) { (var) = (min); } \
	else if ((var) > (max)) { (var) = (max); }

static int
sysctl_net_ipport_check SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
#if (DEBUG | DEVELOPMENT)
	int old_value = *(int *)oidp->oid_arg1;
	/*
	 * For unit testing, allow a non-superuser process with the
	 * proper entitlement to modify the variables.
	 */
	if (req->newptr) {
		if (proc_suser(current_proc()) != 0 &&
		    (error = priv_check_cred(kauth_cred_get(),
		    PRIV_NETINET_RESERVEDPORT, 0))) {
			return EPERM;
		}
	}
#endif /* (DEBUG | DEVELOPMENT) */

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error) {
		RANGECHK(ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
		RANGECHK(ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
		RANGECHK(ipport_firstauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_lastauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX);
	}

#if (DEBUG | DEVELOPMENT)
	os_log(OS_LOG_DEFAULT,
	    "%s:%u sysctl net.inet.ip.portrange: %d -> %d",
	    proc_best_name(current_proc()), proc_selfpid(),
	    old_value, *(int *)oidp->oid_arg1);
#endif /* (DEBUG | DEVELOPMENT) */

	return error;
}

#undef RANGECHK

SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP Ports");

#if (DEBUG | DEVELOPMENT)
#define	CTLFLAGS_IP_PORTRANGE \
	(CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY)
#else
#define	CTLFLAGS_IP_PORTRANGE \
	(CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED)
#endif /* (DEBUG | DEVELOPMENT) */

SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
    CTLFLAGS_IP_PORTRANGE,
    &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
    CTLFLAGS_IP_PORTRANGE,
    &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first,
    CTLFLAGS_IP_PORTRANGE,
    &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last,
    CTLFLAGS_IP_PORTRANGE,
    &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
    CTLFLAGS_IP_PORTRANGE,
    &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
    CTLFLAGS_IP_PORTRANGE,
    &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");
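
/*
 * Example (illustrative): the knobs above surface to user space as
 * "net.inet.ip.portrange.*", e.g.
 *
 *	sysctl -w net.inet.ip.portrange.first=10000
 *
 * with sysctl_net_ipport_check() clamping each write into a sane range.
 */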

static uint32_t apn_fallbk_debug = 0;
#define	apn_fallbk_log(x) do { if (apn_fallbk_debug >= 1) log x; } while (0)

#if CONFIG_EMBEDDED
static boolean_t apn_fallbk_enabled = TRUE;

SYSCTL_DECL(_net_inet);
SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "APN Fallback");
SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &apn_fallbk_enabled, 0, "APN fallback enable");
SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &apn_fallbk_debug, 0, "APN fallback debug enable");
#else
static boolean_t apn_fallbk_enabled = FALSE;
#endif

extern int udp_use_randomport;
extern int tcp_use_randomport;

/* Structs used for flowhash computation */
struct inp_flowhash_key_addr {
	union {
		struct in_addr v4;
		struct in6_addr v6;
		u_int8_t addr8[16];
		u_int16_t addr16[8];
		u_int32_t addr32[4];
	} infha;
};

struct inp_flowhash_key {
	struct inp_flowhash_key_addr	infh_laddr;
	struct inp_flowhash_key_addr	infh_faddr;
	u_int32_t			infh_lport;
	u_int32_t			infh_fport;
	u_int32_t			infh_af;
	u_int32_t			infh_proto;
	u_int32_t			infh_rand1;
	u_int32_t			infh_rand2;
};
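
/*
 * Note: the key above is filled from a PCB's local/foreign addresses,
 * ports, address family and protocol, plus two random values, and is
 * hashed (seeded by inp_hash_seed below) to produce the PCB's
 * flowhash; see inp_calc_flowhash() later in this file.
 */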

static u_int32_t inp_hash_seed = 0;

static int infc_cmp(const struct inpcb *, const struct inpcb *);

/* Flags used by inp_fc_getinp */
#define	INPFC_SOLOCKED	0x1
#define	INPFC_REMOVE	0x2
static struct inpcb *inp_fc_getinp(u_int32_t, u_int32_t);

static void inp_fc_feedback(struct inpcb *);
extern void tcp_remove_from_time_wait(struct inpcb *inp);

decl_lck_mtx_data(static, inp_fc_lck);

RB_HEAD(inp_fc_tree, inpcb) inp_fc_tree;
RB_PROTOTYPE(inp_fc_tree, inpcb, infc_link, infc_cmp);
RB_GENERATE(inp_fc_tree, inpcb, infc_link, infc_cmp);
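
/*
 * inp_fc_tree indexes flow-controlled PCBs by flowhash so that a flow
 * advisory coming back from an interface can locate the owning socket
 * without walking the global PCB lists; see inp_fc_getinp() and
 * inp_fc_feedback() declared above.
 */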

/*
 * Use this inp as a key to find an inp in the flowhash tree.
 * Accesses to it are protected by inp_fc_lck.
 */
struct inpcb key_inp;

/*
 * in_pcb.c: manage the Protocol Control Blocks.
 */

void
in_pcbinit(void)
{
	static int inpcb_initialized = 0;

	VERIFY(!inpcb_initialized);
	inpcb_initialized = 1;

	inpcb_lock_grp_attr = lck_grp_attr_alloc_init();
	inpcb_lock_grp = lck_grp_alloc_init("inpcb", inpcb_lock_grp_attr);
	inpcb_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(&inpcb_lock, inpcb_lock_grp, inpcb_lock_attr);
	lck_mtx_init(&inpcb_timeout_lock, inpcb_lock_grp, inpcb_lock_attr);
	inpcb_thread_call = thread_call_allocate_with_priority(inpcb_timeout,
	    NULL, THREAD_CALL_PRIORITY_KERNEL);
	inpcb_fast_thread_call = thread_call_allocate_with_priority(
		inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL);
	if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL) {
		panic("unable to alloc the inpcb thread call");
	}

	/*
	 * Initialize data structures required to deliver
	 * flow advisories.
	 */
	lck_mtx_init(&inp_fc_lck, inpcb_lock_grp, inpcb_lock_attr);
	lck_mtx_lock(&inp_fc_lck);
	RB_INIT(&inp_fc_tree);
	bzero(&key_inp, sizeof(key_inp));
	lck_mtx_unlock(&inp_fc_lck);
}

#define	INPCB_HAVE_TIMER_REQ(req) (((req).intimer_lazy > 0) || \
	((req).intimer_fast > 0) || ((req).intimer_nodelay > 0))
static void
inpcb_timeout(void *arg0, void *arg1)
{
#pragma unused(arg0, arg1)
	struct inpcbinfo *ipi;
	boolean_t t, gc;
	struct intimercount gccnt, tmcnt;

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	bzero(&gccnt, sizeof(gccnt));
	bzero(&tmcnt, sizeof(tmcnt));

	lck_mtx_lock_spin(&inpcb_timeout_lock);
	gc = inpcb_garbage_collecting;
	inpcb_garbage_collecting = FALSE;

	t = inpcb_ticking;
	inpcb_ticking = FALSE;

	if (gc || t) {
		lck_mtx_unlock(&inpcb_timeout_lock);

		lck_mtx_lock(&inpcb_lock);
		TAILQ_FOREACH(ipi, &inpcb_head, ipi_entry) {
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_gc_req)) {
				bzero(&ipi->ipi_gc_req,
				    sizeof(ipi->ipi_gc_req));
				if (gc && ipi->ipi_gc != NULL) {
					ipi->ipi_gc(ipi);
					gccnt.intimer_lazy +=
					    ipi->ipi_gc_req.intimer_lazy;
					gccnt.intimer_fast +=
					    ipi->ipi_gc_req.intimer_fast;
					gccnt.intimer_nodelay +=
					    ipi->ipi_gc_req.intimer_nodelay;
				}
			}
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_timer_req)) {
				bzero(&ipi->ipi_timer_req,
				    sizeof(ipi->ipi_timer_req));
				if (t && ipi->ipi_timer != NULL) {
					ipi->ipi_timer(ipi);
					tmcnt.intimer_lazy +=
					    ipi->ipi_timer_req.intimer_lazy;
					tmcnt.intimer_fast +=
					    ipi->ipi_timer_req.intimer_fast;
					tmcnt.intimer_nodelay +=
					    ipi->ipi_timer_req.intimer_nodelay;
				}
			}
		}
		lck_mtx_unlock(&inpcb_lock);
		lck_mtx_lock_spin(&inpcb_timeout_lock);
	}

	/* lock was dropped above, so check first before overriding */
	if (!inpcb_garbage_collecting) {
		inpcb_garbage_collecting = INPCB_HAVE_TIMER_REQ(gccnt);
	}
	if (!inpcb_ticking) {
		inpcb_ticking = INPCB_HAVE_TIMER_REQ(tmcnt);
	}

	/* re-arm the timer if there's work to do */
	inpcb_timeout_run--;
	VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2);

	if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0) {
		inpcb_sched_timeout();
	} else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5) {
		/* be lazy when idle with little activity */
		inpcb_sched_lazy_timeout();
	} else {
		inpcb_sched_timeout();
	}

	lck_mtx_unlock(&inpcb_timeout_lock);
}
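
/*
 * Scheduling helpers: a "fast" timeout (offset 0) fires in about one
 * second with no leeway, while a "lazy" timeout grants up to
 * inpcb_timeout_lazy (10) seconds of leeway so that wakeups can be
 * coalesced when there is little activity.
 */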

static void
inpcb_sched_timeout(void)
{
	_inpcb_sched_timeout(0);
}

static void
inpcb_sched_lazy_timeout(void)
{
	_inpcb_sched_timeout(inpcb_timeout_lazy);
}

static void
_inpcb_sched_timeout(unsigned int offset)
{
	uint64_t deadline, leeway;

	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	LCK_MTX_ASSERT(&inpcb_timeout_lock, LCK_MTX_ASSERT_OWNED);
	if (inpcb_timeout_run == 0 &&
	    (inpcb_garbage_collecting || inpcb_ticking)) {
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		if (offset == 0) {
			inpcb_fast_timer_on = TRUE;
			thread_call_enter_delayed(inpcb_thread_call,
			    deadline);
		} else {
			inpcb_fast_timer_on = FALSE;
			clock_interval_to_absolutetime_interval(offset,
			    NSEC_PER_SEC, &leeway);
			thread_call_enter_delayed_with_leeway(
				inpcb_thread_call, NULL, deadline, leeway,
				THREAD_CALL_DELAY_LEEWAY);
		}
	} else if (inpcb_timeout_run == 1 &&
	    offset == 0 && !inpcb_fast_timer_on) {
		/*
		 * Since the request was for a fast timer but the
		 * scheduled timer is a lazy timer, try to schedule
		 * another instance of the fast timer as well.
		 */
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		inpcb_fast_timer_on = TRUE;
		thread_call_enter_delayed(inpcb_fast_thread_call, deadline);
	}
}

void
inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type)
{
	u_int32_t gccnt;

	lck_mtx_lock_spin(&inpcb_timeout_lock);
	inpcb_garbage_collecting = TRUE;
	gccnt = ipi->ipi_gc_req.intimer_nodelay +
	    ipi->ipi_gc_req.intimer_fast;

	if (gccnt > INPCB_GCREQ_THRESHOLD) {
		type = INPCB_TIMER_FAST;
	}

	switch (type) {
	case INPCB_TIMER_NODELAY:
		atomic_add_32(&ipi->ipi_gc_req.intimer_nodelay, 1);
		inpcb_sched_timeout();
		break;
	case INPCB_TIMER_FAST:
		atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
		inpcb_sched_timeout();
		break;
	default:
		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
		inpcb_sched_lazy_timeout();
		break;
	}
	lck_mtx_unlock(&inpcb_timeout_lock);
}

void
inpcb_timer_sched(struct inpcbinfo *ipi, u_int32_t type)
{
	lck_mtx_lock_spin(&inpcb_timeout_lock);
	inpcb_ticking = TRUE;
	switch (type) {
	case INPCB_TIMER_NODELAY:
		atomic_add_32(&ipi->ipi_timer_req.intimer_nodelay, 1);
		inpcb_sched_timeout();
		break;
	case INPCB_TIMER_FAST:
		atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
		inpcb_sched_timeout();
		break;
	default:
		atomic_add_32(&ipi->ipi_timer_req.intimer_lazy, 1);
		inpcb_sched_lazy_timeout();
		break;
	}
	lck_mtx_unlock(&inpcb_timeout_lock);
}

void
in_pcbinfo_attach(struct inpcbinfo *ipi)
{
	struct inpcbinfo *ipi0;

	lck_mtx_lock(&inpcb_lock);
	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
		if (ipi0 == ipi) {
			panic("%s: ipi %p already in the list\n",
			    __func__, ipi);
			/* NOTREACHED */
		}
	}
	TAILQ_INSERT_TAIL(&inpcb_head, ipi, ipi_entry);
	lck_mtx_unlock(&inpcb_lock);
}

int
in_pcbinfo_detach(struct inpcbinfo *ipi)
{
	struct inpcbinfo *ipi0;
	int error = 0;

	lck_mtx_lock(&inpcb_lock);
	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
		if (ipi0 == ipi) {
			break;
		}
	}
	if (ipi0 != NULL) {
		TAILQ_REMOVE(&inpcb_head, ipi0, ipi_entry);
	} else {
		error = ENXIO;
	}
	lck_mtx_unlock(&inpcb_lock);

	return error;
}

/*
 * Allocate a PCB and associate it with the socket.
 *
 * Returns:	0			Success
 *		ENOBUFS
 *		ENOMEM
 */
int
in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p)
{
#pragma unused(p)
	struct inpcb *inp;
	caddr_t temp;
#if CONFIG_MACF_NET
	int mac_error;
#endif /* CONFIG_MACF_NET */

	if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
		inp = (struct inpcb *)zalloc(pcbinfo->ipi_zone);
		if (inp == NULL) {
			return ENOBUFS;
		}
		bzero((caddr_t)inp, sizeof(*inp));
	} else {
		inp = (struct inpcb *)(void *)so->so_saved_pcb;
		temp = inp->inp_saved_ppcb;
		bzero((caddr_t)inp, sizeof(*inp));
		inp->inp_saved_ppcb = temp;
	}

	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	inp->inp_pcbinfo = pcbinfo;
	inp->inp_socket = so;
#if CONFIG_MACF_NET
	mac_error = mac_inpcb_label_init(inp, M_WAITOK);
	if (mac_error != 0) {
		if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
			zfree(pcbinfo->ipi_zone, inp);
		}
		return mac_error;
	}
	mac_inpcb_label_associate(so, inp);
#endif /* CONFIG_MACF_NET */
	/* make sure inp_stat is always 64-bit aligned */
	inp->inp_stat = (struct inp_stat *)P2ROUNDUP(inp->inp_stat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store) +
	    sizeof(*inp->inp_stat) > sizeof(inp->inp_stat_store)) {
		panic("%s: insufficient space to align inp_stat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_cstat is always 64-bit aligned */
	inp->inp_cstat = (struct inp_stat *)P2ROUNDUP(inp->inp_cstat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_cstat - (uintptr_t)inp->inp_cstat_store) +
	    sizeof(*inp->inp_cstat) > sizeof(inp->inp_cstat_store)) {
		panic("%s: insufficient space to align inp_cstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_wstat is always 64-bit aligned */
	inp->inp_wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_wstat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_wstat - (uintptr_t)inp->inp_wstat_store) +
	    sizeof(*inp->inp_wstat) > sizeof(inp->inp_wstat_store)) {
		panic("%s: insufficient space to align inp_wstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_Wstat is always 64-bit aligned */
	inp->inp_Wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_Wstat_store,
	    sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_Wstat - (uintptr_t)inp->inp_Wstat_store) +
	    sizeof(*inp->inp_Wstat) > sizeof(inp->inp_Wstat_store)) {
		panic("%s: insufficient space to align inp_Wstat", __func__);
		/* NOTREACHED */
	}

	so->so_pcb = (caddr_t)inp;

	if (so->so_proto->pr_flags & PR_PCBLOCK) {
		lck_mtx_init(&inp->inpcb_mtx, pcbinfo->ipi_lock_grp,
		    pcbinfo->ipi_lock_attr);
	}

#if INET6
	if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) {
		inp->inp_flags |= IN6P_IPV6_V6ONLY;
	}

	if (ip6_auto_flowlabel) {
		inp->inp_flags |= IN6P_AUTOFLOWLABEL;
	}
#endif /* INET6 */
	if (intcoproc_unrestricted) {
		inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
	}

	(void) inp_update_policy(inp);

	lck_rw_lock_exclusive(pcbinfo->ipi_lock);
	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
	pcbinfo->ipi_count++;
	lck_rw_done(pcbinfo->ipi_lock);
	return 0;
}
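
/*
 * Illustrative call site (a sketch, not copied from this tree): a
 * protocol attach routine typically reserves socket buffer space and
 * then allocates the PCB, along the lines of
 *
 *	error = soreserve(so, sendspace, recvspace);
 *	if (error == 0)
 *		error = in_pcballoc(so, &udbinfo, p);
 *
 * where "sendspace"/"recvspace" stand in for per-protocol defaults;
 * udp_attach() and tcp_attach() follow this pattern.
 */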

/*
 * in_pcblookup_local_and_cleanup does everything that
 * in_pcblookup_local does, but it also checks for a socket
 * that is going away.  Since we know that the lock is held
 * read+write when this function is called, we can safely
 * dispose of such a socket here, just as the slow timer
 * would usually do, and return NULL.  This is useful for bind.
 */
struct inpcb *
in_pcblookup_local_and_cleanup(struct inpcbinfo *pcbinfo, struct in_addr laddr,
    u_int lport_arg, int wild_okay)
{
	struct inpcb *inp;

	/* Perform normal lookup */
	inp = in_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay);

	/* Check if we found a match but it's waiting to be disposed */
	if (inp != NULL && inp->inp_wantcnt == WNT_STOPUSING) {
		struct socket *so = inp->inp_socket;

		socket_lock(so, 0);

		if (so->so_usecount == 0) {
			if (inp->inp_state != INPCB_STATE_DEAD) {
				in_pcbdetach(inp);
			}
			in_pcbdispose(inp);	/* will unlock & destroy */
			inp = NULL;
		} else {
			socket_unlock(so, 0);
		}
	}

	return inp;
}

static void
in_pcb_conflict_post_msg(u_int16_t port)
{
	/*
	 * Radar 5523020: send a kernel event notification if a
	 * non-participating socket tries to bind to a port owned by a
	 * socket that has set SOF_NOTIFYCONFLICT.
	 */
	struct kev_msg ev_msg;
	struct kev_in_portinuse in_portinuse;

	bzero(&in_portinuse, sizeof(struct kev_in_portinuse));
	bzero(&ev_msg, sizeof(struct kev_msg));
	in_portinuse.port = ntohs(port);	/* port in host order */
	in_portinuse.req_pid = proc_selfpid();
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_INET_SUBCLASS;
	ev_msg.event_code = KEV_INET_PORTINUSE;
	ev_msg.dv[0].data_ptr = &in_portinuse;
	ev_msg.dv[0].data_length = sizeof(struct kev_in_portinuse);
	ev_msg.dv[1].data_length = 0;
	dlil_post_complete_msg(NULL, &ev_msg);
}

/*
 * Bind an INPCB to an address and/or port.  This routine should not alter
 * the caller-supplied local address "nam".
 *
 * Returns:	0			Success
 *		EADDRNOTAVAIL		Address not available.
 *		EINVAL			Invalid argument
 *		EAFNOSUPPORT		Address family not supported [notdef]
 *		EACCES			Permission denied
 *		EADDRINUSE		Address in use
 *		EAGAIN			Resource unavailable, try again
 *		priv_check_cred:EPERM	Operation not permitted
 */
int
in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p)
{
	struct socket *so = inp->inp_socket;
	unsigned short *lastport;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short lport = 0, rand_port = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
	int error, randomport, conflict = 0;
	boolean_t anonport = FALSE;
	kauth_cred_t cred;
	struct in_addr laddr;
	struct ifnet *outif = NULL;

	if (TAILQ_EMPTY(&in_ifaddrhead)) {	/* XXX broken! */
		return EADDRNOTAVAIL;
	}
	if (!(so->so_options & (SO_REUSEADDR | SO_REUSEPORT))) {
		wild = 1;
	}

	bzero(&laddr, sizeof(laddr));

	socket_unlock(so, 0);	/* keep reference on socket */
	lck_rw_lock_exclusive(pcbinfo->ipi_lock);
	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
		/* another thread completed the bind */
		lck_rw_done(pcbinfo->ipi_lock);
		socket_lock(so, 0);
		return EINVAL;
	}

	if (nam != NULL) {
		if (nam->sa_len != sizeof(struct sockaddr_in)) {
			lck_rw_done(pcbinfo->ipi_lock);
			socket_lock(so, 0);
			return EINVAL;
		}
#if 0
		/*
		 * We should check the family, but old programs
		 * incorrectly fail to initialize it.
		 */
		if (nam->sa_family != AF_INET) {
			lck_rw_done(pcbinfo->ipi_lock);
			socket_lock(so, 0);
			return EAFNOSUPPORT;
		}
#endif /* 0 */
		lport = SIN(nam)->sin_port;

		if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr))) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow complete duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR) {
				reuseport = SO_REUSEADDR | SO_REUSEPORT;
			}
		} else if (SIN(nam)->sin_addr.s_addr != INADDR_ANY) {
			struct sockaddr_in sin;
			struct ifaddr *ifa;

			/* Sanitized for interface address searches */
			bzero(&sin, sizeof(sin));
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof(struct sockaddr_in);
			sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

			ifa = ifa_ifwithaddr(SA(&sin));
			if (ifa == NULL) {
				lck_rw_done(pcbinfo->ipi_lock);
				socket_lock(so, 0);
				return EADDRNOTAVAIL;
			} else {
				/*
				 * Opportunistically determine the outbound
				 * interface that may be used; this may not
				 * hold true if we end up using a route
				 * going over a different interface, e.g.
				 * when sending to a local address.  This
				 * will get updated again after sending.
				 */
				IFA_LOCK(ifa);
				outif = ifa->ifa_ifp;
				IFA_UNLOCK(ifa);
				IFA_REMREF(ifa);
			}
		}

		if (lport != 0) {
			struct inpcb *t;
			uid_t u;

#if !CONFIG_EMBEDDED
			if (ntohs(lport) < IPPORT_RESERVED &&
			    SIN(nam)->sin_addr.s_addr != 0 &&
			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) {
				cred = kauth_cred_proc_ref(p);
				error = priv_check_cred(cred,
				    PRIV_NETINET_RESERVEDPORT, 0);
				kauth_cred_unref(&cred);
				if (error != 0) {
					lck_rw_done(pcbinfo->ipi_lock);
					socket_lock(so, 0);
					return EACCES;
				}
			}
#endif /* !CONFIG_EMBEDDED */
			/*
			 * Check whether the process is allowed to bind to
			 * a restricted port.
			 */
			if (!current_task_can_use_restricted_in_port(lport,
			    so->so_proto->pr_protocol, PORT_FLAGS_BSD)) {
				lck_rw_done(pcbinfo->ipi_lock);
				socket_lock(so, 0);
				return EADDRINUSE;
			}

			if (!IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
			    (u = kauth_cred_getuid(so->so_cred)) != 0 &&
			    (t = in_pcblookup_local_and_cleanup(
				    inp->inp_pcbinfo, SIN(nam)->sin_addr, lport,
				    INPLOOKUP_WILDCARD)) != NULL &&
			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
			    t->inp_laddr.s_addr != INADDR_ANY ||
			    !(t->inp_socket->so_options & SO_REUSEPORT)) &&
			    (u != kauth_cred_getuid(t->inp_socket->so_cred)) &&
			    !(t->inp_socket->so_flags & SOF_REUSESHAREUID) &&
			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
			    t->inp_laddr.s_addr != INADDR_ANY) &&
			    (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
				if ((t->inp_socket->so_flags &
				    SOF_NOTIFYCONFLICT) &&
				    !(so->so_flags & SOF_NOTIFYCONFLICT)) {
					conflict = 1;
				}

				lck_rw_done(pcbinfo->ipi_lock);

				if (conflict) {
					in_pcb_conflict_post_msg(lport);
				}

				socket_lock(so, 0);
				return EADDRINUSE;
			}
			t = in_pcblookup_local_and_cleanup(pcbinfo,
			    SIN(nam)->sin_addr, lport, wild);
			if (t != NULL &&
			    (reuseport & t->inp_socket->so_options) == 0 &&
			    (!(t->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
			    uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) {
#if INET6
				if (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
				    t->inp_laddr.s_addr != INADDR_ANY ||
				    SOCK_DOM(so) != PF_INET6 ||
				    SOCK_DOM(t->inp_socket) != PF_INET6)
#endif /* INET6 */
				{
					if ((t->inp_socket->so_flags &
					    SOF_NOTIFYCONFLICT) &&
					    !(so->so_flags & SOF_NOTIFYCONFLICT)) {
						conflict = 1;
					}

					lck_rw_done(pcbinfo->ipi_lock);

					if (conflict) {
						in_pcb_conflict_post_msg(lport);
					}
					socket_lock(so, 0);
					return EADDRINUSE;
				}
			}
		}
		laddr = SIN(nam)->sin_addr;
	}
	if (lport == 0) {
		u_short first, last;
		int count;
		bool found;

		/*
		 * Override wild = 1 for implicit bind (mainly used by
		 * connect).  For an implicit bind (lport == 0) we always
		 * use an unused port, so SO_REUSEADDR/SO_REUSEPORT do
		 * not apply.
		 */
		wild = 1;

		randomport = (so->so_flags & SOF_BINDRANDOMPORT) ||
		    (so->so_type == SOCK_STREAM ? tcp_use_randomport :
		    udp_use_randomport);

		/*
		 * Even though this looks similar to the code in
		 * in6_pcbsetport, the v6 vs v4 checks are different.
		 */
		anonport = TRUE;
		if (inp->inp_flags & INP_HIGHPORT) {
			first = ipport_hifirstauto;	/* sysctl */
			last = ipport_hilastauto;
			lastport = &pcbinfo->ipi_lasthi;
		} else if (inp->inp_flags & INP_LOWPORT) {
			cred = kauth_cred_proc_ref(p);
			error = priv_check_cred(cred,
			    PRIV_NETINET_RESERVEDPORT, 0);
			kauth_cred_unref(&cred);
			if (error != 0) {
				lck_rw_done(pcbinfo->ipi_lock);
				socket_lock(so, 0);
				return error;
			}
			first = ipport_lowfirstauto;	/* 1023 */
			last = ipport_lowlastauto;	/* 600 */
			lastport = &pcbinfo->ipi_lastlow;
		} else {
			first = ipport_firstauto;	/* sysctl */
			last = ipport_lastauto;
			lastport = &pcbinfo->ipi_lastport;
		}
		/* No point in randomizing if only one port is available */
		if (first == last) {
			randomport = 0;
		}
		/*
		 * Simple check to ensure that all ports are not used up,
		 * which would cause a deadlock here.
		 *
		 * We split the two cases (up and down) so that the
		 * direction is not tested on each round of the loop.
		 */
		if (first > last) {
			struct in_addr lookup_addr;

			/*
			 * counting down
			 */
			if (randomport) {
				read_frandom(&rand_port, sizeof(rand_port));
				*lastport =
				    first - (rand_port % (first - last));
			}
			count = first - last;

			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
			    inp->inp_laddr;

			found = false;
			do {
				if (count-- < 0) {	/* completely used? */
					lck_rw_done(pcbinfo->ipi_lock);
					socket_lock(so, 0);
					return EADDRNOTAVAIL;
				}
				--*lastport;
				if (*lastport > first || *lastport < last) {
					*lastport = first;
				}
				lport = htons(*lastport);

				/*
				 * Skip if this is a restricted port, as we
				 * do not want to use restricted ports as
				 * ephemeral ports.
				 */
				if (IS_RESTRICTED_IN_PORT(lport)) {
					continue;
				}

				found = in_pcblookup_local_and_cleanup(pcbinfo,
				    lookup_addr, lport, wild) == NULL;
			} while (!found);
		} else {
			struct in_addr lookup_addr;

			/*
			 * counting up
			 */
			if (randomport) {
				read_frandom(&rand_port, sizeof(rand_port));
				*lastport =
				    first + (rand_port % (first - last));
			}
			count = last - first;

			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
			    inp->inp_laddr;

			found = false;
			do {
				if (count-- < 0) {	/* completely used? */
					lck_rw_done(pcbinfo->ipi_lock);
					socket_lock(so, 0);
					return EADDRNOTAVAIL;
				}
				++*lastport;
				if (*lastport < first || *lastport > last) {
					*lastport = first;
				}
				lport = htons(*lastport);

				/*
				 * Skip if this is a restricted port, as we
				 * do not want to use restricted ports as
				 * ephemeral ports.
				 */
				if (IS_RESTRICTED_IN_PORT(lport)) {
					continue;
				}

				found = in_pcblookup_local_and_cleanup(pcbinfo,
				    lookup_addr, lport, wild) == NULL;
			} while (!found);
		}
	}
	socket_lock(so, 0);

	/*
	 * We unlocked the socket's protocol lock for a long time.
	 * The socket might have been dropped/defuncted.
	 * Check whether the world has changed since.
	 */
	if (inp->inp_state == INPCB_STATE_DEAD) {
		lck_rw_done(pcbinfo->ipi_lock);
		return ECONNABORTED;
	}

	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
		lck_rw_done(pcbinfo->ipi_lock);
		return EINVAL;
	}

	if (laddr.s_addr != INADDR_ANY) {
		inp->inp_laddr = laddr;
		inp->inp_last_outifp = outif;
	}
	inp->inp_lport = lport;
	if (anonport) {
		inp->inp_flags |= INP_ANONPORT;
	}

	if (in_pcbinshash(inp, 1) != 0) {
		inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_last_outifp = NULL;

		inp->inp_lport = 0;
		if (anonport) {
			inp->inp_flags &= ~INP_ANONPORT;
		}
		lck_rw_done(pcbinfo->ipi_lock);
		return EAGAIN;
	}
	lck_rw_done(pcbinfo->ipi_lock);
	sflt_notify(so, sock_evt_bound, NULL);
	return 0;
}
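
/*
 * Worked example of the ephemeral-port search above (counting up):
 * with first = 49152 and last = 65535, a random starting point of,
 * say, 53000 probes 53001, 53002, ... wrapping back to 49152 past
 * 65535, and gives up with EADDRNOTAVAIL after roughly (last - first)
 * candidates; restricted ports are skipped but still consume a probe.
 */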

#define	APN_FALLBACK_IP_FILTER(a)	\
	(IN_LINKLOCAL(ntohl((a)->sin_addr.s_addr)) || \
	 IN_LOOPBACK(ntohl((a)->sin_addr.s_addr)) || \
	 IN_ZERONET(ntohl((a)->sin_addr.s_addr)) || \
	 IN_MULTICAST(ntohl((a)->sin_addr.s_addr)) || \
	 IN_PRIVATE(ntohl((a)->sin_addr.s_addr)))

#define	APN_FALLBACK_NOTIF_INTERVAL	2 /* Magic Number */
static uint64_t last_apn_fallback = 0;

static boolean_t
apn_fallback_required(proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4)
{
	uint64_t timenow;
	struct sockaddr_storage lookup_default_addr;
	struct rtentry *rt = NULL;

	VERIFY(proc != NULL);

	if (apn_fallbk_enabled == FALSE) {
		return FALSE;
	}

	if (proc == kernproc) {
		return FALSE;
	}

	if (so && (so->so_options & SO_NOAPNFALLBK)) {
		return FALSE;
	}

	timenow = net_uptime();
	if ((timenow - last_apn_fallback) < APN_FALLBACK_NOTIF_INTERVAL) {
		apn_fallbk_log((LOG_INFO, "APN fallback notification throttled.\n"));
		return FALSE;
	}

	if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4)) {
		return FALSE;
	}

	/* Check if we have an unscoped IPv6 default route through cellular */
	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
	lookup_default_addr.ss_family = AF_INET6;
	lookup_default_addr.ss_len = sizeof(struct sockaddr_in6);

	rt = rtalloc1((struct sockaddr *)&lookup_default_addr, 0, 0);
	if (NULL == rt) {
		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
		    "unscoped default IPv6 route.\n"));
		return FALSE;
	}

	if (!IFNET_IS_CELLULAR(rt->rt_ifp)) {
		rtfree(rt);
		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
		    "unscoped default IPv6 route through cellular interface.\n"));
		return FALSE;
	}

	/*
	 * We have a default IPv6 route; ensure that we do not have an
	 * IPv4 default route before triggering the event.
	 */
	rtfree(rt);
	rt = NULL;

	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
	lookup_default_addr.ss_family = AF_INET;
	lookup_default_addr.ss_len = sizeof(struct sockaddr_in);

	rt = rtalloc1((struct sockaddr *)&lookup_default_addr, 0, 0);

	if (rt) {
		rtfree(rt);
		rt = NULL;
		apn_fallbk_log((LOG_INFO, "APN fallback notification found unscoped "
		    "IPv4 default route!\n"));
		return FALSE;
	}

	{
		/*
		 * We disable APN fallback if the binary is not a third-party app.
		 * Note that platform daemons use their process name as a
		 * bundle ID, so we filter out bundle IDs without dots.
		 */
		const char *bundle_id = cs_identity_get(proc);
		if (bundle_id == NULL ||
		    bundle_id[0] == '\0' ||
		    strchr(bundle_id, '.') == NULL ||
		    strncmp(bundle_id, "com.apple.", sizeof("com.apple.") - 1) == 0) {
			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found first-"
			    "party bundle ID \"%s\"!\n", (bundle_id ? bundle_id : "NULL")));
			return FALSE;
		}
	}

	{
		/*
		 * The Apple App Store IPv6 requirement started on
		 * June 1st, 2016 at 12:00:00 AM PDT.
		 * We disable APN fallback if the binary is more recent than that.
		 * We check both atime and birthtime since birthtime is not
		 * always supported.
		 */
		static const long ipv6_start_date = 1464764400L;
		vfs_context_t context;
		struct stat64 sb;
		int vn_stat_error;

		bzero(&sb, sizeof(struct stat64));
		context = vfs_context_create(NULL);
		vn_stat_error = vn_stat(proc->p_textvp, &sb, NULL, 1, 0, context);
		(void)vfs_context_rele(context);

		if (vn_stat_error != 0 ||
		    sb.st_atimespec.tv_sec >= ipv6_start_date ||
		    sb.st_birthtimespec.tv_sec >= ipv6_start_date) {
			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found binary "
			    "too recent! (err %d atime %ld mtime %ld ctime %ld birthtime %ld)\n",
			    vn_stat_error, sb.st_atimespec.tv_sec, sb.st_mtimespec.tv_sec,
			    sb.st_ctimespec.tv_sec, sb.st_birthtimespec.tv_sec));
			return FALSE;
		}
	}
	return TRUE;
}

static void
apn_fallback_trigger(proc_t proc, struct socket *so)
{
	pid_t pid = 0;
	struct kev_msg ev_msg;
	struct kev_netevent_apnfallbk_data apnfallbk_data;

	last_apn_fallback = net_uptime();
	pid = proc_pid(proc);
	uuid_t application_uuid;
	uuid_clear(application_uuid);
	proc_getexecutableuuid(proc, application_uuid,
	    sizeof(application_uuid));

	bzero(&ev_msg, sizeof(struct kev_msg));
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NETEVENT_SUBCLASS;
	ev_msg.event_code = KEV_NETEVENT_APNFALLBACK;

	bzero(&apnfallbk_data, sizeof(apnfallbk_data));

	if (so->so_flags & SOF_DELEGATED) {
		apnfallbk_data.epid = so->e_pid;
		uuid_copy(apnfallbk_data.euuid, so->e_uuid);
	} else {
		apnfallbk_data.epid = so->last_pid;
		uuid_copy(apnfallbk_data.euuid, so->last_uuid);
	}

	ev_msg.dv[0].data_ptr = &apnfallbk_data;
	ev_msg.dv[0].data_length = sizeof(apnfallbk_data);
	kev_post_msg(&ev_msg);
	apn_fallbk_log((LOG_INFO, "APN fallback notification issued.\n"));
}

/*
 * Transform old in_pcbconnect() into an inner subroutine for new
 * in_pcbconnect(); do some validity-checking on the remote address
 * (in "nam") and then determine local host address (i.e., which
 * interface) to use to access that remote host.
 *
 * This routine may alter the caller-supplied remote address "nam".
 *
 * The caller may override the bound-to-interface setting of the socket
 * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
 *
 * This routine might return an ifp with a reference held if the caller
 * provides a non-NULL outif, even in the error case.  The caller is
 * responsible for releasing its reference.
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *		EAFNOSUPPORT		Address family not supported
 *		EADDRNOTAVAIL		Address not available
 */
int
in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr,
    unsigned int ifscope, struct ifnet **outif, int raw)
{
	struct route *ro = &inp->inp_route;
	struct in_ifaddr *ia = NULL;
	struct sockaddr_in sin;
	int error = 0;
	boolean_t restricted = FALSE;

	if (outif != NULL) {
		*outif = NULL;
	}
	if (nam->sa_len != sizeof(struct sockaddr_in)) {
		return EINVAL;
	}
	if (SIN(nam)->sin_family != AF_INET) {
		return EAFNOSUPPORT;
	}
	if (raw == 0 && SIN(nam)->sin_port == 0) {
		return EADDRNOTAVAIL;
	}

	/*
	 * If the destination address is INADDR_ANY,
	 * use the primary local address.
	 * If the supplied address is INADDR_BROADCAST,
	 * and the primary interface supports broadcast,
	 * choose the broadcast address for that interface.
	 */
	if (raw == 0 && (SIN(nam)->sin_addr.s_addr == INADDR_ANY ||
	    SIN(nam)->sin_addr.s_addr == (u_int32_t)INADDR_BROADCAST)) {
		lck_rw_lock_shared(in_ifaddr_rwlock);
		if (!TAILQ_EMPTY(&in_ifaddrhead)) {
			ia = TAILQ_FIRST(&in_ifaddrhead);
			IFA_LOCK_SPIN(&ia->ia_ifa);
			if (SIN(nam)->sin_addr.s_addr == INADDR_ANY) {
				SIN(nam)->sin_addr = IA_SIN(ia)->sin_addr;
			} else if (ia->ia_ifp->if_flags & IFF_BROADCAST) {
				SIN(nam)->sin_addr =
				    SIN(&ia->ia_broadaddr)->sin_addr;
			}
			IFA_UNLOCK(&ia->ia_ifa);
			ia = NULL;
		}
		lck_rw_done(in_ifaddr_rwlock);
	}
	/*
	 * Otherwise, if the socket has already bound the source, just use it.
	 */
	if (inp->inp_laddr.s_addr != INADDR_ANY) {
		VERIFY(ia == NULL);
		*laddr = inp->inp_laddr;
		return 0;
	}

	/*
	 * If the ifscope is specified by the caller (e.g. IP_PKTINFO)
	 * then it overrides the sticky ifscope set for the socket.
	 */
	if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) {
		ifscope = inp->inp_boundifp->if_index;
	}

	/*
	 * If route is known or can be allocated now,
	 * our src addr is taken from the i/f, else punt.
	 * Note that we should check the address family of the cached
	 * destination, in case of sharing the cache with IPv6.
	 */
	if (ro->ro_rt != NULL) {
		RT_LOCK_SPIN(ro->ro_rt);
	}
	if (ROUTE_UNUSABLE(ro) || ro->ro_dst.sa_family != AF_INET ||
	    SIN(&ro->ro_dst)->sin_addr.s_addr != SIN(nam)->sin_addr.s_addr ||
	    (inp->inp_socket->so_options & SO_DONTROUTE)) {
		if (ro->ro_rt != NULL) {
			RT_UNLOCK(ro->ro_rt);
		}
		ROUTE_RELEASE(ro);
	}
	if (!(inp->inp_socket->so_options & SO_DONTROUTE) &&
	    (ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) {
		if (ro->ro_rt != NULL) {
			RT_UNLOCK(ro->ro_rt);
		}
		ROUTE_RELEASE(ro);
		/* No route yet, so try to acquire one */
		bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
		ro->ro_dst.sa_family = AF_INET;
		ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
		SIN(&ro->ro_dst)->sin_addr = SIN(nam)->sin_addr;
		rtalloc_scoped(ro, ifscope);
		if (ro->ro_rt != NULL) {
			RT_LOCK_SPIN(ro->ro_rt);
		}
	}
	/* Sanitized local copy for interface address searches */
	bzero(&sin, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
	/*
	 * If we did not find (or use) a route, assume dest is reachable
	 * on a directly connected network and try to find a corresponding
	 * interface to take the source address from.
	 */
	if (ro->ro_rt == NULL) {
		proc_t proc = current_proc();

		VERIFY(ia == NULL);
		ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
		if (ia == NULL) {
			ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
		}
		error = ((ia == NULL) ? ENETUNREACH : 0);

		if (apn_fallback_required(proc, inp->inp_socket,
		    (void *)nam)) {
			apn_fallback_trigger(proc, inp->inp_socket);
		}

		goto done;
	}
	RT_LOCK_ASSERT_HELD(ro->ro_rt);
	/*
	 * If the outgoing interface on the route found is not
	 * a loopback interface, use the address from that interface.
	 */
	if (!(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)) {
		VERIFY(ia == NULL);
		/*
		 * If the route points to a cellular interface and the
		 * caller forbids our using interfaces of such type,
		 * pretend that there is no route.
		 * Apply the same logic for expensive interfaces.
		 */
		if (inp_restricted_send(inp, ro->ro_rt->rt_ifp)) {
			RT_UNLOCK(ro->ro_rt);
			ROUTE_RELEASE(ro);
			error = EHOSTUNREACH;
			restricted = TRUE;
		} else {
			/* Become a regular mutex */
			RT_CONVERT_LOCK(ro->ro_rt);
			ia = ifatoia(ro->ro_rt->rt_ifa);
			IFA_ADDREF(&ia->ia_ifa);

			/*
			 * Mark the control block for notification of
			 * a possible flow that might undergo clat46
			 * translation.
			 *
			 * We defer the decision to a later point, when
			 * the inpcb is being disposed of.  The reason
			 * is that we only want to send the notification
			 * if the flow was ever used to send data.
			 */
			if (IS_INTF_CLAT46(ro->ro_rt->rt_ifp)) {
				inp->inp_flags2 |= INP2_CLAT46_FLOW;
			}

			RT_UNLOCK(ro->ro_rt);
			error = 0;
		}
		goto done;
	}
	VERIFY(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK);
	RT_UNLOCK(ro->ro_rt);
	/*
	 * The outgoing interface is marked with 'loopback net', so a
	 * route to ourselves is here.
	 * Try to find the interface of the destination address and then
	 * take the address from there.  That interface is not necessarily
	 * a loopback interface.
	 */
	VERIFY(ia == NULL);
	ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
	if (ia == NULL) {
		ia = ifatoia(ifa_ifwithaddr_scoped(SA(&sin), ifscope));
	}
	if (ia == NULL) {
		ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
	}
	if (ia == NULL) {
		RT_LOCK(ro->ro_rt);
		ia = ifatoia(ro->ro_rt->rt_ifa);
		if (ia != NULL) {
			IFA_ADDREF(&ia->ia_ifa);
		}
		RT_UNLOCK(ro->ro_rt);
	}
	error = ((ia == NULL) ? ENETUNREACH : 0);

done:
	/*
	 * If the destination address is multicast and an outgoing
	 * interface has been set as a multicast option, use the
	 * address of that interface as our source address.
	 */
	if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
	    inp->inp_moptions != NULL) {
		struct ip_moptions *imo;
		struct ifnet *ifp;

		imo = inp->inp_moptions;
		IMO_LOCK(imo);
		if (imo->imo_multicast_ifp != NULL && (ia == NULL ||
		    ia->ia_ifp != imo->imo_multicast_ifp)) {
			ifp = imo->imo_multicast_ifp;
			if (ia != NULL) {
				IFA_REMREF(&ia->ia_ifa);
			}
			lck_rw_lock_shared(in_ifaddr_rwlock);
			TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
				if (ia->ia_ifp == ifp) {
					break;
				}
			}
			if (ia != NULL) {
				IFA_ADDREF(&ia->ia_ifa);
			}
			lck_rw_done(in_ifaddr_rwlock);
			if (ia == NULL) {
				error = EADDRNOTAVAIL;
			} else {
				error = 0;
			}
		}
		IMO_UNLOCK(imo);
	}
	/*
	 * Don't do a pcblookup call here; return the interface in laddr
	 * and exit to the caller, who will do the lookup.
	 */
	if (ia != NULL) {
		/*
		 * If the source address belongs to a cellular interface
		 * and the socket forbids our using interfaces of such
		 * type, pretend that there is no source address.
		 * Apply the same logic for expensive interfaces.
		 */
		IFA_LOCK_SPIN(&ia->ia_ifa);
		if (inp_restricted_send(inp, ia->ia_ifa.ifa_ifp)) {
			IFA_UNLOCK(&ia->ia_ifa);
			error = EHOSTUNREACH;
			restricted = TRUE;
		} else if (error == 0) {
			*laddr = ia->ia_addr.sin_addr;
			if (outif != NULL) {
				struct ifnet *ifp;

				if (ro->ro_rt != NULL) {
					ifp = ro->ro_rt->rt_ifp;
				} else {
					ifp = ia->ia_ifp;
				}

				VERIFY(ifp != NULL);
				IFA_CONVERT_LOCK(&ia->ia_ifa);
				ifnet_reference(ifp);	/* for caller */
				if (*outif != NULL) {
					ifnet_release(*outif);
				}
				*outif = ifp;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		} else {
			IFA_UNLOCK(&ia->ia_ifa);
		}
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}

	if (restricted && error == EHOSTUNREACH) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED |
		    SO_FILT_HINT_IFDENIED));
	}

	return error;
}
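
/*
 * Recap of the source-address selection order implemented above: an
 * already-bound local address always wins; otherwise the address comes
 * from the (possibly freshly allocated) route's outgoing interface,
 * falling back to a directly connected interface when no route exists,
 * with a multicast option's outgoing interface able to override the
 * choice at the end.
 */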

/*
 * Outer subroutine:
 * Connect from a socket to a specified address.
 * Both address and port must be specified in argument sin.
 * If we don't have a local address for this socket yet,
 * then pick one.
 *
 * The caller may override the bound-to-interface setting of the socket
 * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
 */
int
in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p,
    unsigned int ifscope, struct ifnet **outif)
{
	struct in_addr laddr;
	struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;
	struct inpcb *pcb;
	int error;
	struct socket *so = inp->inp_socket;

#if CONTENT_FILTER
	if (so) {
		so->so_state_change_cnt++;
	}
#endif

	/*
	 * Call inner routine, to assign local interface address.
	 */
	if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0) {
		return error;
	}

	socket_unlock(so, 0);
	pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port,
	    inp->inp_laddr.s_addr ? inp->inp_laddr : laddr,
	    inp->inp_lport, 0, NULL);
	socket_lock(so, 0);

	/*
	 * Check if the socket is still in a valid state.  When we unlock
	 * this embryonic socket, it can get aborted if another thread is
	 * closing the listener (radar 7947600).
	 */
	if ((so->so_flags & SOF_ABORTED) != 0) {
		return ECONNREFUSED;
	}

	if (pcb != NULL) {
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return EADDRINUSE;
	}
	if (inp->inp_laddr.s_addr == INADDR_ANY) {
		if (inp->inp_lport == 0) {
			error = in_pcbbind(inp, NULL, p);
			if (error) {
				return error;
			}
		}
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
		inp->inp_laddr = laddr;
		/* no reference needed */
		inp->inp_last_outifp = (outif != NULL) ? *outif : NULL;
		inp->inp_flags |= INP_INADDR_ANY;
	} else {
		/*
		 * Usage of IP_PKTINFO without a local port already
		 * specified will cause the kernel to panic; see
		 * rdar://problem/18508185.  For now return an error
		 * to avoid a kernel panic.  This routine can be
		 * refactored to handle this better in the future.
		 */
		if (inp->inp_lport == 0) {
			return EINVAL;
		}
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
	}
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
		nstat_pcb_invalidate_cache(inp);
	}
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);
	return 0;
}
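
/*
 * Usage sketch (illustrative, not an actual call site from this file):
 * a datagram protocol's pru_connect entry point typically forwards
 * here with the PCB already locked, e.g.
 *
 *	error = in_pcbconnect(inp, nam, p, IFSCOPE_NONE, NULL);
 *
 * which is essentially what udp_connect() does.
 */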

void
in_pcbdisconnect(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) {
		nstat_pcb_cache(inp);
	}

	inp->inp_faddr.s_addr = INADDR_ANY;
	inp->inp_fport = 0;

#if CONTENT_FILTER
	if (so) {
		so->so_state_change_cnt++;
	}
#endif

	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(so, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
		socket_lock(so, 0);
	}

	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);
	/*
	 * A multipath subflow socket would have its SS_NOFDREF set by default,
	 * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB;
	 * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared.
	 */
	if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) {
		in_pcbdetach(inp);
	}
}

void
in_pcbdetach(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (so->so_pcb == NULL) {
		/* PCB has been disposed */
		panic("%s: inp=%p so=%p proto=%d so_pcb is null!\n", __func__,
		    inp, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

#if IPSEC
	if (inp->inp_sp != NULL) {
		(void) ipsec4_delete_pcbpolicy(inp);
	}
#endif /* IPSEC */

	if (inp->inp_stat != NULL && SOCK_PROTO(so) == IPPROTO_UDP) {
		if (inp->inp_stat->rxpackets == 0 && inp->inp_stat->txpackets == 0) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_no_data);
		}
	}

	/*
	 * Let NetworkStatistics know this PCB is going away
	 * before we detach it.
	 */
	if (nstat_collect &&
	    (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) {
		nstat_pcb_detach(inp);
	}

	/* Free memory buffer held for generating keep alives */
	if (inp->inp_keepalive_data != NULL) {
		FREE(inp->inp_keepalive_data, M_TEMP);
		inp->inp_keepalive_data = NULL;
	}

	/* mark socket state as dead */
	if (in_pcb_checkstate(inp, WNT_STOPUSING, 1) != WNT_STOPUSING) {
		panic("%s: so=%p proto=%d couldn't set to STOPUSING\n",
		    __func__, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

	if (!(so->so_flags & SOF_PCBCLEARING)) {
		struct ip_moptions *imo;

		inp->inp_vflag = 0;
		if (inp->inp_options != NULL) {
			(void) m_free(inp->inp_options);
			inp->inp_options = NULL;
		}
		ROUTE_RELEASE(&inp->inp_route);
		imo = inp->inp_moptions;
		inp->inp_moptions = NULL;
		sofreelastref(so, 0);
		inp->inp_state = INPCB_STATE_DEAD;

		/*
		 * Enqueue an event to send a kernel event notification
		 * if the flow had to CLAT46 its data packets.
		 */
		if (inp->inp_flags2 & INP2_CLAT46_FLOW) {
			/*
			 * If there has been any exchange of data bytes
			 * over this flow, schedule a notification to
			 * report that the flow is using client-side
			 * translation.
			 */
			if (inp->inp_stat != NULL &&
			    (inp->inp_stat->txbytes != 0 ||
			    inp->inp_stat->rxbytes != 0)) {
				if (so->so_flags & SOF_DELEGATED) {
					in6_clat46_event_enqueue_nwk_wq_entry(
						IN6_CLAT46_EVENT_V4_FLOW,
						so->e_pid,
						so->e_uuid);
				} else {
					in6_clat46_event_enqueue_nwk_wq_entry(
						IN6_CLAT46_EVENT_V4_FLOW,
						so->last_pid,
						so->last_uuid);
				}
			}
		}

		/* makes sure we're not called twice from so_close */
		so->so_flags |= SOF_PCBCLEARING;

		inpcb_gc_sched(inp->inp_pcbinfo, INPCB_TIMER_FAST);

		/*
		 * See inp_join_group() for why we need to unlock
		 */
		if (imo != NULL) {
			socket_unlock(so, 0);
			IMO_REMREF(imo);
			socket_lock(so, 0);
		}
	}
}
1836
1837
1838 void
1839 in_pcbdispose(struct inpcb *inp)
1840 {
1841 struct socket *so = inp->inp_socket;
1842 struct inpcbinfo *ipi = inp->inp_pcbinfo;
1843
1844 if (so != NULL && so->so_usecount != 0) {
1845 panic("%s: so %p [%d,%d] usecount %d lockhistory %s\n",
1846 __func__, so, SOCK_DOM(so), SOCK_TYPE(so), so->so_usecount,
1847 solockhistory_nr(so));
1848 /* NOTREACHED */
1849 } else if (inp->inp_wantcnt != WNT_STOPUSING) {
1850 if (so != NULL) {
1851 panic_plain("%s: inp %p invalid wantcnt %d, so %p "
1852 "[%d,%d] usecount %d retaincnt %d state 0x%x "
1853 "flags 0x%x lockhistory %s\n", __func__, inp,
1854 inp->inp_wantcnt, so, SOCK_DOM(so), SOCK_TYPE(so),
1855 so->so_usecount, so->so_retaincnt, so->so_state,
1856 so->so_flags, solockhistory_nr(so));
1857 /* NOTREACHED */
1858 } else {
1859 panic("%s: inp %p invalid wantcnt %d no socket\n",
1860 __func__, inp, inp->inp_wantcnt);
1861 /* NOTREACHED */
1862 }
1863 }
1864
1865 LCK_RW_ASSERT(ipi->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
1866
1867 inp->inp_gencnt = ++ipi->ipi_gencnt;
1868 /* access ipi in in_pcbremlists */
1869 in_pcbremlists(inp);
1870
1871 if (so != NULL) {
1872 if (so->so_proto->pr_flags & PR_PCBLOCK) {
1873 sofreelastref(so, 0);
1874 if (so->so_rcv.sb_cc > 0 || so->so_snd.sb_cc > 0) {
1875 /*
1876 * selthreadclear() already called
1877 * during sofreelastref() above.
1878 */
1879 sbrelease(&so->so_rcv);
1880 sbrelease(&so->so_snd);
1881 }
1882 if (so->so_head != NULL) {
1883 panic("%s: so=%p head still exists\n",
1884 __func__, so);
1885 /* NOTREACHED */
1886 }
1887 lck_mtx_unlock(&inp->inpcb_mtx);
1888
1889 #if NECP
1890 necp_inpcb_remove_cb(inp);
1891 #endif /* NECP */
1892
1893 lck_mtx_destroy(&inp->inpcb_mtx, ipi->ipi_lock_grp);
1894 }
1895 /* make sure we're not called twice from so_close */
1896 so->so_flags |= SOF_PCBCLEARING;
1897 so->so_saved_pcb = (caddr_t)inp;
1898 so->so_pcb = NULL;
1899 inp->inp_socket = NULL;
1900 #if CONFIG_MACF_NET
1901 mac_inpcb_label_destroy(inp);
1902 #endif /* CONFIG_MACF_NET */
1903 #if NECP
1904 necp_inpcb_dispose(inp);
1905 #endif /* NECP */
1906 /*
1907 * In case there is a route cached after a detach (possible
1908 * in the tcp case), make sure that it is freed before
1909 * we deallocate the structure.
1910 */
1911 ROUTE_RELEASE(&inp->inp_route);
1912 if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
1913 zfree(ipi->ipi_zone, inp);
1914 }
1915 sodealloc(so);
1916 }
1917 }
1918
1919 /*
1920 * The calling convention of in_getsockaddr() and in_getpeeraddr() was
1921 * modified to match the pru_sockaddr() and pru_peeraddr() entry points
1922 * in struct pr_usrreqs, so that protocols can just reference them directly
1923 * without the need for a wrapper function.
1924 */
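/*
 * For illustration only (hypothetical protocol, not part of this
 * file): with the matching calling convention, a protocol can plug
 * these functions straight into its pr_usrreqs table, e.g.
 *
 *	static struct pr_usrreqs foo_usrreqs = {
 *		.pru_sockaddr = in_getsockaddr,
 *		.pru_peeraddr = in_getpeeraddr,
 *		// ... remaining entry points ...
 *	};
 */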
1925 int
1926 in_getsockaddr(struct socket *so, struct sockaddr **nam)
1927 {
1928 struct inpcb *inp;
1929 struct sockaddr_in *sin;
1930
1931 /*
1932 * Do the malloc first in case it blocks.
1933 */
1934 MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK);
1935 if (sin == NULL) {
1936 return ENOBUFS;
1937 }
1938 bzero(sin, sizeof(*sin));
1939 sin->sin_family = AF_INET;
1940 sin->sin_len = sizeof(*sin);
1941
1942 if ((inp = sotoinpcb(so)) == NULL) {
1943 FREE(sin, M_SONAME);
1944 return EINVAL;
1945 }
1946 sin->sin_port = inp->inp_lport;
1947 sin->sin_addr = inp->inp_laddr;
1948
1949 *nam = (struct sockaddr *)sin;
1950 return 0;
1951 }
1952
1953 int
1954 in_getsockaddr_s(struct socket *so, struct sockaddr_in *ss)
1955 {
1956 struct sockaddr_in *sin = ss;
1957 struct inpcb *inp;
1958
1959 VERIFY(ss != NULL);
1960 bzero(ss, sizeof(*ss));
1961
1962 sin->sin_family = AF_INET;
1963 sin->sin_len = sizeof(*sin);
1964
1965 if ((inp = sotoinpcb(so)) == NULL) {
1966 return EINVAL;
1967 }
1968
1969 sin->sin_port = inp->inp_lport;
1970 sin->sin_addr = inp->inp_laddr;
1971 return 0;
1972 }
1973
1974 int
1975 in_getpeeraddr(struct socket *so, struct sockaddr **nam)
1976 {
1977 struct inpcb *inp;
1978 struct sockaddr_in *sin;
1979
1980 /*
1981 * Do the malloc first in case it blocks.
1982 */
1983 MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK);
1984 if (sin == NULL) {
1985 return ENOBUFS;
1986 }
1987 bzero((caddr_t)sin, sizeof(*sin));
1988 sin->sin_family = AF_INET;
1989 sin->sin_len = sizeof(*sin);
1990
1991 if ((inp = sotoinpcb(so)) == NULL) {
1992 FREE(sin, M_SONAME);
1993 return EINVAL;
1994 }
1995 sin->sin_port = inp->inp_fport;
1996 sin->sin_addr = inp->inp_faddr;
1997
1998 *nam = (struct sockaddr *)sin;
1999 return 0;
2000 }
2001
2002 void
2003 in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2004 int errno, void (*notify)(struct inpcb *, int))
2005 {
2006 struct inpcb *inp;
2007
2008 lck_rw_lock_shared(pcbinfo->ipi_lock);
2009
2010 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
2011 #if INET6
2012 if (!(inp->inp_vflag & INP_IPV4)) {
2013 continue;
2014 }
2015 #endif /* INET6 */
2016 if (inp->inp_faddr.s_addr != faddr.s_addr ||
2017 inp->inp_socket == NULL) {
2018 continue;
2019 }
2020 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2021 continue;
2022 }
2023 socket_lock(inp->inp_socket, 1);
2024 (*notify)(inp, errno);
2025 (void) in_pcb_checkstate(inp, WNT_RELEASE, 1);
2026 socket_unlock(inp->inp_socket, 1);
2027 }
2028 lck_rw_done(pcbinfo->ipi_lock);
2029 }
2030
2031 /*
2032 * Check for alternatives when higher level complains
2033 * about service problems. For now, invalidate cached
2034 * routing information. If the route was created dynamically
2035 * (by a redirect), time to try a default gateway again.
2036 */
2037 void
2038 in_losing(struct inpcb *inp)
2039 {
2040 boolean_t release = FALSE;
2041 struct rtentry *rt;
2042
2043 if ((rt = inp->inp_route.ro_rt) != NULL) {
2044 struct in_ifaddr *ia = NULL;
2045
2046 RT_LOCK(rt);
2047 if (rt->rt_flags & RTF_DYNAMIC) {
2048 /*
2049 * Prevent another thread from modifying rt_key,
2050 * rt_gateway via rt_setgate() after rt_lock is
2051 * dropped by marking the route as defunct.
2052 */
2053 rt->rt_flags |= RTF_CONDEMNED;
2054 RT_UNLOCK(rt);
2055 (void) rtrequest(RTM_DELETE, rt_key(rt),
2056 rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
2057 } else {
2058 RT_UNLOCK(rt);
2059 }
2060 /* if the address is gone keep the old route in the pcb */
2061 if (inp->inp_laddr.s_addr != INADDR_ANY &&
2062 (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
2063 /*
2064 * Address is around; ditch the route. A new route
2065 * can be allocated the next time output is attempted.
2066 */
2067 release = TRUE;
2068 }
2069 if (ia != NULL) {
2070 IFA_REMREF(&ia->ia_ifa);
2071 }
2072 }
2073 if (rt == NULL || release) {
2074 ROUTE_RELEASE(&inp->inp_route);
2075 }
2076 }
2077
2078 /*
2079 * After a routing change, flush old routing
2080 * and allocate a (hopefully) better one.
2081 */
2082 void
2083 in_rtchange(struct inpcb *inp, int errno)
2084 {
2085 #pragma unused(errno)
2086 boolean_t release = FALSE;
2087 struct rtentry *rt;
2088
2089 if ((rt = inp->inp_route.ro_rt) != NULL) {
2090 struct in_ifaddr *ia = NULL;
2091
2092 /* if address is gone, keep the old route */
2093 if (inp->inp_laddr.s_addr != INADDR_ANY &&
2094 (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
2095 /*
2096 * Address is around; ditch the route. A new route
2097 * can be allocated the next time output is attempted.
2098 */
2099 release = TRUE;
2100 }
2101 if (ia != NULL) {
2102 IFA_REMREF(&ia->ia_ifa);
2103 }
2104 }
2105 if (rt == NULL || release) {
2106 ROUTE_RELEASE(&inp->inp_route);
2107 }
2108 }
2109
2110 /*
2111 * Lookup a PCB based on the local address and port.
2112 */
2113 struct inpcb *
2114 in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
2115 unsigned int lport_arg, int wild_okay)
2116 {
2117 struct inpcb *inp;
2118 int matchwild = 3, wildcard;
2119 u_short lport = lport_arg;
2120
2121 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0, 0, 0, 0, 0);
2122
2123 if (!wild_okay) {
2124 struct inpcbhead *head;
2125 /*
2126 * Look for an unconnected (wildcard foreign addr) PCB that
2127 * matches the local address and port we're looking for.
2128 */
2129 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2130 pcbinfo->ipi_hashmask)];
2131 LIST_FOREACH(inp, head, inp_hash) {
2132 #if INET6
2133 if (!(inp->inp_vflag & INP_IPV4)) {
2134 continue;
2135 }
2136 #endif /* INET6 */
2137 if (inp->inp_faddr.s_addr == INADDR_ANY &&
2138 inp->inp_laddr.s_addr == laddr.s_addr &&
2139 inp->inp_lport == lport) {
2140 /*
2141 * Found.
2142 */
2143 return inp;
2144 }
2145 }
2146 /*
2147 * Not found.
2148 */
2149 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0, 0, 0, 0, 0);
2150 return NULL;
2151 } else {
2152 struct inpcbporthead *porthash;
2153 struct inpcbport *phd;
2154 struct inpcb *match = NULL;
2155 /*
2156 * Best fit PCB lookup.
2157 *
2158 * First see if this local port is in use by looking on the
2159 * port hash list.
2160 */
2161 porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
2162 pcbinfo->ipi_porthashmask)];
2163 LIST_FOREACH(phd, porthash, phd_hash) {
2164 if (phd->phd_port == lport) {
2165 break;
2166 }
2167 }
2168 if (phd != NULL) {
2169 /*
2170 * Port is in use by one or more PCBs. Look for best
2171 * fit.
2172 */
2173 LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
2174 wildcard = 0;
2175 #if INET6
2176 if (!(inp->inp_vflag & INP_IPV4)) {
2177 continue;
2178 }
2179 #endif /* INET6 */
2180 if (inp->inp_faddr.s_addr != INADDR_ANY) {
2181 wildcard++;
2182 }
2183 if (inp->inp_laddr.s_addr != INADDR_ANY) {
2184 if (laddr.s_addr == INADDR_ANY) {
2185 wildcard++;
2186 } else if (inp->inp_laddr.s_addr !=
2187 laddr.s_addr) {
2188 continue;
2189 }
2190 } else {
2191 if (laddr.s_addr != INADDR_ANY) {
2192 wildcard++;
2193 }
2194 }
2195 if (wildcard < matchwild) {
2196 match = inp;
2197 matchwild = wildcard;
2198 if (matchwild == 0) {
2199 break;
2200 }
2201 }
2202 }
2203 }
2204 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,
2205 0, 0, 0, 0);
2206 return match;
2207 }
2208 }
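/*
 * Worked example (assumed addresses, for illustration only): suppose
 * three PCBs share lport 80 -- (a) laddr 10.0.0.1 connected to a
 * foreign peer, (b) laddr 10.0.0.1 unconnected, and (c) laddr
 * INADDR_ANY unconnected.  A wildcard lookup with laddr 10.0.0.1
 * scores (a) wildcard=1 (foreign addr set), (b) wildcard=0, and
 * (c) wildcard=1 (local addr wild), so (b) wins as an exact match
 * and the loop exits early via the matchwild == 0 check.
 */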
2209
2210 /*
2211 * Check if PCB exists in hash list.
2212 */
2213 int
2214 in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2215 u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
2216 uid_t *uid, gid_t *gid, struct ifnet *ifp)
2217 {
2218 struct inpcbhead *head;
2219 struct inpcb *inp;
2220 u_short fport = fport_arg, lport = lport_arg;
2221 int found = 0;
2222 struct inpcb *local_wild = NULL;
2223 #if INET6
2224 struct inpcb *local_wild_mapped = NULL;
2225 #endif /* INET6 */
2226
2227 *uid = UID_MAX;
2228 *gid = GID_MAX;
2229
2230 /*
2231 * We may have found the pcb in the last lookup - check this first.
2232 */
2233
2234 lck_rw_lock_shared(pcbinfo->ipi_lock);
2235
2236 /*
2237 * First look for an exact match.
2238 */
2239 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
2240 pcbinfo->ipi_hashmask)];
2241 LIST_FOREACH(inp, head, inp_hash) {
2242 #if INET6
2243 if (!(inp->inp_vflag & INP_IPV4)) {
2244 continue;
2245 }
2246 #endif /* INET6 */
2247 if (inp_restricted_recv(inp, ifp)) {
2248 continue;
2249 }
2250
2251 #if NECP
2252 if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2253 continue;
2254 }
2255 #endif /* NECP */
2256
2257 if (inp->inp_faddr.s_addr == faddr.s_addr &&
2258 inp->inp_laddr.s_addr == laddr.s_addr &&
2259 inp->inp_fport == fport &&
2260 inp->inp_lport == lport) {
2261 if ((found = (inp->inp_socket != NULL))) {
2262 /*
2263 * Found.
2264 */
2265 *uid = kauth_cred_getuid(
2266 inp->inp_socket->so_cred);
2267 *gid = kauth_cred_getgid(
2268 inp->inp_socket->so_cred);
2269 }
2270 lck_rw_done(pcbinfo->ipi_lock);
2271 return found;
2272 }
2273 }
2274
2275 if (!wildcard) {
2276 /*
2277 * Not found.
2278 */
2279 lck_rw_done(pcbinfo->ipi_lock);
2280 return 0;
2281 }
2282
2283 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2284 pcbinfo->ipi_hashmask)];
2285 LIST_FOREACH(inp, head, inp_hash) {
2286 #if INET6
2287 if (!(inp->inp_vflag & INP_IPV4)) {
2288 continue;
2289 }
2290 #endif /* INET6 */
2291 if (inp_restricted_recv(inp, ifp)) {
2292 continue;
2293 }
2294
2295 #if NECP
2296 if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2297 continue;
2298 }
2299 #endif /* NECP */
2300
2301 if (inp->inp_faddr.s_addr == INADDR_ANY &&
2302 inp->inp_lport == lport) {
2303 if (inp->inp_laddr.s_addr == laddr.s_addr) {
2304 if ((found = (inp->inp_socket != NULL))) {
2305 *uid = kauth_cred_getuid(
2306 inp->inp_socket->so_cred);
2307 *gid = kauth_cred_getgid(
2308 inp->inp_socket->so_cred);
2309 }
2310 lck_rw_done(pcbinfo->ipi_lock);
2311 return found;
2312 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
2313 #if INET6
2314 if (inp->inp_socket &&
2315 SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
2316 local_wild_mapped = inp;
2317 } else
2318 #endif /* INET6 */
2319 local_wild = inp;
2320 }
2321 }
2322 }
2323 if (local_wild == NULL) {
2324 #if INET6
2325 if (local_wild_mapped != NULL) {
2326 if ((found = (local_wild_mapped->inp_socket != NULL))) {
2327 *uid = kauth_cred_getuid(
2328 local_wild_mapped->inp_socket->so_cred);
2329 *gid = kauth_cred_getgid(
2330 local_wild_mapped->inp_socket->so_cred);
2331 }
2332 lck_rw_done(pcbinfo->ipi_lock);
2333 return found;
2334 }
2335 #endif /* INET6 */
2336 lck_rw_done(pcbinfo->ipi_lock);
2337 return 0;
2338 }
2339 if ((found = (local_wild->inp_socket != NULL))) {
2340 *uid = kauth_cred_getuid(
2341 local_wild->inp_socket->so_cred);
2342 *gid = kauth_cred_getgid(
2343 local_wild->inp_socket->so_cred);
2344 }
2345 lck_rw_done(pcbinfo->ipi_lock);
2346 return found;
2347 }
2348
2349 /*
2350 * Lookup PCB in hash list.
2351 */
2352 struct inpcb *
2353 in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2354 u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
2355 struct ifnet *ifp)
2356 {
2357 struct inpcbhead *head;
2358 struct inpcb *inp;
2359 u_short fport = fport_arg, lport = lport_arg;
2360 struct inpcb *local_wild = NULL;
2361 #if INET6
2362 struct inpcb *local_wild_mapped = NULL;
2363 #endif /* INET6 */
2364
2365 /*
2366 * We may have found the pcb in the last lookup - check this first.
2367 */
2368
2369 lck_rw_lock_shared(pcbinfo->ipi_lock);
2370
2371 /*
2372 * First look for an exact match.
2373 */
2374 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
2375 pcbinfo->ipi_hashmask)];
2376 LIST_FOREACH(inp, head, inp_hash) {
2377 #if INET6
2378 if (!(inp->inp_vflag & INP_IPV4)) {
2379 continue;
2380 }
2381 #endif /* INET6 */
2382 if (inp_restricted_recv(inp, ifp)) {
2383 continue;
2384 }
2385
2386 #if NECP
2387 if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2388 continue;
2389 }
2390 #endif /* NECP */
2391
2392 if (inp->inp_faddr.s_addr == faddr.s_addr &&
2393 inp->inp_laddr.s_addr == laddr.s_addr &&
2394 inp->inp_fport == fport &&
2395 inp->inp_lport == lport) {
2396 /*
2397 * Found.
2398 */
2399 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2400 WNT_STOPUSING) {
2401 lck_rw_done(pcbinfo->ipi_lock);
2402 return inp;
2403 } else {
2404 /* it's there but dead, say it isn't found */
2405 lck_rw_done(pcbinfo->ipi_lock);
2406 return NULL;
2407 }
2408 }
2409 }
2410
2411 if (!wildcard) {
2412 /*
2413 * Not found.
2414 */
2415 lck_rw_done(pcbinfo->ipi_lock);
2416 return NULL;
2417 }
2418
2419 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2420 pcbinfo->ipi_hashmask)];
2421 LIST_FOREACH(inp, head, inp_hash) {
2422 #if INET6
2423 if (!(inp->inp_vflag & INP_IPV4)) {
2424 continue;
2425 }
2426 #endif /* INET6 */
2427 if (inp_restricted_recv(inp, ifp)) {
2428 continue;
2429 }
2430
2431 #if NECP
2432 if (!necp_socket_is_allowed_to_recv_on_interface(inp, ifp)) {
2433 continue;
2434 }
2435 #endif /* NECP */
2436
2437 if (inp->inp_faddr.s_addr == INADDR_ANY &&
2438 inp->inp_lport == lport) {
2439 if (inp->inp_laddr.s_addr == laddr.s_addr) {
2440 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2441 WNT_STOPUSING) {
2442 lck_rw_done(pcbinfo->ipi_lock);
2443 return inp;
2444 } else {
2445 /* it's dead; say it isn't found */
2446 lck_rw_done(pcbinfo->ipi_lock);
2447 return NULL;
2448 }
2449 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
2450 #if INET6
2451 if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) {
2452 local_wild_mapped = inp;
2453 } else
2454 #endif /* INET6 */
2455 local_wild = inp;
2456 }
2457 }
2458 }
2459 if (local_wild == NULL) {
2460 #if INET6
2461 if (local_wild_mapped != NULL) {
2462 if (in_pcb_checkstate(local_wild_mapped,
2463 WNT_ACQUIRE, 0) != WNT_STOPUSING) {
2464 lck_rw_done(pcbinfo->ipi_lock);
2465 return local_wild_mapped;
2466 } else {
2467 /* it's dead; say it isn't found */
2468 lck_rw_done(pcbinfo->ipi_lock);
2469 return NULL;
2470 }
2471 }
2472 #endif /* INET6 */
2473 lck_rw_done(pcbinfo->ipi_lock);
2474 return NULL;
2475 }
2476 if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
2477 lck_rw_done(pcbinfo->ipi_lock);
2478 return local_wild;
2479 }
2480 /*
2481 * It's either not found or is already dead.
2482 */
2483 lck_rw_done(pcbinfo->ipi_lock);
2484 return NULL;
2485 }
2486
2487 /*
2488 * @brief Insert PCB onto various hash lists.
2489 *
2490 * @param inp Pointer to internet protocol control block
2491 * @param locked Indicates whether ipi_lock (protecting the pcb
2492 * list) is already held.
2493 *
2494 * @return 0 on success, an errno value on failure
2495 */
2496 int
2497 in_pcbinshash(struct inpcb *inp, int locked)
2498 {
2499 struct inpcbhead *pcbhash;
2500 struct inpcbporthead *pcbporthash;
2501 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2502 struct inpcbport *phd;
2503 u_int32_t hashkey_faddr;
2504
2505 if (!locked) {
2506 if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
2507 /*
2508 * Lock inversion issue, mostly with udp
2509 * multicast packets
2510 */
2511 socket_unlock(inp->inp_socket, 0);
2512 lck_rw_lock_exclusive(pcbinfo->ipi_lock);
2513 socket_lock(inp->inp_socket, 0);
2514 }
2515 }
2516
2517 /*
2518 * This routine or its caller may have briefly given up the
2519 * socket's protocol lock. During that time the socket
2520 * may have been dropped.
2521 * Guard against that here.
2522 */
2523 if (inp->inp_state == INPCB_STATE_DEAD) {
2524 if (!locked) {
2525 lck_rw_done(pcbinfo->ipi_lock);
2526 }
2527 return ECONNABORTED;
2528 }
2529
2530
2531 #if INET6
2532 if (inp->inp_vflag & INP_IPV6) {
2533 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2534 } else
2535 #endif /* INET6 */
2536 hashkey_faddr = inp->inp_faddr.s_addr;
2537
2538 inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
2539 inp->inp_fport, pcbinfo->ipi_hashmask);
2540
2541 pcbhash = &pcbinfo->ipi_hashbase[inp->inp_hash_element];
2542
2543 pcbporthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(inp->inp_lport,
2544 pcbinfo->ipi_porthashmask)];
2545
2546 /*
2547 * Go through port list and look for a head for this lport.
2548 */
2549 LIST_FOREACH(phd, pcbporthash, phd_hash) {
2550 if (phd->phd_port == inp->inp_lport) {
2551 break;
2552 }
2553 }
2554
2555 /*
2556 * If none exists, malloc one and tack it on.
2557 */
2558 if (phd == NULL) {
2559 MALLOC(phd, struct inpcbport *, sizeof(struct inpcbport),
2560 M_PCB, M_WAITOK);
2561 if (phd == NULL) {
2562 if (!locked) {
2563 lck_rw_done(pcbinfo->ipi_lock);
2564 }
2565 return ENOBUFS; /* XXX */
2566 }
2567 phd->phd_port = inp->inp_lport;
2568 LIST_INIT(&phd->phd_pcblist);
2569 LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
2570 }
2571
2572 VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2573
2574
2575 inp->inp_phd = phd;
2576 LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
2577 LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
2578 inp->inp_flags2 |= INP2_INHASHLIST;
2579
2580 if (!locked) {
2581 lck_rw_done(pcbinfo->ipi_lock);
2582 }
2583
2584 #if NECP
2585 // This call catches the original setting of the local address
2586 inp_update_necp_policy(inp, NULL, NULL, 0);
2587 #endif /* NECP */
2588
2589 return 0;
2590 }
2591
2592 /*
2593 * Move PCB to the proper hash bucket when { faddr, fport } have been
2594 * changed. NOTE: This does not handle the case of the lport changing (the
2595 * hashed port list would have to be updated as well), so the lport must
2596 * not change after in_pcbinshash() has been called.
2597 */
2598 void
2599 in_pcbrehash(struct inpcb *inp)
2600 {
2601 struct inpcbhead *head;
2602 u_int32_t hashkey_faddr;
2603
2604 #if INET6
2605 if (inp->inp_vflag & INP_IPV6) {
2606 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2607 } else
2608 #endif /* INET6 */
2609 hashkey_faddr = inp->inp_faddr.s_addr;
2610
2611 inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
2612 inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask);
2613 head = &inp->inp_pcbinfo->ipi_hashbase[inp->inp_hash_element];
2614
2615 if (inp->inp_flags2 & INP2_INHASHLIST) {
2616 LIST_REMOVE(inp, inp_hash);
2617 inp->inp_flags2 &= ~INP2_INHASHLIST;
2618 }
2619
2620 VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2621 LIST_INSERT_HEAD(head, inp, inp_hash);
2622 inp->inp_flags2 |= INP2_INHASHLIST;
2623
2624 #if NECP
2625 // This call catches updates to the remote addresses
2626 inp_update_necp_policy(inp, NULL, NULL, 0);
2627 #endif /* NECP */
2628 }
2629
2630 /*
2631 * Remove PCB from various lists.
2632 * Must be called with the pcbinfo lock held in exclusive mode.
2633 */
2634 void
2635 in_pcbremlists(struct inpcb *inp)
2636 {
2637 inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt;
2638
2639 /*
2640 * Check if it's in hashlist -- an inp is placed in hashlist when
2641 * its local port gets assigned, so it should also be present
2642 * in the port list.
2643 */
2644 if (inp->inp_flags2 & INP2_INHASHLIST) {
2645 struct inpcbport *phd = inp->inp_phd;
2646
2647 VERIFY(phd != NULL && inp->inp_lport > 0);
2648
2649 LIST_REMOVE(inp, inp_hash);
2650 inp->inp_hash.le_next = NULL;
2651 inp->inp_hash.le_prev = NULL;
2652
2653 LIST_REMOVE(inp, inp_portlist);
2654 inp->inp_portlist.le_next = NULL;
2655 inp->inp_portlist.le_prev = NULL;
2656 if (LIST_EMPTY(&phd->phd_pcblist)) {
2657 LIST_REMOVE(phd, phd_hash);
2658 FREE(phd, M_PCB);
2659 }
2660 inp->inp_phd = NULL;
2661 inp->inp_flags2 &= ~INP2_INHASHLIST;
2662 }
2663 VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2664
2665 if (inp->inp_flags2 & INP2_TIMEWAIT) {
2666 /* Remove from time-wait queue */
2667 tcp_remove_from_time_wait(inp);
2668 inp->inp_flags2 &= ~INP2_TIMEWAIT;
2669 VERIFY(inp->inp_pcbinfo->ipi_twcount != 0);
2670 inp->inp_pcbinfo->ipi_twcount--;
2671 } else {
2672 /* Remove from global inp list if it is not time-wait */
2673 LIST_REMOVE(inp, inp_list);
2674 }
2675
2676 if (inp->inp_flags2 & INP2_IN_FCTREE) {
2677 inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED | INPFC_REMOVE));
2678 VERIFY(!(inp->inp_flags2 & INP2_IN_FCTREE));
2679 }
2680
2681 inp->inp_pcbinfo->ipi_count--;
2682 }
2683
2684 /*
2685 * Mechanism used to defer the memory release of PCBs. The pcb list
2686 * will contain the pcb until the reaper can clean it up if:
2687 * 1) state is "DEAD",
2688 * 2) wantcnt is STOPUSING,
2689 * 3) usecount is 0.
2690 * This function is called either to mark the pcb as ready for
2691 * recycling (WNT_STOPUSING) or to acquire/release a reference on it.
2692 */
2693 int
2694 in_pcb_checkstate(struct inpcb *pcb, int mode, int locked)
2695 {
2696 volatile UInt32 *wantcnt = (volatile UInt32 *)&pcb->inp_wantcnt;
2697 UInt32 origwant;
2698 UInt32 newwant;
2699
2700 switch (mode) {
2701 case WNT_STOPUSING:
2702 /*
2703 * Try to mark the pcb as ready for recycling. CAS the want
2704 * count with STOPUSING: on success we're done; if the pcb
2705 * is still in use, it will be marked later.
2706 */
2707 if (locked == 0) {
2708 socket_lock(pcb->inp_socket, 1);
2709 }
2710 pcb->inp_state = INPCB_STATE_DEAD;
2711
2712 stopusing:
2713 if (pcb->inp_socket->so_usecount < 0) {
2714 panic("%s: pcb=%p so=%p usecount is negative\n",
2715 __func__, pcb, pcb->inp_socket);
2716 /* NOTREACHED */
2717 }
2718 if (locked == 0) {
2719 socket_unlock(pcb->inp_socket, 1);
2720 }
2721
2722 inpcb_gc_sched(pcb->inp_pcbinfo, INPCB_TIMER_FAST);
2723
2724 origwant = *wantcnt;
2725 if ((UInt16) origwant == 0xffff) { /* should stop using */
2726 return WNT_STOPUSING;
2727 }
2728 newwant = 0xffff;
2729 if ((UInt16) origwant == 0) {
2730 /* try to mark it as unusable now */
2731 OSCompareAndSwap(origwant, newwant, wantcnt);
2732 }
2733 return WNT_STOPUSING;
2734
2735 case WNT_ACQUIRE:
2736 /*
2737 * Try to take a reference on the pcb. If the want count is
2738 * already at WNT_STOPUSING, bail out; otherwise increment
2739 * the count and return WNT_ACQUIRE.
2740 */
2741 do {
2742 origwant = *wantcnt;
2743 if ((UInt16) origwant == 0xffff) {
2744 /* should stop using */
2745 return WNT_STOPUSING;
2746 }
2747 newwant = origwant + 1;
2748 } while (!OSCompareAndSwap(origwant, newwant, wantcnt));
2749 return WNT_ACQUIRE;
2750
2751 case WNT_RELEASE:
2752 /*
2753 * Release a reference. If the pcb state is DEAD, also try
2754 * to set the want count to STOPUSING.
2755 */
2756 if (locked == 0) {
2757 socket_lock(pcb->inp_socket, 1);
2758 }
2759
2760 do {
2761 origwant = *wantcnt;
2762 if ((UInt16) origwant == 0x0) {
2763 panic("%s: pcb=%p release with zero count",
2764 __func__, pcb);
2765 /* NOTREACHED */
2766 }
2767 if ((UInt16) origwant == 0xffff) {
2768 /* should stop using */
2769 if (locked == 0) {
2770 socket_unlock(pcb->inp_socket, 1);
2771 }
2772 return WNT_STOPUSING;
2773 }
2774 newwant = origwant - 1;
2775 } while (!OSCompareAndSwap(origwant, newwant, wantcnt));
2776
2777 if (pcb->inp_state == INPCB_STATE_DEAD) {
2778 goto stopusing;
2779 }
2780 if (pcb->inp_socket->so_usecount < 0) {
2781 panic("%s: RELEASE pcb=%p so=%p usecount is negative\n",
2782 __func__, pcb, pcb->inp_socket);
2783 /* NOTREACHED */
2784 }
2785
2786 if (locked == 0) {
2787 socket_unlock(pcb->inp_socket, 1);
2788 }
2789 return WNT_RELEASE;
2790
2791 default:
2792 panic("%s: so=%p not a valid mode=%x\n", __func__,
2793 pcb->inp_socket, mode);
2794 /* NOTREACHED */
2795 }
2796
2797 /* NOTREACHED */
2798 return mode;
2799 }
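/*
 * Minimal standalone model of the wantcnt scheme above (illustrative
 * sketch only, not kernel code; assumes C11 <stdatomic.h>): a CAS
 * maintained reference count where 0xffff is the STOPUSING sentinel.
 */
#if 0 /* example only */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define WANT_STOPUSING 0xffffu

/* Mirrors WNT_ACQUIRE: fails once the count is marked STOPUSING. */
static bool
want_acquire(_Atomic uint32_t *wantcnt)
{
	uint32_t old = atomic_load(wantcnt);
	do {
		if ((uint16_t)old == WANT_STOPUSING) {
			return false;	/* should stop using */
		}
	} while (!atomic_compare_exchange_weak(wantcnt, &old, old + 1));
	return true;
}

/* Mirrors WNT_STOPUSING: mark unusable once the count drains to 0. */
static void
want_stopusing(_Atomic uint32_t *wantcnt)
{
	uint32_t old = atomic_load(wantcnt);
	if ((uint16_t)old == 0) {
		(void) atomic_compare_exchange_strong(wantcnt, &old,
		    WANT_STOPUSING);
	}
}
#endif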
2800
2801 /*
2802 * inpcb_to_compat copies specific bits of an inpcb to an inpcb_compat.
2803 * The inpcb_compat data structure is passed to user space and must
2804 * not change. We intentionally avoid copying pointers.
2805 */
2806 void
2807 inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat)
2808 {
2809 bzero(inp_compat, sizeof(*inp_compat));
2810 inp_compat->inp_fport = inp->inp_fport;
2811 inp_compat->inp_lport = inp->inp_lport;
2812 inp_compat->nat_owner = 0;
2813 inp_compat->nat_cookie = 0;
2814 inp_compat->inp_gencnt = inp->inp_gencnt;
2815 inp_compat->inp_flags = inp->inp_flags;
2816 inp_compat->inp_flow = inp->inp_flow;
2817 inp_compat->inp_vflag = inp->inp_vflag;
2818 inp_compat->inp_ip_ttl = inp->inp_ip_ttl;
2819 inp_compat->inp_ip_p = inp->inp_ip_p;
2820 inp_compat->inp_dependfaddr.inp6_foreign =
2821 inp->inp_dependfaddr.inp6_foreign;
2822 inp_compat->inp_dependladdr.inp6_local =
2823 inp->inp_dependladdr.inp6_local;
2824 inp_compat->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
2825 inp_compat->inp_depend6.inp6_hlim = 0;
2826 inp_compat->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
2827 inp_compat->inp_depend6.inp6_ifindex = 0;
2828 inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
2829 }
2830
2831 #if !CONFIG_EMBEDDED
2832 void
2833 inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp)
2834 {
2835 xinp->inp_fport = inp->inp_fport;
2836 xinp->inp_lport = inp->inp_lport;
2837 xinp->inp_gencnt = inp->inp_gencnt;
2838 xinp->inp_flags = inp->inp_flags;
2839 xinp->inp_flow = inp->inp_flow;
2840 xinp->inp_vflag = inp->inp_vflag;
2841 xinp->inp_ip_ttl = inp->inp_ip_ttl;
2842 xinp->inp_ip_p = inp->inp_ip_p;
2843 xinp->inp_dependfaddr.inp6_foreign = inp->inp_dependfaddr.inp6_foreign;
2844 xinp->inp_dependladdr.inp6_local = inp->inp_dependladdr.inp6_local;
2845 xinp->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
2846 xinp->inp_depend6.inp6_hlim = 0;
2847 xinp->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
2848 xinp->inp_depend6.inp6_ifindex = 0;
2849 xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
2850 }
2851 #endif /* !CONFIG_EMBEDDED */
2852
2853 /*
2854 * The following routines implement this scheme:
2855 *
2856 * Callers of ip_output() that intend to cache the route in the inpcb pass
2857 * a local copy of the struct route to ip_output(). Using a local copy of
2858 * the cached route significantly simplifies things as IP no longer has to
2859 * worry about having exclusive access to the passed in struct route, since
2860 * it's defined in the caller's stack; in essence, this allows for a lock-
2861 * less operation when updating the struct route at the IP level and below,
2862 * whenever necessary. The scheme works as follows:
2863 *
2864 * Prior to dropping the socket's lock and calling ip_output(), the caller
2865 * copies the struct route from the inpcb into its stack, and adds a reference
2866 * to the cached route entry, if there was any. The socket's lock is then
2867 * dropped and ip_output() is called with a pointer to the copy of struct
2868 * route defined on the stack (not to the one in the inpcb.)
2869 *
2870 * Upon returning from ip_output(), the caller then acquires the socket's
2871 * lock and synchronizes the cache; if there is no route cached in the inpcb,
2872 * it copies the local copy of struct route (which may or may not contain any
2873 * route) back into the cache; otherwise, if the inpcb has a route cached in
2874 * it, the one in the local copy will be freed, if there's any. Trashing the
2875 * cached route in the inpcb can be avoided because ip_output() is single-
2876 * threaded per-PCB (i.e. multiple transmits on a PCB are always serialized
2877 * by the socket/transport layer.)
2878 */
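/*
 * Illustrative sketch of the scheme above (hypothetical caller, not
 * part of this file); hypothetical_pcb_output() stands in for a
 * transport's transmit path calling into ip_output():
 */
#if 0 /* example only */
static int
hypothetical_pcb_output(struct inpcb *inp, struct mbuf *m)
{
	struct route ro;
	int error;

	/* copy the cached route to the stack; takes a route reference */
	inp_route_copyout(inp, &ro);
	socket_unlock(inp->inp_socket, 0);

	/* IP may update the stack copy lock-free, per the scheme above */
	error = ip_output(m, NULL, &ro, 0, NULL, NULL);

	socket_lock(inp->inp_socket, 0);
	/* synchronize the (possibly refreshed) route back into the PCB */
	inp_route_copyin(inp, &ro);

	return error;
}
#endif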
2879 void
2880 inp_route_copyout(struct inpcb *inp, struct route *dst)
2881 {
2882 struct route *src = &inp->inp_route;
2883
2884 socket_lock_assert_owned(inp->inp_socket);
2885
2886 /*
2887 * If the route in the PCB is stale or not for IPv4, blow it away;
2888 * this is possible in the IPv4-mapped address case.
2889 */
2890 if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET) {
2891 ROUTE_RELEASE(src);
2892 }
2893
2894 route_copyout(dst, src, sizeof(*dst));
2895 }
2896
2897 void
2898 inp_route_copyin(struct inpcb *inp, struct route *src)
2899 {
2900 struct route *dst = &inp->inp_route;
2901
2902 socket_lock_assert_owned(inp->inp_socket);
2903
2904 /* Minor sanity check */
2905 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
2906 panic("%s: wrong or corrupted route: %p", __func__, src);
2907 }
2908
2909 route_copyin(src, dst, sizeof(*src));
2910 }
2911
2912 /*
2913 * Handler for setting IP_BOUND_IF/IPV6_BOUND_IF socket option.
2914 */
2915 int
2916 inp_bindif(struct inpcb *inp, unsigned int ifscope, struct ifnet **pifp)
2917 {
2918 struct ifnet *ifp = NULL;
2919
2920 ifnet_head_lock_shared();
2921 if ((ifscope > (unsigned)if_index) || (ifscope != IFSCOPE_NONE &&
2922 (ifp = ifindex2ifnet[ifscope]) == NULL)) {
2923 ifnet_head_done();
2924 return ENXIO;
2925 }
2926 ifnet_head_done();
2927
2928 VERIFY(ifp != NULL || ifscope == IFSCOPE_NONE);
2929
2930 /*
2931 * A zero interface scope value indicates an "unbind".
2932 * Otherwise, take in whatever value the app desires;
2933 * the app may already know the scope (or force itself
2934 * to such a scope) ahead of time before the interface
2935 * gets attached. It doesn't matter either way; any
2936 * route lookup from this point on will require an
2937 * exact match for the embedded interface scope.
2938 */
2939 inp->inp_boundifp = ifp;
2940 if (inp->inp_boundifp == NULL) {
2941 inp->inp_flags &= ~INP_BOUND_IF;
2942 } else {
2943 inp->inp_flags |= INP_BOUND_IF;
2944 }
2945
2946 /* Blow away any cached route in the PCB */
2947 ROUTE_RELEASE(&inp->inp_route);
2948
2949 if (pifp != NULL) {
2950 *pifp = ifp;
2951 }
2952
2953 return 0;
2954 }
2955
2956 /*
2957 * Handler for setting IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
2958 * as well as for setting PROC_UUID_NO_CELLULAR policy.
2959 */
2960 void
2961 inp_set_nocellular(struct inpcb *inp)
2962 {
2963 inp->inp_flags |= INP_NO_IFT_CELLULAR;
2964
2965 /* Blow away any cached route in the PCB */
2966 ROUTE_RELEASE(&inp->inp_route);
2967 }
2968
2969 /*
2970 * Handler for clearing IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
2971 * as well as for clearing PROC_UUID_NO_CELLULAR policy.
2972 */
2973 void
2974 inp_clear_nocellular(struct inpcb *inp)
2975 {
2976 struct socket *so = inp->inp_socket;
2977
2978 /*
2979 * An SO_RESTRICT_DENY_CELLULAR restriction issued on the socket
2980 * has a higher precedence than INP_NO_IFT_CELLULAR. Clear the flag
2981 * if and only if the socket is unrestricted.
2982 */
2983 if (so != NULL && !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
2984 inp->inp_flags &= ~INP_NO_IFT_CELLULAR;
2985
2986 /* Blow away any cached route in the PCB */
2987 ROUTE_RELEASE(&inp->inp_route);
2988 }
2989 }
2990
2991 void
2992 inp_set_noexpensive(struct inpcb *inp)
2993 {
2994 inp->inp_flags2 |= INP2_NO_IFF_EXPENSIVE;
2995
2996 /* Blow away any cached route in the PCB */
2997 ROUTE_RELEASE(&inp->inp_route);
2998 }
2999
3000 void
3001 inp_set_noconstrained(struct inpcb *inp)
3002 {
3003 inp->inp_flags2 |= INP2_NO_IFF_CONSTRAINED;
3004
3005 /* Blow away any cached route in the PCB */
3006 ROUTE_RELEASE(&inp->inp_route);
3007 }
3008
3009 void
3010 inp_set_awdl_unrestricted(struct inpcb *inp)
3011 {
3012 inp->inp_flags2 |= INP2_AWDL_UNRESTRICTED;
3013
3014 /* Blow away any cached route in the PCB */
3015 ROUTE_RELEASE(&inp->inp_route);
3016 }
3017
3018 boolean_t
3019 inp_get_awdl_unrestricted(struct inpcb *inp)
3020 {
3021 return (inp->inp_flags2 & INP2_AWDL_UNRESTRICTED) ? TRUE : FALSE;
3022 }
3023
3024 void
3025 inp_clear_awdl_unrestricted(struct inpcb *inp)
3026 {
3027 inp->inp_flags2 &= ~INP2_AWDL_UNRESTRICTED;
3028
3029 /* Blow away any cached route in the PCB */
3030 ROUTE_RELEASE(&inp->inp_route);
3031 }
3032
3033 void
3034 inp_set_intcoproc_allowed(struct inpcb *inp)
3035 {
3036 inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
3037
3038 /* Blow away any cached route in the PCB */
3039 ROUTE_RELEASE(&inp->inp_route);
3040 }
3041
3042 boolean_t
3043 inp_get_intcoproc_allowed(struct inpcb *inp)
3044 {
3045 return (inp->inp_flags2 & INP2_INTCOPROC_ALLOWED) ? TRUE : FALSE;
3046 }
3047
3048 void
3049 inp_clear_intcoproc_allowed(struct inpcb *inp)
3050 {
3051 inp->inp_flags2 &= ~INP2_INTCOPROC_ALLOWED;
3052
3053 /* Blow away any cached route in the PCB */
3054 ROUTE_RELEASE(&inp->inp_route);
3055 }
3056
3057 #if NECP
3058 /*
3059 * Called when PROC_UUID_NECP_APP_POLICY is set.
3060 */
3061 void
3062 inp_set_want_app_policy(struct inpcb *inp)
3063 {
3064 inp->inp_flags2 |= INP2_WANT_APP_POLICY;
3065 }
3066
3067 /*
3068 * Called when PROC_UUID_NECP_APP_POLICY is cleared.
3069 */
3070 void
3071 inp_clear_want_app_policy(struct inpcb *inp)
3072 {
3073 inp->inp_flags2 &= ~INP2_WANT_APP_POLICY;
3074 }
3075 #endif /* NECP */
3076
3077 /*
3078 * Calculate flow hash for an inp, used by an interface to identify a
3079 * flow. When an interface provides a flow control advisory, this flow
3080 * hash is used as an identifier.
3081 */
3082 u_int32_t
3083 inp_calc_flowhash(struct inpcb *inp)
3084 {
3085 struct inp_flowhash_key fh __attribute__((aligned(8)));
3086 u_int32_t flowhash = 0;
3087 struct inpcb *tmp_inp = NULL;
3088
3089 if (inp_hash_seed == 0) {
3090 inp_hash_seed = RandomULong();
3091 }
3092
3093 bzero(&fh, sizeof(fh));
3094
3095 bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof(fh.infh_laddr));
3096 bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof(fh.infh_faddr));
3097
3098 fh.infh_lport = inp->inp_lport;
3099 fh.infh_fport = inp->inp_fport;
3100 fh.infh_af = (inp->inp_vflag & INP_IPV6) ? AF_INET6 : AF_INET;
3101 fh.infh_proto = inp->inp_ip_p;
3102 fh.infh_rand1 = RandomULong();
3103 fh.infh_rand2 = RandomULong();
3104
3105 try_again:
3106 flowhash = net_flowhash(&fh, sizeof(fh), inp_hash_seed);
3107 if (flowhash == 0) {
3108 /* try to get a non-zero flowhash */
3109 inp_hash_seed = RandomULong();
3110 goto try_again;
3111 }
3112
3113 inp->inp_flowhash = flowhash;
3114
3115 /* Insert the inp into inp_fc_tree */
3116 lck_mtx_lock_spin(&inp_fc_lck);
3117 tmp_inp = RB_FIND(inp_fc_tree, &inp_fc_tree, inp);
3118 if (tmp_inp != NULL) {
3119 /*
3120 * There is a different inp with the same flowhash.
3121 * There can be a collision on flow hash but the
3122 * probability is low. Let's recompute the
3123 * flowhash.
3124 */
3125 lck_mtx_unlock(&inp_fc_lck);
3126 /* recompute hash seed */
3127 inp_hash_seed = RandomULong();
3128 goto try_again;
3129 }
3130
3131 RB_INSERT(inp_fc_tree, &inp_fc_tree, inp);
3132 inp->inp_flags2 |= INP2_IN_FCTREE;
3133 lck_mtx_unlock(&inp_fc_lck);
3134
3135 return flowhash;
3136 }
3137
3138 void
3139 inp_flowadv(uint32_t flowhash)
3140 {
3141 struct inpcb *inp;
3142
3143 inp = inp_fc_getinp(flowhash, 0);
3144
3145 if (inp == NULL) {
3146 return;
3147 }
3148 inp_fc_feedback(inp);
3149 }
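/*
 * Illustrative sketch (hypothetical driver hook, not part of this
 * file): an interface that previously issued a flow advisory can hand
 * the stored flow hash back to the stack once its queue drains, and
 * inp_flowadv() above will unthrottle the matching PCB, if any:
 */
#if 0 /* example only */
static void
hypothetical_txq_drained(uint32_t stored_flowhash)
{
	inp_flowadv(stored_flowhash);
}
#endif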
3150
3151 /*
3152 * Function to compare inp_fc_entries in inp flow control tree
3153 */
3154 static inline int
3155 infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
3156 {
3157 return memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
3158 sizeof(inp1->inp_flowhash));
3159 }
3160
3161 static struct inpcb *
3162 inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
3163 {
3164 struct inpcb *inp = NULL;
3165 int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;
3166
3167 lck_mtx_lock_spin(&inp_fc_lck);
3168 key_inp.inp_flowhash = flowhash;
3169 inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
3170 if (inp == NULL) {
3171 /* inp is not present, return */
3172 lck_mtx_unlock(&inp_fc_lck);
3173 return NULL;
3174 }
3175
3176 if (flags & INPFC_REMOVE) {
3177 RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
3178 lck_mtx_unlock(&inp_fc_lck);
3179
3180 bzero(&(inp->infc_link), sizeof(inp->infc_link));
3181 inp->inp_flags2 &= ~INP2_IN_FCTREE;
3182 return NULL;
3183 }
3184
3185 if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
3186 inp = NULL;
3187 }
3188 lck_mtx_unlock(&inp_fc_lck);
3189
3190 return inp;
3191 }
3192
3193 static void
3194 inp_fc_feedback(struct inpcb *inp)
3195 {
3196 struct socket *so = inp->inp_socket;
3197
3198 /* we already hold a want_cnt on this inp, socket can't be null */
3199 VERIFY(so != NULL);
3200 socket_lock(so, 1);
3201
3202 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3203 socket_unlock(so, 1);
3204 return;
3205 }
3206
3207 if (inp->inp_sndinprog_cnt > 0) {
3208 inp->inp_flags |= INP_FC_FEEDBACK;
3209 }
3210
3211 /*
3212 * Return if the connection is not in flow-controlled state.
3213 * This can happen if the connection experienced
3214 * loss while it was in the flow-controlled state.
3215 */
3216 if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
3217 socket_unlock(so, 1);
3218 return;
3219 }
3220 inp_reset_fc_state(inp);
3221
3222 if (SOCK_TYPE(so) == SOCK_STREAM) {
3223 inp_fc_unthrottle_tcp(inp);
3224 }
3225
3226 socket_unlock(so, 1);
3227 }
3228
3229 void
3230 inp_reset_fc_state(struct inpcb *inp)
3231 {
3232 struct socket *so = inp->inp_socket;
3233 int suspended = (INP_IS_FLOW_SUSPENDED(inp)) ? 1 : 0;
3234 int needwakeup = (INP_WAIT_FOR_IF_FEEDBACK(inp)) ? 1 : 0;
3235
3236 inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
3237
3238 if (suspended) {
3239 so->so_flags &= ~(SOF_SUSPENDED);
3240 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
3241 }
3242
3243 /* Give a write wakeup to unblock the socket */
3244 if (needwakeup) {
3245 sowwakeup(so);
3246 }
3247 }
3248
3249 int
3250 inp_set_fc_state(struct inpcb *inp, int advcode)
3251 {
3252 struct inpcb *tmp_inp = NULL;
3253 /*
3254 * If there was a feedback from the interface when
3255 * send operation was in progress, we should ignore
3256 * this flow advisory to avoid a race between setting
3257 * flow-controlled state and receiving feedback from
3258 * the interface.
3259 */
3260 if (inp->inp_flags & INP_FC_FEEDBACK) {
3261 return 0;
3262 }
3263
3264 inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
3265 if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
3266 INPFC_SOLOCKED)) != NULL) {
3267 if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3268 return 0;
3269 }
3270 VERIFY(tmp_inp == inp);
3271 switch (advcode) {
3272 case FADV_FLOW_CONTROLLED:
3273 inp->inp_flags |= INP_FLOW_CONTROLLED;
3274 break;
3275 case FADV_SUSPENDED:
3276 inp->inp_flags |= INP_FLOW_SUSPENDED;
3277 soevent(inp->inp_socket,
3278 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));
3279
3280 /* Record the fact that suspend event was sent */
3281 inp->inp_socket->so_flags |= SOF_SUSPENDED;
3282 break;
3283 }
3284 return 1;
3285 }
3286 return 0;
3287 }
3288
3289 /*
3290 * Handler for SO_FLUSH socket option.
3291 */
3292 int
3293 inp_flush(struct inpcb *inp, int optval)
3294 {
3295 u_int32_t flowhash = inp->inp_flowhash;
3296 struct ifnet *rtifp, *oifp;
3297
3298 /* Either all classes or one of the valid ones */
3299 if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) {
3300 return EINVAL;
3301 }
3302
3303 /* We need a flow hash for identification */
3304 if (flowhash == 0) {
3305 return 0;
3306 }
3307
3308 /* Grab the interfaces from the route and pcb */
3309 rtifp = ((inp->inp_route.ro_rt != NULL) ?
3310 inp->inp_route.ro_rt->rt_ifp : NULL);
3311 oifp = inp->inp_last_outifp;
3312
3313 if (rtifp != NULL) {
3314 if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3315 }
3316 if (oifp != NULL && oifp != rtifp) {
3317 if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3318 }
3319
3320 return 0;
3321 }
3322
3323 /*
3324 * Clear the INP_INADDR_ANY flag (special case for PPP only)
3325 */
3326 void
3327 inp_clear_INP_INADDR_ANY(struct socket *so)
3328 {
3329 struct inpcb *inp = NULL;
3330
3331 socket_lock(so, 1);
3332 inp = sotoinpcb(so);
3333 if (inp) {
3334 inp->inp_flags &= ~INP_INADDR_ANY;
3335 }
3336 socket_unlock(so, 1);
3337 }
3338
3339 void
3340 inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
3341 {
3342 struct socket *so = inp->inp_socket;
3343
3344 soprocinfo->spi_pid = so->last_pid;
3345 strlcpy(&soprocinfo->spi_proc_name[0], &inp->inp_last_proc_name[0],
3346 sizeof(soprocinfo->spi_proc_name));
3347 if (so->last_pid != 0) {
3348 uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
3349 }
3350 /*
3351 * When not delegated, the effective pid is the same as the real pid
3352 */
3353 if (so->so_flags & SOF_DELEGATED) {
3354 soprocinfo->spi_delegated = 1;
3355 soprocinfo->spi_epid = so->e_pid;
3356 uuid_copy(soprocinfo->spi_euuid, so->e_uuid);
3357 } else {
3358 soprocinfo->spi_delegated = 0;
3359 soprocinfo->spi_epid = so->last_pid;
3360 }
3361 strlcpy(&soprocinfo->spi_e_proc_name[0], &inp->inp_e_proc_name[0],
3362 sizeof(soprocinfo->spi_e_proc_name));
3363 }
3364
3365 int
3366 inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
3367 struct so_procinfo *soprocinfo)
3368 {
3369 struct inpcb *inp = NULL;
3370 int found = 0;
3371
3372 bzero(soprocinfo, sizeof(struct so_procinfo));
3373
3374 if (!flowhash) {
3375 return -1;
3376 }
3377
3378 lck_rw_lock_shared(pcbinfo->ipi_lock);
3379 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
3380 if (inp->inp_state != INPCB_STATE_DEAD &&
3381 inp->inp_socket != NULL &&
3382 inp->inp_flowhash == flowhash) {
3383 found = 1;
3384 inp_get_soprocinfo(inp, soprocinfo);
3385 break;
3386 }
3387 }
3388 lck_rw_done(pcbinfo->ipi_lock);
3389
3390 return found;
3391 }
3392
3393 #if CONFIG_PROC_UUID_POLICY
3394 static void
3395 inp_update_cellular_policy(struct inpcb *inp, boolean_t set)
3396 {
3397 struct socket *so = inp->inp_socket;
3398 int before, after;
3399
3400 VERIFY(so != NULL);
3401 VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3402
3403 before = INP_NO_CELLULAR(inp);
3404 if (set) {
3405 inp_set_nocellular(inp);
3406 } else {
3407 inp_clear_nocellular(inp);
3408 }
3409 after = INP_NO_CELLULAR(inp);
3410 if (net_io_policy_log && (before != after)) {
3411 static const char *ok = "OK";
3412 static const char *nok = "NOACCESS";
3413 uuid_string_t euuid_buf;
3414 pid_t epid;
3415
3416 if (so->so_flags & SOF_DELEGATED) {
3417 uuid_unparse(so->e_uuid, euuid_buf);
3418 epid = so->e_pid;
3419 } else {
3420 uuid_unparse(so->last_uuid, euuid_buf);
3421 epid = so->last_pid;
3422 }
3423
3424 /* allow this socket to generate another notification event */
3425 so->so_ifdenied_notifies = 0;
3426
3427 log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
3428 "euuid %s%s %s->%s\n", __func__,
3429 (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
3430 SOCK_TYPE(so), epid, euuid_buf,
3431 (so->so_flags & SOF_DELEGATED) ?
3432 " [delegated]" : "",
3433 ((before < after) ? ok : nok),
3434 ((before < after) ? nok : ok));
3435 }
3436 }
3437
3438 #if NECP
3439 static void
3440 inp_update_necp_want_app_policy(struct inpcb *inp, boolean_t set)
3441 {
3442 struct socket *so = inp->inp_socket;
3443 int before, after;
3444
3445 VERIFY(so != NULL);
3446 VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3447
3448 before = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
3449 if (set) {
3450 inp_set_want_app_policy(inp);
3451 } else {
3452 inp_clear_want_app_policy(inp);
3453 }
3454 after = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
3455 if (net_io_policy_log && (before != after)) {
3456 static const char *wanted = "WANTED";
3457 static const char *unwanted = "UNWANTED";
3458 uuid_string_t euuid_buf;
3459 pid_t epid;
3460
3461 if (so->so_flags & SOF_DELEGATED) {
3462 uuid_unparse(so->e_uuid, euuid_buf);
3463 epid = so->e_pid;
3464 } else {
3465 uuid_unparse(so->last_uuid, euuid_buf);
3466 epid = so->last_pid;
3467 }
3468
3469 log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
3470 "euuid %s%s %s->%s\n", __func__,
3471 (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
3472 SOCK_TYPE(so), epid, euuid_buf,
3473 (so->so_flags & SOF_DELEGATED) ?
3474 " [delegated]" : "",
3475 ((before < after) ? unwanted : wanted),
3476 ((before < after) ? wanted : unwanted));
3477 }
3478 }
3479 #endif /* NECP */
3480 #endif /* CONFIG_PROC_UUID_POLICY */
3481
3482 #if NECP
3483 void
3484 inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int override_bound_interface)
3485 {
3486 necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface);
3487 if (necp_socket_should_rescope(inp) &&
3488 inp->inp_lport == 0 &&
3489 inp->inp_laddr.s_addr == INADDR_ANY &&
3490 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
3491 // If we should rescope and the socket is not yet bound, bind it now
3492 inp_bindif(inp, necp_socket_get_rescope_if_index(inp), NULL);
3493 }
3494 }
3495 #endif /* NECP */
3496
3497 int
3498 inp_update_policy(struct inpcb *inp)
3499 {
3500 #if CONFIG_PROC_UUID_POLICY
3501 struct socket *so = inp->inp_socket;
3502 uint32_t pflags = 0;
3503 int32_t ogencnt;
3504 int err = 0;
3505 uint8_t *lookup_uuid = NULL;
3506
3507 if (!net_io_policy_uuid ||
3508 so == NULL || inp->inp_state == INPCB_STATE_DEAD) {
3509 return 0;
3510 }
3511
3512 /*
3513 * Kernel-created sockets that aren't delegated on behalf of
3514 * another process are currently exempted from UUID policy checks.
3515 */
3516 if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) {
3517 return 0;
3518 }
3519
3520 #if defined(XNU_TARGET_OS_OSX)
3521 if (so->so_rpid > 0) {
3522 lookup_uuid = so->so_ruuid;
3523 ogencnt = so->so_policy_gencnt;
3524 err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
3525 }
3526 #endif
3527 if (lookup_uuid == NULL || err == ENOENT) {
3528 lookup_uuid = ((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid);
3529 ogencnt = so->so_policy_gencnt;
3530 err = proc_uuid_policy_lookup(lookup_uuid, &pflags, &so->so_policy_gencnt);
3531 }
3532
3533 /*
3534 * Discard the cached generation count if the entry is gone (ENOENT),
3535 * so that we go through the checks below.
3536 */
3537 if (err == ENOENT && ogencnt != 0) {
3538 so->so_policy_gencnt = 0;
3539 }
3540
3541 /*
3542 * If the generation count has changed, inspect the policy flags
3543 * and act accordingly. If a policy flag was previously set and
3544 * the UUID is no longer present in the table (ENOENT), treat it
3545 * as if the flag has been cleared.
3546 */
3547 if ((err == 0 || err == ENOENT) && ogencnt != so->so_policy_gencnt) {
3548 /* update cellular policy for this socket */
3549 if (err == 0 && (pflags & PROC_UUID_NO_CELLULAR)) {
3550 inp_update_cellular_policy(inp, TRUE);
3551 } else if (!(pflags & PROC_UUID_NO_CELLULAR)) {
3552 inp_update_cellular_policy(inp, FALSE);
3553 }
3554 #if NECP
3555 /* update necp want app policy for this socket */
3556 if (err == 0 && (pflags & PROC_UUID_NECP_APP_POLICY)) {
3557 inp_update_necp_want_app_policy(inp, TRUE);
3558 } else if (!(pflags & PROC_UUID_NECP_APP_POLICY)) {
3559 inp_update_necp_want_app_policy(inp, FALSE);
3560 }
3561 #endif /* NECP */
3562 }
3563
3564 return (err == ENOENT) ? 0 : err;
3565 #else /* !CONFIG_PROC_UUID_POLICY */
3566 #pragma unused(inp)
3567 return 0;
3568 #endif /* !CONFIG_PROC_UUID_POLICY */
3569 }
3570
3571 static unsigned int log_restricted;
3572 SYSCTL_DECL(_net_inet);
3573 SYSCTL_INT(_net_inet, OID_AUTO, log_restricted,
3574 CTLFLAG_RW | CTLFLAG_LOCKED, &log_restricted, 0,
3575 "Log network restrictions");
3576 /*
3577 * Called when we need to enforce policy restrictions in the input path.
3578 *
3579 * Returns TRUE if we're not allowed to receive data, otherwise FALSE.
3580 */
3581 static boolean_t
3582 _inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
3583 {
3584 VERIFY(inp != NULL);
3585
3586 /*
3587 * Inbound restrictions.
3588 */
3589 if (!sorestrictrecv) {
3590 return FALSE;
3591 }
3592
3593 if (ifp == NULL) {
3594 return FALSE;
3595 }
3596
3597 if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
3598 return TRUE;
3599 }
3600
3601 if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
3602 return TRUE;
3603 }
3604
3605 if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
3606 return TRUE;
3607 }
3608
3609 if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
3610 return TRUE;
3611 }
3612
3613 if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) {
3614 return FALSE;
3615 }
3616
3617 if (inp->inp_flags & INP_RECV_ANYIF) {
3618 return FALSE;
3619 }
3620
3621 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) {
3622 return FALSE;
3623 }
3624
3625 if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
3626 return TRUE;
3627 }
3628
3629 return TRUE;
3630 }
3631
3632 boolean_t
3633 inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
3634 {
3635 boolean_t ret;
3636
3637 ret = _inp_restricted_recv(inp, ifp);
3638 if (ret == TRUE && log_restricted) {
3639 printf("pid %d (%s) is unable to receive packets on %s\n",
3640 current_proc()->p_pid, proc_best_name(current_proc()),
3641 ifp->if_xname);
3642 }
3643 return ret;
3644 }
3645
3646 /*
3647 * Called when we need to enforce policy restrictions in the output path.
3648 *
3649 * Returns TRUE if we're not allowed to send data out, otherwise FALSE.
3650 */
3651 static boolean_t
3652 _inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
3653 {
3654 VERIFY(inp != NULL);
3655
3656 /*
3657 * Outbound restrictions.
3658 */
3659 if (!sorestrictsend) {
3660 return FALSE;
3661 }
3662
3663 if (ifp == NULL) {
3664 return FALSE;
3665 }
3666
3667 if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
3668 return TRUE;
3669 }
3670
3671 if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
3672 return TRUE;
3673 }
3674
3675 if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
3676 return TRUE;
3677 }
3678
3679 if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
3680 return TRUE;
3681 }
3682
3683 if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
3684 return TRUE;
3685 }
3686
3687 return FALSE;
3688 }
3689
3690 boolean_t
3691 inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
3692 {
3693 boolean_t ret;
3694
3695 ret = _inp_restricted_send(inp, ifp);
3696 if (ret == TRUE && log_restricted) {
3697 printf("pid %d (%s) is unable to transmit packets on %s\n",
3698 current_proc()->p_pid, proc_best_name(current_proc()),
3699 ifp->if_xname);
3700 }
3701 return ret;
3702 }
3703
3704 inline void
3705 inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack)
3706 {
3707 struct ifnet *ifp = inp->inp_last_outifp;
3708 struct socket *so = inp->inp_socket;
3709 if (ifp != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
3710 (ifp->if_type == IFT_CELLULAR || IFNET_IS_WIFI(ifp))) {
3711 int32_t unsent;
3712
3713 so->so_snd.sb_flags |= SB_SNDBYTE_CNT;
3714
3715 /*
3716 * There can be data outstanding before the connection
3717 * becomes established -- TFO case
3718 */
3719 if (so->so_snd.sb_cc > 0) {
3720 inp_incr_sndbytes_total(so, so->so_snd.sb_cc);
3721 }
3722
3723 unsent = inp_get_sndbytes_allunsent(so, th_ack);
3724 if (unsent > 0) {
3725 inp_incr_sndbytes_unsent(so, unsent);
3726 }
3727 }
3728 }
3729
3730 inline void
3731 inp_incr_sndbytes_total(struct socket *so, int32_t len)
3732 {
3733 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3734 struct ifnet *ifp = inp->inp_last_outifp;
3735
3736 if (ifp != NULL) {
3737 VERIFY(ifp->if_sndbyte_total >= 0);
3738 OSAddAtomic64(len, &ifp->if_sndbyte_total);
3739 }
3740 }
3741
3742 inline void
3743 inp_decr_sndbytes_total(struct socket *so, int32_t len)
3744 {
3745 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3746 struct ifnet *ifp = inp->inp_last_outifp;
3747
3748 if (ifp != NULL) {
3749 VERIFY(ifp->if_sndbyte_total >= len);
3750 OSAddAtomic64(-len, &ifp->if_sndbyte_total);
3751 }
3752 }
3753
3754 inline void
3755 inp_incr_sndbytes_unsent(struct socket *so, int32_t len)
3756 {
3757 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3758 struct ifnet *ifp = inp->inp_last_outifp;
3759
3760 if (ifp != NULL) {
3761 VERIFY(ifp->if_sndbyte_unsent >= 0);
3762 OSAddAtomic64(len, &ifp->if_sndbyte_unsent);
3763 }
3764 }
3765
3766 inline void
3767 inp_decr_sndbytes_unsent(struct socket *so, int32_t len)
3768 {
3769 if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
3770 return;
3771 }
3772
3773 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3774 struct ifnet *ifp = inp->inp_last_outifp;
3775
3776 if (ifp != NULL) {
3777 if (ifp->if_sndbyte_unsent >= len) {
3778 OSAddAtomic64(-len, &ifp->if_sndbyte_unsent);
3779 } else {
3780 ifp->if_sndbyte_unsent = 0;
3781 }
3782 }
3783 }
3784
3785 inline void
3786 inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
3787 {
3788 int32_t len;
3789
3790 if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
3791 return;
3792 }
3793
3794 len = inp_get_sndbytes_allunsent(so, th_ack);
3795 inp_decr_sndbytes_unsent(so, len);
3796 }
3797
3798
3799 inline void
3800 inp_set_activity_bitmap(struct inpcb *inp)
3801 {
3802 in_stat_set_activity_bitmap(&inp->inp_nw_activity, net_uptime());
3803 }
3804
3805 inline void
3806 inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab)
3807 {
3808 bcopy(&inp->inp_nw_activity, ab, sizeof(*ab));
3809 }
3810
3811 void
3812 inp_update_last_owner(struct socket *so, struct proc *p, struct proc *ep)
3813 {
3814 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3815
3816 if (inp == NULL) {
3817 return;
3818 }
3819
3820 if (p != NULL) {
3821 strlcpy(&inp->inp_last_proc_name[0], proc_name_address(p), sizeof(inp->inp_last_proc_name));
3822 }
3823 if (so->so_flags & SOF_DELEGATED) {
3824 if (ep != NULL) {
3825 strlcpy(&inp->inp_e_proc_name[0], proc_name_address(ep), sizeof(inp->inp_e_proc_name));
3826 } else {
3827 inp->inp_e_proc_name[0] = 0;
3828 }
3829 } else {
3830 inp->inp_e_proc_name[0] = 0;
3831 }
3832 }
3833
3834 void
3835 inp_copy_last_owner(struct socket *so, struct socket *head)
3836 {
3837 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3838 struct inpcb *head_inp = (struct inpcb *)head->so_pcb;
3839
3840 if (inp == NULL || head_inp == NULL) {
3841 return;
3842 }
3843
3844 strlcpy(&inp->inp_last_proc_name[0], &head_inp->inp_last_proc_name[0], sizeof(inp->inp_last_proc_name));
3845 strlcpy(&inp->inp_e_proc_name[0], &head_inp->inp_e_proc_name[0], sizeof(inp->inp_e_proc_name));
3846 }