/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)in_pcb.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/in_pcb.c,v 1.59.2.17 2001/08/13 16:26:17 ume Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/kauth.h>
#include <sys/priv.h>
#include <sys/proc_uuid_policy.h>
#include <sys/syslog.h>
#include <net/dlil.h>

#include <libkern/OSAtomic.h>
#include <kern/locks.h>

#include <machine/limits.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/flowhash.h>
#include <net/flowadv.h>
#include <net/ntstat.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif /* INET6 */

#include <sys/kdebug.h>
#include <sys/random.h>

#include <dev/random/randomdev.h>
#include <mach/boolean.h>

#include <pexpert/pexpert.h>

#if NECP
#include <net/necp.h>
#endif

#include <sys/stat.h>
#include <sys/ubc.h>
#include <sys/vnode.h>

static lck_grp_t *inpcb_lock_grp;
static lck_attr_t *inpcb_lock_attr;
static lck_grp_attr_t *inpcb_lock_grp_attr;
decl_lck_mtx_data(static, inpcb_lock);		/* global INPCB lock */
decl_lck_mtx_data(static, inpcb_timeout_lock);

static TAILQ_HEAD(, inpcbinfo) inpcb_head = TAILQ_HEAD_INITIALIZER(inpcb_head);

static u_int16_t inpcb_timeout_run = 0;	/* INPCB timer is scheduled to run */
static boolean_t inpcb_garbage_collecting = FALSE; /* gc timer is scheduled */
static boolean_t inpcb_ticking = FALSE;		/* "slow" timer is scheduled */
static boolean_t inpcb_fast_timer_on = FALSE;

extern char *proc_best_name(proc_t);

#define	INPCB_GCREQ_THRESHOLD	50000

static thread_call_t inpcb_thread_call, inpcb_fast_thread_call;
static void inpcb_sched_timeout(void);
static void inpcb_sched_lazy_timeout(void);
static void _inpcb_sched_timeout(unsigned int);
static void inpcb_timeout(void *, void *);
const int inpcb_timeout_lazy = 10;	/* 10 seconds leeway for lazy timers */
extern int tvtohz(struct timeval *);

#if CONFIG_PROC_UUID_POLICY
static void inp_update_cellular_policy(struct inpcb *, boolean_t);
#if NECP
static void inp_update_necp_want_app_policy(struct inpcb *, boolean_t);
#endif /* NECP */
#endif /* CONFIG_PROC_UUID_POLICY */

#define	DBG_FNC_PCB_LOOKUP	NETDBG_CODE(DBG_NETTCP, (6 << 8))
#define	DBG_FNC_PCB_HLOOKUP	NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))

/*
 * These configure the range of local port addresses assigned to
 * "unspecified" outgoing connections/packets/whatever.
 */
int ipport_lowfirstauto = IPPORT_RESERVED - 1;	/* 1023 */
int ipport_lowlastauto = IPPORT_RESERVEDSTART;	/* 600 */
int ipport_firstauto = IPPORT_HIFIRSTAUTO;	/* 49152 */
int ipport_lastauto = IPPORT_HILASTAUTO;	/* 65535 */
int ipport_hifirstauto = IPPORT_HIFIRSTAUTO;	/* 49152 */
int ipport_hilastauto = IPPORT_HILASTAUTO;	/* 65535 */

/* Clamp a tunable into [min, max]; wrapped in do/while so it is statement-safe */
#define	RANGECHK(var, min, max) do {				\
	if ((var) < (min)) { (var) = (min); }			\
	else if ((var) > (max)) { (var) = (max); }		\
} while (0)

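/*
 * Handler for the net.inet.ip.portrange.* sysctls: let sysctl_handle_int()
 * perform the read/write, then clamp every auto-port bound back into its
 * legal range so a bad write can never leave the ranges inconsistent.
 */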
static int
sysctl_net_ipport_check SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error) {
		RANGECHK(ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
		RANGECHK(ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
		RANGECHK(ipport_firstauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_lastauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX);
	}
	return (error);
}

#undef RANGECHK

SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange,
	CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP Ports");

SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");

static uint32_t apn_fallbk_debug = 0;
#define	apn_fallbk_log(x) do { if (apn_fallbk_debug >= 1) log x; } while (0)

#if CONFIG_EMBEDDED
static boolean_t apn_fallbk_enabled = TRUE;

SYSCTL_DECL(_net_inet);
SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "APN Fallback");
SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
	&apn_fallbk_debug, 0, "APN fallback debug enable");
#else
static boolean_t apn_fallbk_enabled = FALSE;
#endif

extern int udp_use_randomport;
extern int tcp_use_randomport;

/* Structs used for flowhash computation */
struct inp_flowhash_key_addr {
	union {
		struct in_addr v4;
		struct in6_addr v6;
		u_int8_t addr8[16];
		u_int16_t addr16[8];
		u_int32_t addr32[4];
	} infha;
};

struct inp_flowhash_key {
	struct inp_flowhash_key_addr	infh_laddr;
	struct inp_flowhash_key_addr	infh_faddr;
	u_int32_t			infh_lport;
	u_int32_t			infh_fport;
	u_int32_t			infh_af;
	u_int32_t			infh_proto;
	u_int32_t			infh_rand1;
	u_int32_t			infh_rand2;
};

static u_int32_t inp_hash_seed = 0;

static int infc_cmp(const struct inpcb *, const struct inpcb *);

/* Flags used by inp_fc_getinp */
#define	INPFC_SOLOCKED	0x1
#define	INPFC_REMOVE	0x2
static struct inpcb *inp_fc_getinp(u_int32_t, u_int32_t);

static void inp_fc_feedback(struct inpcb *);
extern void tcp_remove_from_time_wait(struct inpcb *inp);

decl_lck_mtx_data(static, inp_fc_lck);

RB_HEAD(inp_fc_tree, inpcb) inp_fc_tree;
RB_PROTOTYPE(inp_fc_tree, inpcb, infc_link, infc_cmp);
RB_GENERATE(inp_fc_tree, inpcb, infc_link, infc_cmp);

/*
 * Use this inp as a key to find an inp in the flowhash tree.
 * Accesses to it are protected by inp_fc_lck.
 */
struct inpcb key_inp;

/*
 * in_pcb.c: manage the Protocol Control Blocks.
 */

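/*
 * One-time module initialization: set up the global INPCB locks, the two
 * timeout thread calls (regular and fast), and the flow-control tree used
 * for delivering flow advisories.  Called once at boot; a second call
 * trips the VERIFY below.
 */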
void
in_pcbinit(void)
{
	static int inpcb_initialized = 0;

	VERIFY(!inpcb_initialized);
	inpcb_initialized = 1;

	inpcb_lock_grp_attr = lck_grp_attr_alloc_init();
	inpcb_lock_grp = lck_grp_alloc_init("inpcb", inpcb_lock_grp_attr);
	inpcb_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(&inpcb_lock, inpcb_lock_grp, inpcb_lock_attr);
	lck_mtx_init(&inpcb_timeout_lock, inpcb_lock_grp, inpcb_lock_attr);
	inpcb_thread_call = thread_call_allocate_with_priority(inpcb_timeout,
	    NULL, THREAD_CALL_PRIORITY_KERNEL);
	inpcb_fast_thread_call = thread_call_allocate_with_priority(
	    inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL);
	if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL)
		panic("unable to alloc the inpcb thread call");

	/*
	 * Initialize data structures required to deliver
	 * flow advisories.
	 */
	lck_mtx_init(&inp_fc_lck, inpcb_lock_grp, inpcb_lock_attr);
	lck_mtx_lock(&inp_fc_lck);
	RB_INIT(&inp_fc_tree);
	bzero(&key_inp, sizeof(key_inp));
	lck_mtx_unlock(&inp_fc_lck);
}


#define	INPCB_HAVE_TIMER_REQ(req)	(((req).intimer_lazy > 0) || \
	((req).intimer_fast > 0) || ((req).intimer_nodelay > 0))
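/*
 * Timer work loop, run from a thread call.  Piggy-backs the net_uptime()
 * update, then walks every registered inpcbinfo and runs its garbage
 * collector and/or timer callback if one was requested.  Requests that the
 * callbacks themselves generate are tallied and used to decide whether to
 * re-arm the timer immediately or fall back to the lazy schedule.
 */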
static void
inpcb_timeout(void *arg0, void *arg1)
{
#pragma unused(arg0, arg1)
	struct inpcbinfo *ipi;
	boolean_t t, gc;
	struct intimercount gccnt, tmcnt;

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	bzero(&gccnt, sizeof(gccnt));
	bzero(&tmcnt, sizeof(tmcnt));

	lck_mtx_lock_spin(&inpcb_timeout_lock);
	gc = inpcb_garbage_collecting;
	inpcb_garbage_collecting = FALSE;

	t = inpcb_ticking;
	inpcb_ticking = FALSE;

	if (gc || t) {
		lck_mtx_unlock(&inpcb_timeout_lock);

		lck_mtx_lock(&inpcb_lock);
		TAILQ_FOREACH(ipi, &inpcb_head, ipi_entry) {
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_gc_req)) {
				bzero(&ipi->ipi_gc_req,
				    sizeof(ipi->ipi_gc_req));
				if (gc && ipi->ipi_gc != NULL) {
					ipi->ipi_gc(ipi);
					gccnt.intimer_lazy +=
					    ipi->ipi_gc_req.intimer_lazy;
					gccnt.intimer_fast +=
					    ipi->ipi_gc_req.intimer_fast;
					gccnt.intimer_nodelay +=
					    ipi->ipi_gc_req.intimer_nodelay;
				}
			}
			if (INPCB_HAVE_TIMER_REQ(ipi->ipi_timer_req)) {
				bzero(&ipi->ipi_timer_req,
				    sizeof(ipi->ipi_timer_req));
				if (t && ipi->ipi_timer != NULL) {
					ipi->ipi_timer(ipi);
					tmcnt.intimer_lazy +=
					    ipi->ipi_timer_req.intimer_lazy;
					tmcnt.intimer_fast +=
					    ipi->ipi_timer_req.intimer_fast;
					tmcnt.intimer_nodelay +=
					    ipi->ipi_timer_req.intimer_nodelay;
				}
			}
		}
		lck_mtx_unlock(&inpcb_lock);
		lck_mtx_lock_spin(&inpcb_timeout_lock);
	}

	/* lock was dropped above, so check first before overriding */
	if (!inpcb_garbage_collecting)
		inpcb_garbage_collecting = INPCB_HAVE_TIMER_REQ(gccnt);
	if (!inpcb_ticking)
		inpcb_ticking = INPCB_HAVE_TIMER_REQ(tmcnt);

	/* re-arm the timer if there's work to do */
	inpcb_timeout_run--;
	VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2);

	if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0)
		inpcb_sched_timeout();
	else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5)
		/* be lazy when idle with little activity */
		inpcb_sched_lazy_timeout();
	else
		inpcb_sched_timeout();

	lck_mtx_unlock(&inpcb_timeout_lock);
}

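/*
 * Convenience wrappers around _inpcb_sched_timeout(): schedule the timer
 * with no leeway (fast) or with inpcb_timeout_lazy seconds of leeway.
 */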
static void
inpcb_sched_timeout(void)
{
	_inpcb_sched_timeout(0);
}

static void
inpcb_sched_lazy_timeout(void)
{
	_inpcb_sched_timeout(inpcb_timeout_lazy);
}

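/*
 * Arm the timeout thread call for one second out, with "offset" seconds of
 * leeway.  Called with inpcb_timeout_lock held (possibly as a spin lock;
 * it is converted to a full mutex before scheduling).  If a lazy timer is
 * already pending and a fast one is requested, a second (fast) thread call
 * is scheduled alongside it rather than cancelling the lazy one.
 */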
static void
_inpcb_sched_timeout(unsigned int offset)
{
	uint64_t deadline, leeway;

	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	LCK_MTX_ASSERT(&inpcb_timeout_lock, LCK_MTX_ASSERT_OWNED);
	if (inpcb_timeout_run == 0 &&
	    (inpcb_garbage_collecting || inpcb_ticking)) {
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		if (offset == 0) {
			inpcb_fast_timer_on = TRUE;
			thread_call_enter_delayed(inpcb_thread_call,
			    deadline);
		} else {
			inpcb_fast_timer_on = FALSE;
			clock_interval_to_absolutetime_interval(offset,
			    NSEC_PER_SEC, &leeway);
			thread_call_enter_delayed_with_leeway(
			    inpcb_thread_call, NULL, deadline, leeway,
			    THREAD_CALL_DELAY_LEEWAY);
		}
	} else if (inpcb_timeout_run == 1 &&
	    offset == 0 && !inpcb_fast_timer_on) {
		/*
		 * Since the request was for a fast timer but the
		 * scheduled timer is a lazy timer, try to schedule
		 * another instance of fast timer also.
		 */
		lck_mtx_convert_spin(&inpcb_timeout_lock);
		inpcb_timeout_run++;
		inpcb_fast_timer_on = TRUE;
		thread_call_enter_delayed(inpcb_fast_thread_call, deadline);
	}
}

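/*
 * Record a garbage-collection request of the given urgency against an
 * inpcbinfo and make sure a timer run is scheduled.  Once the number of
 * outstanding fast/nodelay requests crosses INPCB_GCREQ_THRESHOLD, the
 * request is promoted to INPCB_TIMER_FAST to keep the backlog bounded.
 */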
void
inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type)
{
	u_int32_t gccnt;

	lck_mtx_lock_spin(&inpcb_timeout_lock);
	inpcb_garbage_collecting = TRUE;
	gccnt = ipi->ipi_gc_req.intimer_nodelay +
	    ipi->ipi_gc_req.intimer_fast;

	if (gccnt > INPCB_GCREQ_THRESHOLD) {
		type = INPCB_TIMER_FAST;
	}

	switch (type) {
	case INPCB_TIMER_NODELAY:
		atomic_add_32(&ipi->ipi_gc_req.intimer_nodelay, 1);
		inpcb_sched_timeout();
		break;
	case INPCB_TIMER_FAST:
		atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
		inpcb_sched_timeout();
		break;
	default:
		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
		inpcb_sched_lazy_timeout();
		break;
	}
	lck_mtx_unlock(&inpcb_timeout_lock);
}

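/*
 * Same as inpcb_gc_sched(), but for the per-pcbinfo protocol timer
 * requests (ipi_timer) rather than garbage collection.
 */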
void
inpcb_timer_sched(struct inpcbinfo *ipi, u_int32_t type)
{
	lck_mtx_lock_spin(&inpcb_timeout_lock);
	inpcb_ticking = TRUE;
	switch (type) {
	case INPCB_TIMER_NODELAY:
		atomic_add_32(&ipi->ipi_timer_req.intimer_nodelay, 1);
		inpcb_sched_timeout();
		break;
	case INPCB_TIMER_FAST:
		atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
		inpcb_sched_timeout();
		break;
	default:
		atomic_add_32(&ipi->ipi_timer_req.intimer_lazy, 1);
		inpcb_sched_lazy_timeout();
		break;
	}
	lck_mtx_unlock(&inpcb_timeout_lock);
}

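/*
 * Register/unregister a protocol's inpcbinfo with the global list walked
 * by inpcb_timeout().  Attaching an already-attached pcbinfo panics;
 * detaching one that was never attached returns ENXIO.
 */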
void
in_pcbinfo_attach(struct inpcbinfo *ipi)
{
	struct inpcbinfo *ipi0;

	lck_mtx_lock(&inpcb_lock);
	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
		if (ipi0 == ipi) {
			panic("%s: ipi %p already in the list\n",
			    __func__, ipi);
			/* NOTREACHED */
		}
	}
	TAILQ_INSERT_TAIL(&inpcb_head, ipi, ipi_entry);
	lck_mtx_unlock(&inpcb_lock);
}

int
in_pcbinfo_detach(struct inpcbinfo *ipi)
{
	struct inpcbinfo *ipi0;
	int error = 0;

	lck_mtx_lock(&inpcb_lock);
	TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) {
		if (ipi0 == ipi)
			break;
	}
	if (ipi0 != NULL)
		TAILQ_REMOVE(&inpcb_head, ipi0, ipi_entry);
	else
		error = ENXIO;
	lck_mtx_unlock(&inpcb_lock);

	return (error);
}

/*
 * Allocate a PCB and associate it with the socket.
 *
 * Returns:	0			Success
 *		ENOBUFS
 *		ENOMEM
 */
int
in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p)
{
#pragma unused(p)
	struct inpcb *inp;
	caddr_t temp;
#if CONFIG_MACF_NET
	int mac_error;
#endif /* CONFIG_MACF_NET */

	if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
		inp = (struct inpcb *)zalloc(pcbinfo->ipi_zone);
		if (inp == NULL)
			return (ENOBUFS);
		bzero((caddr_t)inp, sizeof (*inp));
	} else {
		inp = (struct inpcb *)(void *)so->so_saved_pcb;
		temp = inp->inp_saved_ppcb;
		bzero((caddr_t)inp, sizeof (*inp));
		inp->inp_saved_ppcb = temp;
	}

	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	inp->inp_pcbinfo = pcbinfo;
	inp->inp_socket = so;
#if CONFIG_MACF_NET
	mac_error = mac_inpcb_label_init(inp, M_WAITOK);
	if (mac_error != 0) {
		if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0)
			zfree(pcbinfo->ipi_zone, inp);
		return (mac_error);
	}
	mac_inpcb_label_associate(so, inp);
#endif /* CONFIG_MACF_NET */
	/* make sure inp_stat is always 64-bit aligned */
	inp->inp_stat = (struct inp_stat *)P2ROUNDUP(inp->inp_stat_store,
	    sizeof (u_int64_t));
	if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store) +
	    sizeof (*inp->inp_stat) > sizeof (inp->inp_stat_store)) {
		panic("%s: insufficient space to align inp_stat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_cstat is always 64-bit aligned */
	inp->inp_cstat = (struct inp_stat *)P2ROUNDUP(inp->inp_cstat_store,
	    sizeof (u_int64_t));
	if (((uintptr_t)inp->inp_cstat - (uintptr_t)inp->inp_cstat_store) +
	    sizeof (*inp->inp_cstat) > sizeof (inp->inp_cstat_store)) {
		panic("%s: insufficient space to align inp_cstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_wstat is always 64-bit aligned */
	inp->inp_wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_wstat_store,
	    sizeof (u_int64_t));
	if (((uintptr_t)inp->inp_wstat - (uintptr_t)inp->inp_wstat_store) +
	    sizeof (*inp->inp_wstat) > sizeof (inp->inp_wstat_store)) {
		panic("%s: insufficient space to align inp_wstat", __func__);
		/* NOTREACHED */
	}

	/* make sure inp_Wstat is always 64-bit aligned */
	inp->inp_Wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_Wstat_store,
	    sizeof (u_int64_t));
	if (((uintptr_t)inp->inp_Wstat - (uintptr_t)inp->inp_Wstat_store) +
	    sizeof (*inp->inp_Wstat) > sizeof (inp->inp_Wstat_store)) {
		panic("%s: insufficient space to align inp_Wstat", __func__);
		/* NOTREACHED */
	}

	so->so_pcb = (caddr_t)inp;

	if (so->so_proto->pr_flags & PR_PCBLOCK) {
		lck_mtx_init(&inp->inpcb_mtx, pcbinfo->ipi_lock_grp,
		    pcbinfo->ipi_lock_attr);
	}

#if INET6
	if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on)
		inp->inp_flags |= IN6P_IPV6_V6ONLY;

	if (ip6_auto_flowlabel)
		inp->inp_flags |= IN6P_AUTOFLOWLABEL;
#endif /* INET6 */
	if (intcoproc_unrestricted)
		inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;

	(void) inp_update_policy(inp);

	lck_rw_lock_exclusive(pcbinfo->ipi_lock);
	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
	pcbinfo->ipi_count++;
	lck_rw_done(pcbinfo->ipi_lock);
	return (0);
}

/*
 * in_pcblookup_local_and_cleanup does everything that
 * in_pcblookup_local does, but it also checks for a socket
 * that's going away.  Since we know that the lock is
 * held read+write when this function is called, we
 * can safely dispose of this socket like the slow
 * timer would usually do and return NULL.  This is
 * great for bind.
 */
struct inpcb *
in_pcblookup_local_and_cleanup(struct inpcbinfo *pcbinfo, struct in_addr laddr,
    u_int lport_arg, int wild_okay)
{
	struct inpcb *inp;

	/* Perform normal lookup */
	inp = in_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay);

	/* Check if we found a match but it's waiting to be disposed */
	if (inp != NULL && inp->inp_wantcnt == WNT_STOPUSING) {
		struct socket *so = inp->inp_socket;

		socket_lock(so, 0);

		if (so->so_usecount == 0) {
			if (inp->inp_state != INPCB_STATE_DEAD)
				in_pcbdetach(inp);
			in_pcbdispose(inp);	/* will unlock & destroy */
			inp = NULL;
		} else {
			socket_unlock(so, 0);
		}
	}

	return (inp);
}

static void
in_pcb_conflict_post_msg(u_int16_t port)
{
	/*
	 * Radar 5523020: send a kernel event notification if a
	 * non-participating socket tries to bind a port owned by
	 * a socket that has set SOF_NOTIFYCONFLICT.
	 */
	struct kev_msg ev_msg;
	struct kev_in_portinuse in_portinuse;

	bzero(&in_portinuse, sizeof (struct kev_in_portinuse));
	bzero(&ev_msg, sizeof (struct kev_msg));
	in_portinuse.port = ntohs(port);	/* port in host order */
	in_portinuse.req_pid = proc_selfpid();
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_INET_SUBCLASS;
	ev_msg.event_code = KEV_INET_PORTINUSE;
	ev_msg.dv[0].data_ptr = &in_portinuse;
	ev_msg.dv[0].data_length = sizeof (struct kev_in_portinuse);
	ev_msg.dv[1].data_length = 0;
	dlil_post_complete_msg(NULL, &ev_msg);
}

/*
 * Bind an INPCB to an address and/or port.  This routine should not alter
 * the caller-supplied local address "nam".
 *
 * Returns:	0			Success
 *		EADDRNOTAVAIL		Address not available.
 *		EINVAL			Invalid argument
 *		EAFNOSUPPORT		Address family not supported [notdef]
 *		EACCES			Permission denied
 *		EADDRINUSE		Address in use
 *		EAGAIN			Resource unavailable, try again
 *		priv_check_cred:EPERM	Operation not permitted
 */
int
in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p)
{
	struct socket *so = inp->inp_socket;
	unsigned short *lastport;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short lport = 0, rand_port = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
	int error, randomport, conflict = 0;
	boolean_t anonport = FALSE;
	kauth_cred_t cred;
	struct in_addr laddr;
	struct ifnet *outif = NULL;

	if (TAILQ_EMPTY(&in_ifaddrhead))	/* XXX broken! */
		return (EADDRNOTAVAIL);
	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY)
		return (EINVAL);
	if (!(so->so_options & (SO_REUSEADDR|SO_REUSEPORT)))
		wild = 1;

	bzero(&laddr, sizeof(laddr));

	socket_unlock(so, 0);	/* keep reference on socket */
	lck_rw_lock_exclusive(pcbinfo->ipi_lock);

	if (nam != NULL) {
		if (nam->sa_len != sizeof (struct sockaddr_in)) {
			lck_rw_done(pcbinfo->ipi_lock);
			socket_lock(so, 0);
			return (EINVAL);
		}
#if 0
		/*
		 * We should check the family, but old programs
		 * incorrectly fail to initialize it.
		 */
		if (nam->sa_family != AF_INET) {
			lck_rw_done(pcbinfo->ipi_lock);
			socket_lock(so, 0);
			return (EAFNOSUPPORT);
		}
#endif /* 0 */
		lport = SIN(nam)->sin_port;

		if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr))) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow complete duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR)
				reuseport = SO_REUSEADDR|SO_REUSEPORT;
		} else if (SIN(nam)->sin_addr.s_addr != INADDR_ANY) {
			struct sockaddr_in sin;
			struct ifaddr *ifa;

			/* Sanitized for interface address searches */
			bzero(&sin, sizeof (sin));
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof (struct sockaddr_in);
			sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

			ifa = ifa_ifwithaddr(SA(&sin));
			if (ifa == NULL) {
				lck_rw_done(pcbinfo->ipi_lock);
				socket_lock(so, 0);
				return (EADDRNOTAVAIL);
			} else {
				/*
				 * Opportunistically determine the outbound
				 * interface that may be used; this may not
				 * hold true if we end up using a route
				 * going over a different interface, e.g.
				 * when sending to a local address.  This
				 * will get updated again after sending.
				 */
				IFA_LOCK(ifa);
				outif = ifa->ifa_ifp;
				IFA_UNLOCK(ifa);
				IFA_REMREF(ifa);
			}
		}
		if (lport != 0) {
			struct inpcb *t;
			uid_t u;

#if !CONFIG_EMBEDDED
			if (ntohs(lport) < IPPORT_RESERVED) {
				cred = kauth_cred_proc_ref(p);
				error = priv_check_cred(cred,
				    PRIV_NETINET_RESERVEDPORT, 0);
				kauth_cred_unref(&cred);
				if (error != 0) {
					lck_rw_done(pcbinfo->ipi_lock);
					socket_lock(so, 0);
					return (EACCES);
				}
			}
#endif /* !CONFIG_EMBEDDED */
			if (!IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
			    (u = kauth_cred_getuid(so->so_cred)) != 0 &&
			    (t = in_pcblookup_local_and_cleanup(
			    inp->inp_pcbinfo, SIN(nam)->sin_addr, lport,
			    INPLOOKUP_WILDCARD)) != NULL &&
			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
			    t->inp_laddr.s_addr != INADDR_ANY ||
			    !(t->inp_socket->so_options & SO_REUSEPORT)) &&
			    (u != kauth_cred_getuid(t->inp_socket->so_cred)) &&
			    !(t->inp_socket->so_flags & SOF_REUSESHAREUID) &&
			    (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
			    t->inp_laddr.s_addr != INADDR_ANY)) {
				if ((t->inp_socket->so_flags &
				    SOF_NOTIFYCONFLICT) &&
				    !(so->so_flags & SOF_NOTIFYCONFLICT))
					conflict = 1;

				lck_rw_done(pcbinfo->ipi_lock);

				if (conflict)
					in_pcb_conflict_post_msg(lport);

				socket_lock(so, 0);
				return (EADDRINUSE);
			}
			t = in_pcblookup_local_and_cleanup(pcbinfo,
			    SIN(nam)->sin_addr, lport, wild);
			if (t != NULL &&
			    (reuseport & t->inp_socket->so_options) == 0) {
#if INET6
				if (SIN(nam)->sin_addr.s_addr != INADDR_ANY ||
				    t->inp_laddr.s_addr != INADDR_ANY ||
				    SOCK_DOM(so) != PF_INET6 ||
				    SOCK_DOM(t->inp_socket) != PF_INET6)
#endif /* INET6 */
				{
					if ((t->inp_socket->so_flags &
					    SOF_NOTIFYCONFLICT) &&
					    !(so->so_flags & SOF_NOTIFYCONFLICT))
						conflict = 1;

					lck_rw_done(pcbinfo->ipi_lock);

					if (conflict)
						in_pcb_conflict_post_msg(lport);
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
			}
		}
		laddr = SIN(nam)->sin_addr;
	}
	if (lport == 0) {
		u_short first, last;
		int count;
		bool found;

		randomport = (so->so_flags & SOF_BINDRANDOMPORT) ||
		    (so->so_type == SOCK_STREAM ? tcp_use_randomport :
		    udp_use_randomport);

		/*
		 * Even though this looks similar to the code in
		 * in6_pcbsetport, the v6 vs v4 checks are different.
		 */
		anonport = TRUE;
		if (inp->inp_flags & INP_HIGHPORT) {
			first = ipport_hifirstauto;	/* sysctl */
			last = ipport_hilastauto;
			lastport = &pcbinfo->ipi_lasthi;
		} else if (inp->inp_flags & INP_LOWPORT) {
			cred = kauth_cred_proc_ref(p);
			error = priv_check_cred(cred,
			    PRIV_NETINET_RESERVEDPORT, 0);
			kauth_cred_unref(&cred);
			if (error != 0) {
				lck_rw_done(pcbinfo->ipi_lock);
				socket_lock(so, 0);
				return (error);
			}
			first = ipport_lowfirstauto;	/* 1023 */
			last = ipport_lowlastauto;	/* 600 */
			lastport = &pcbinfo->ipi_lastlow;
		} else {
			first = ipport_firstauto;	/* sysctl */
			last = ipport_lastauto;
			lastport = &pcbinfo->ipi_lastport;
		}
		/* No point in randomizing if only one port is available */

		if (first == last)
			randomport = 0;
		/*
		 * Simple check to ensure that we do not loop forever
		 * (deadlock) here once every port has been tried.
		 *
		 * We split the two cases (up and down) so that the direction
		 * is not being tested on each round of the loop.
		 */
		if (first > last) {
			struct in_addr lookup_addr;

			/*
			 * counting down
			 */
			if (randomport) {
				read_frandom(&rand_port, sizeof (rand_port));
				*lastport =
				    first - (rand_port % (first - last));
			}
			count = first - last;

			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
			    inp->inp_laddr;

			found = false;
			do {
				if (count-- < 0) {	/* completely used? */
					lck_rw_done(pcbinfo->ipi_lock);
					socket_lock(so, 0);
					return (EADDRNOTAVAIL);
				}
				--*lastport;
				if (*lastport > first || *lastport < last)
					*lastport = first;
				lport = htons(*lastport);

				found = in_pcblookup_local_and_cleanup(pcbinfo,
				    lookup_addr, lport, wild) == NULL;
			} while (!found);
		} else {
			struct in_addr lookup_addr;

			/*
			 * counting up
			 */
			if (randomport) {
				read_frandom(&rand_port, sizeof (rand_port));
				*lastport =
				    first + (rand_port % (first - last));
			}
			count = last - first;

			lookup_addr = (laddr.s_addr != INADDR_ANY) ? laddr :
			    inp->inp_laddr;

			found = false;
			do {
				if (count-- < 0) {	/* completely used? */
					lck_rw_done(pcbinfo->ipi_lock);
					socket_lock(so, 0);
					return (EADDRNOTAVAIL);
				}
				++*lastport;
				if (*lastport < first || *lastport > last)
					*lastport = first;
				lport = htons(*lastport);

				found = in_pcblookup_local_and_cleanup(pcbinfo,
				    lookup_addr, lport, wild) == NULL;
			} while (!found);
		}
	}
	socket_lock(so, 0);

	/*
	 * We unlocked the socket's protocol lock for a long time.
	 * The socket might have been dropped/defuncted.
	 * Check if the world has changed since.
	 */
	if (inp->inp_state == INPCB_STATE_DEAD) {
		lck_rw_done(pcbinfo->ipi_lock);
		return (ECONNABORTED);
	}

	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
		lck_rw_done(pcbinfo->ipi_lock);
		return (EINVAL);
	}

	if (laddr.s_addr != INADDR_ANY) {
		inp->inp_laddr = laddr;
		inp->inp_last_outifp = outif;
	}
	inp->inp_lport = lport;
	if (anonport)
		inp->inp_flags |= INP_ANONPORT;

	if (in_pcbinshash(inp, 1) != 0) {
		inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_last_outifp = NULL;

		inp->inp_lport = 0;
		if (anonport)
			inp->inp_flags &= ~INP_ANONPORT;
		lck_rw_done(pcbinfo->ipi_lock);
		return (EAGAIN);
	}
	lck_rw_done(pcbinfo->ipi_lock);
	sflt_notify(so, sock_evt_bound, NULL);
	return (0);
}

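/*
 * APN fallback: heuristics that decide whether to notify user space that
 * a third-party app is connecting over an IPv6-only cellular (APN)
 * network with no usable IPv4 default route, presumably so that a
 * fallback APN can be considered.  The filters below suppress the
 * notification for kernel sockets, first-party binaries, binaries built
 * after the App Store IPv6 requirement, and repeat events.
 */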
#define	APN_FALLBACK_IP_FILTER(a)	\
	(IN_LINKLOCAL(ntohl((a)->sin_addr.s_addr)) || \
	IN_LOOPBACK(ntohl((a)->sin_addr.s_addr)) || \
	IN_ZERONET(ntohl((a)->sin_addr.s_addr)) || \
	IN_MULTICAST(ntohl((a)->sin_addr.s_addr)) || \
	IN_PRIVATE(ntohl((a)->sin_addr.s_addr)))

#define	APN_FALLBACK_NOTIF_INTERVAL	2 /* Magic Number */
static uint64_t last_apn_fallback = 0;

static boolean_t
apn_fallback_required(proc_t proc, struct socket *so,
    struct sockaddr_in *p_dstv4)
{
	uint64_t timenow;
	struct sockaddr_storage lookup_default_addr;
	struct rtentry *rt = NULL;

	VERIFY(proc != NULL);

	if (apn_fallbk_enabled == FALSE)
		return FALSE;

	if (proc == kernproc)
		return FALSE;

	if (so && (so->so_options & SO_NOAPNFALLBK))
		return FALSE;

	timenow = net_uptime();
	if ((timenow - last_apn_fallback) < APN_FALLBACK_NOTIF_INTERVAL) {
		apn_fallbk_log((LOG_INFO, "APN fallback notification throttled.\n"));
		return FALSE;
	}

	if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4))
		return FALSE;

	/* Check if we have unscoped IPv6 default route through cellular */
	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
	lookup_default_addr.ss_family = AF_INET6;
	lookup_default_addr.ss_len = sizeof(struct sockaddr_in6);

	rt = rtalloc1((struct sockaddr *)&lookup_default_addr, 0, 0);
	if (NULL == rt) {
		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
		    "unscoped default IPv6 route.\n"));
		return FALSE;
	}

	if (!IFNET_IS_CELLULAR(rt->rt_ifp)) {
		rtfree(rt);
		apn_fallbk_log((LOG_INFO, "APN fallback notification could not find "
		    "unscoped default IPv6 route through cellular interface.\n"));
		return FALSE;
	}

	/*
	 * We have a default IPv6 route, ensure that
	 * we do not have IPv4 default route before triggering
	 * the event
	 */
	rtfree(rt);
	rt = NULL;

	bzero(&lookup_default_addr, sizeof(lookup_default_addr));
	lookup_default_addr.ss_family = AF_INET;
	lookup_default_addr.ss_len = sizeof(struct sockaddr_in);

	rt = rtalloc1((struct sockaddr *)&lookup_default_addr, 0, 0);

	if (rt) {
		rtfree(rt);
		rt = NULL;
		apn_fallbk_log((LOG_INFO, "APN fallback notification found unscoped "
		    "IPv4 default route!\n"));
		return FALSE;
	}

	{
		/*
		 * We disable APN fallback if the binary is not a third-party app.
		 * Note that platform daemons use their process name as a
		 * bundle ID so we filter out bundle IDs without dots.
		 */
		const char *bundle_id = cs_identity_get(proc);
		if (bundle_id == NULL ||
		    bundle_id[0] == '\0' ||
		    strchr(bundle_id, '.') == NULL ||
		    strncmp(bundle_id, "com.apple.", sizeof("com.apple.") - 1) == 0) {
			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found first-"
			    "party bundle ID \"%s\"!\n", (bundle_id ? bundle_id : "NULL")));
			return FALSE;
		}
	}

	{
		/*
		 * The Apple App Store IPv6 requirement started on
		 * June 1st, 2016 at 12:00:00 AM PDT.
		 * We disable APN fallback if the binary is more recent than that.
		 * We check both atime and birthtime since birthtime is not always supported.
		 */
		static const long ipv6_start_date = 1464764400L;
		vfs_context_t context;
		struct stat64 sb;
		int vn_stat_error;

		bzero(&sb, sizeof(struct stat64));
		context = vfs_context_create(NULL);
		vn_stat_error = vn_stat(proc->p_textvp, &sb, NULL, 1, context);
		(void)vfs_context_rele(context);

		if (vn_stat_error != 0 ||
		    sb.st_atimespec.tv_sec >= ipv6_start_date ||
		    sb.st_birthtimespec.tv_sec >= ipv6_start_date) {
			apn_fallbk_log((LOG_INFO, "Abort: APN fallback notification found binary "
			    "too recent! (err %d atime %ld mtime %ld ctime %ld birthtime %ld)\n",
			    vn_stat_error, sb.st_atimespec.tv_sec, sb.st_mtimespec.tv_sec,
			    sb.st_ctimespec.tv_sec, sb.st_birthtimespec.tv_sec));
			return FALSE;
		}
	}
	return TRUE;
}

static void
apn_fallback_trigger(proc_t proc)
{
	pid_t pid = 0;
	struct kev_msg ev_msg;
	struct kev_netevent_apnfallbk_data apnfallbk_data;

	last_apn_fallback = net_uptime();
	pid = proc_pid(proc);
	uuid_t application_uuid;
	uuid_clear(application_uuid);
	proc_getexecutableuuid(proc, application_uuid,
	    sizeof(application_uuid));

	bzero(&ev_msg, sizeof (struct kev_msg));
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NETEVENT_SUBCLASS;
	ev_msg.event_code = KEV_NETEVENT_APNFALLBACK;

	bzero(&apnfallbk_data, sizeof(apnfallbk_data));
	apnfallbk_data.epid = pid;
	uuid_copy(apnfallbk_data.euuid, application_uuid);

	ev_msg.dv[0].data_ptr = &apnfallbk_data;
	ev_msg.dv[0].data_length = sizeof(apnfallbk_data);
	kev_post_msg(&ev_msg);
	apn_fallbk_log((LOG_INFO, "APN fallback notification issued.\n"));
}

/*
 * Transform old in_pcbconnect() into an inner subroutine for new
 * in_pcbconnect(); do some validity-checking on the remote address
 * (in "nam") and then determine local host address (i.e., which
 * interface) to use to access that remote host.
 *
 * This routine may alter the caller-supplied remote address "nam".
 *
 * The caller may override the bound-to-interface setting of the socket
 * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
 *
 * This routine might return an ifp with a reference held if the caller
 * provides a non-NULL outif, even in the error case.  The caller is
 * responsible for releasing its reference.
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *		EAFNOSUPPORT		Address family not supported
 *		EADDRNOTAVAIL		Address not available
 */
int
in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr,
    unsigned int ifscope, struct ifnet **outif, int raw)
{
	struct route *ro = &inp->inp_route;
	struct in_ifaddr *ia = NULL;
	struct sockaddr_in sin;
	int error = 0;
	boolean_t restricted = FALSE;

	if (outif != NULL)
		*outif = NULL;
	if (nam->sa_len != sizeof (struct sockaddr_in))
		return (EINVAL);
	if (SIN(nam)->sin_family != AF_INET)
		return (EAFNOSUPPORT);
	if (raw == 0 && SIN(nam)->sin_port == 0)
		return (EADDRNOTAVAIL);

	/*
	 * If the destination address is INADDR_ANY,
	 * use the primary local address.
	 * If the supplied address is INADDR_BROADCAST,
	 * and the primary interface supports broadcast,
	 * choose the broadcast address for that interface.
	 */
	if (raw == 0 && (SIN(nam)->sin_addr.s_addr == INADDR_ANY ||
	    SIN(nam)->sin_addr.s_addr == (u_int32_t)INADDR_BROADCAST)) {
		lck_rw_lock_shared(in_ifaddr_rwlock);
		if (!TAILQ_EMPTY(&in_ifaddrhead)) {
			ia = TAILQ_FIRST(&in_ifaddrhead);
			IFA_LOCK_SPIN(&ia->ia_ifa);
			if (SIN(nam)->sin_addr.s_addr == INADDR_ANY) {
				SIN(nam)->sin_addr = IA_SIN(ia)->sin_addr;
			} else if (ia->ia_ifp->if_flags & IFF_BROADCAST) {
				SIN(nam)->sin_addr =
				    SIN(&ia->ia_broadaddr)->sin_addr;
			}
			IFA_UNLOCK(&ia->ia_ifa);
			ia = NULL;
		}
		lck_rw_done(in_ifaddr_rwlock);
	}
	/*
	 * Otherwise, if the socket has already bound the source, just use it.
	 */
	if (inp->inp_laddr.s_addr != INADDR_ANY) {
		VERIFY(ia == NULL);
		*laddr = inp->inp_laddr;
		return (0);
	}

	/*
	 * If the ifscope is specified by the caller (e.g. IP_PKTINFO)
	 * then it overrides the sticky ifscope set for the socket.
	 */
	if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF))
		ifscope = inp->inp_boundifp->if_index;

	/*
	 * If route is known or can be allocated now,
	 * our src addr is taken from the i/f, else punt.
	 * Note that we should check the address family of the cached
	 * destination, in case of sharing the cache with IPv6.
	 */
	if (ro->ro_rt != NULL)
		RT_LOCK_SPIN(ro->ro_rt);
	if (ROUTE_UNUSABLE(ro) || ro->ro_dst.sa_family != AF_INET ||
	    SIN(&ro->ro_dst)->sin_addr.s_addr != SIN(nam)->sin_addr.s_addr ||
	    (inp->inp_socket->so_options & SO_DONTROUTE)) {
		if (ro->ro_rt != NULL)
			RT_UNLOCK(ro->ro_rt);
		ROUTE_RELEASE(ro);
	}
	if (!(inp->inp_socket->so_options & SO_DONTROUTE) &&
	    (ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) {
		if (ro->ro_rt != NULL)
			RT_UNLOCK(ro->ro_rt);
		ROUTE_RELEASE(ro);
		/* No route yet, so try to acquire one */
		bzero(&ro->ro_dst, sizeof (struct sockaddr_in));
		ro->ro_dst.sa_family = AF_INET;
		ro->ro_dst.sa_len = sizeof (struct sockaddr_in);
		SIN(&ro->ro_dst)->sin_addr = SIN(nam)->sin_addr;
		rtalloc_scoped(ro, ifscope);
		if (ro->ro_rt != NULL)
			RT_LOCK_SPIN(ro->ro_rt);
	}
	/* Sanitized local copy for interface address searches */
	bzero(&sin, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof (struct sockaddr_in);
	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
	/*
	 * If we did not find (or use) a route, assume dest is reachable
	 * on a directly connected network and try to find a corresponding
	 * interface to take the source address from.
	 */
	if (ro->ro_rt == NULL) {
		proc_t proc = current_proc();

		VERIFY(ia == NULL);
		ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
		if (ia == NULL)
			ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
		error = ((ia == NULL) ? ENETUNREACH : 0);

		if (apn_fallback_required(proc, inp->inp_socket,
		    (void *)nam))
			apn_fallback_trigger(proc);

		goto done;
	}
	RT_LOCK_ASSERT_HELD(ro->ro_rt);
	/*
	 * If the outgoing interface on the route found is not
	 * a loopback interface, use the address from that interface.
	 */
	if (!(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)) {
		VERIFY(ia == NULL);
		/*
		 * If the route points to a cellular interface and the
		 * caller forbids our using interfaces of such type,
		 * pretend that there is no route.
		 * Apply the same logic for expensive interfaces.
		 */
		if (inp_restricted_send(inp, ro->ro_rt->rt_ifp)) {
			RT_UNLOCK(ro->ro_rt);
			ROUTE_RELEASE(ro);
			error = EHOSTUNREACH;
			restricted = TRUE;
		} else {
			/* Become a regular mutex */
			RT_CONVERT_LOCK(ro->ro_rt);
			ia = ifatoia(ro->ro_rt->rt_ifa);
			IFA_ADDREF(&ia->ia_ifa);
			RT_UNLOCK(ro->ro_rt);
			error = 0;
		}
		goto done;
	}
	VERIFY(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK);
	RT_UNLOCK(ro->ro_rt);
	/*
	 * The outgoing interface is marked with 'loopback net', so a route
	 * to ourselves is here.
	 * Try to find the interface of the destination address and then
	 * take the address from there.  That interface is not necessarily
	 * a loopback interface.
	 */
	VERIFY(ia == NULL);
	ia = ifatoia(ifa_ifwithdstaddr(SA(&sin)));
	if (ia == NULL)
		ia = ifatoia(ifa_ifwithaddr_scoped(SA(&sin), ifscope));
	if (ia == NULL)
		ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope));
	if (ia == NULL) {
		RT_LOCK(ro->ro_rt);
		ia = ifatoia(ro->ro_rt->rt_ifa);
		if (ia != NULL)
			IFA_ADDREF(&ia->ia_ifa);
		RT_UNLOCK(ro->ro_rt);
	}
	error = ((ia == NULL) ? ENETUNREACH : 0);

done:
	/*
	 * If the destination address is multicast and an outgoing
	 * interface has been set as a multicast option, use the
	 * address of that interface as our source address.
	 */
	if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
	    inp->inp_moptions != NULL) {
		struct ip_moptions *imo;
		struct ifnet *ifp;

		imo = inp->inp_moptions;
		IMO_LOCK(imo);
		if (imo->imo_multicast_ifp != NULL && (ia == NULL ||
		    ia->ia_ifp != imo->imo_multicast_ifp)) {
			ifp = imo->imo_multicast_ifp;
			if (ia != NULL)
				IFA_REMREF(&ia->ia_ifa);
			lck_rw_lock_shared(in_ifaddr_rwlock);
			TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
				if (ia->ia_ifp == ifp)
					break;
			}
			if (ia != NULL)
				IFA_ADDREF(&ia->ia_ifa);
			lck_rw_done(in_ifaddr_rwlock);
			if (ia == NULL)
				error = EADDRNOTAVAIL;
			else
				error = 0;
		}
		IMO_UNLOCK(imo);
	}
	/*
	 * Don't do pcblookup call here; return interface in laddr
	 * and exit to caller, that will do the lookup.
	 */
	if (ia != NULL) {
		/*
		 * If the source address belongs to a cellular interface
		 * and the socket forbids our using interfaces of such
		 * type, pretend that there is no source address.
		 * Apply the same logic for expensive interfaces.
		 */
		IFA_LOCK_SPIN(&ia->ia_ifa);
		if (inp_restricted_send(inp, ia->ia_ifa.ifa_ifp)) {
			IFA_UNLOCK(&ia->ia_ifa);
			error = EHOSTUNREACH;
			restricted = TRUE;
		} else if (error == 0) {
			*laddr = ia->ia_addr.sin_addr;
			if (outif != NULL) {
				struct ifnet *ifp;

				if (ro->ro_rt != NULL)
					ifp = ro->ro_rt->rt_ifp;
				else
					ifp = ia->ia_ifp;

				VERIFY(ifp != NULL);
				IFA_CONVERT_LOCK(&ia->ia_ifa);
				ifnet_reference(ifp);	/* for caller */
				if (*outif != NULL)
					ifnet_release(*outif);
				*outif = ifp;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		} else {
			IFA_UNLOCK(&ia->ia_ifa);
		}
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}

	if (restricted && error == EHOSTUNREACH) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED |
		    SO_FILT_HINT_IFDENIED));
	}

	return (error);
}

/*
 * Outer subroutine:
 * Connect from a socket to a specified address.
 * Both address and port must be specified in argument sin.
 * If we don't have a local address for this socket yet,
 * then pick one.
 *
 * The caller may override the bound-to-interface setting of the socket
 * by specifying the ifscope parameter (e.g. from IP_PKTINFO.)
 */
int
in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p,
    unsigned int ifscope, struct ifnet **outif)
{
	struct in_addr laddr;
	struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;
	struct inpcb *pcb;
	int error;
	struct socket *so = inp->inp_socket;

	/*
	 * Call inner routine, to assign local interface address.
	 */
	if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0)
		return (error);

	socket_unlock(so, 0);
	pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port,
	    inp->inp_laddr.s_addr ? inp->inp_laddr : laddr,
	    inp->inp_lport, 0, NULL);
	socket_lock(so, 0);

	/*
	 * Check if the socket is still in a valid state.  When we unlock this
	 * embryonic socket, it can get aborted if another thread is closing
	 * the listener (radar 7947600).
	 */
	if ((so->so_flags & SOF_ABORTED) != 0)
		return (ECONNREFUSED);

	if (pcb != NULL) {
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return (EADDRINUSE);
	}
	if (inp->inp_laddr.s_addr == INADDR_ANY) {
		if (inp->inp_lport == 0) {
			error = in_pcbbind(inp, NULL, p);
			if (error)
				return (error);
		}
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
		inp->inp_laddr = laddr;
		/* no reference needed */
		inp->inp_last_outifp = (outif != NULL) ? *outif : NULL;
		inp->inp_flags |= INP_INADDR_ANY;
	} else {
		/*
		 * Usage of IP_PKTINFO without a local port already
		 * specified will cause the kernel to panic; see
		 * rdar://problem/18508185.  For now, return an error
		 * to avoid a kernel panic.  This routine can be
		 * refactored to handle this better in the future.
		 */
		if (inp->inp_lport == 0)
			return (EINVAL);
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
			/*
			 * Lock inversion issue, mostly with udp
			 * multicast packets.
			 */
			socket_unlock(so, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
			socket_lock(so, 0);
		}
	}
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP)
		nstat_pcb_invalidate_cache(inp);
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);
	return (0);
}

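/*
 * Undo an in_pcbconnect(): clear the foreign address/port, rehash the
 * PCB, and detach it outright if the socket already has no file
 * descriptor reference (unless it is a multipath subflow).
 */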
void
in_pcbdisconnect(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP)
		nstat_pcb_cache(inp);

	inp->inp_faddr.s_addr = INADDR_ANY;
	inp->inp_fport = 0;

	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(so, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
		socket_lock(so, 0);
	}

	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);
	/*
	 * A multipath subflow socket would have its SS_NOFDREF set by default,
	 * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB;
	 * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared.
	 */
	if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF))
		in_pcbdetach(inp);
}

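/*
 * First stage of PCB teardown: release the options, cached route and
 * multicast state hanging off the PCB, mark it INPCB_STATE_DEAD, and
 * schedule the garbage collector to reclaim it once its wantcnt drains.
 */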
void
in_pcbdetach(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (so->so_pcb == NULL) {
		/* PCB has been disposed */
		panic("%s: inp=%p so=%p proto=%d so_pcb is null!\n", __func__,
		    inp, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

#if IPSEC
	if (inp->inp_sp != NULL) {
		(void) ipsec4_delete_pcbpolicy(inp);
	}
#endif /* IPSEC */

	if (inp->inp_stat != NULL && SOCK_PROTO(so) == IPPROTO_UDP) {
		if (inp->inp_stat->rxpackets == 0 && inp->inp_stat->txpackets == 0) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_no_data);
		}
	}

	/*
	 * Let NetworkStatistics know this PCB is going away
	 * before we detach it.
	 */
	if (nstat_collect &&
	    (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP))
		nstat_pcb_detach(inp);

	/* Free memory buffer held for generating keep alives */
	if (inp->inp_keepalive_data != NULL) {
		FREE(inp->inp_keepalive_data, M_TEMP);
		inp->inp_keepalive_data = NULL;
	}

	/* mark socket state as dead */
	if (in_pcb_checkstate(inp, WNT_STOPUSING, 1) != WNT_STOPUSING) {
		panic("%s: so=%p proto=%d couldn't set to STOPUSING\n",
		    __func__, so, SOCK_PROTO(so));
		/* NOTREACHED */
	}

	if (!(so->so_flags & SOF_PCBCLEARING)) {
		struct ip_moptions *imo;

		inp->inp_vflag = 0;
		if (inp->inp_options != NULL) {
			(void) m_free(inp->inp_options);
			inp->inp_options = NULL;
		}
		ROUTE_RELEASE(&inp->inp_route);
		imo = inp->inp_moptions;
		inp->inp_moptions = NULL;
		sofreelastref(so, 0);
		inp->inp_state = INPCB_STATE_DEAD;
		/* makes sure we're not called twice from so_close */
		so->so_flags |= SOF_PCBCLEARING;

		inpcb_gc_sched(inp->inp_pcbinfo, INPCB_TIMER_FAST);

		/*
		 * See inp_join_group() for why we need to unlock
		 */
		if (imo != NULL) {
			socket_unlock(so, 0);
			IMO_REMREF(imo);
			socket_lock(so, 0);
		}
	}
}

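/*
 * Final disposal of a PCB: called with the pcbinfo lock held exclusive
 * once the socket usecount has dropped to zero and the PCB is in
 * WNT_STOPUSING state.  Removes the PCB from the PCB lists, severs it
 * from its socket, and frees both.
 */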
void
in_pcbdispose(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;
	struct inpcbinfo *ipi = inp->inp_pcbinfo;

	if (so != NULL && so->so_usecount != 0) {
		panic("%s: so %p [%d,%d] usecount %d lockhistory %s\n",
		    __func__, so, SOCK_DOM(so), SOCK_TYPE(so), so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
		if (so != NULL) {
			panic_plain("%s: inp %p invalid wantcnt %d, so %p "
			    "[%d,%d] usecount %d retaincnt %d state 0x%x "
			    "flags 0x%x lockhistory %s\n", __func__, inp,
			    inp->inp_wantcnt, so, SOCK_DOM(so), SOCK_TYPE(so),
			    so->so_usecount, so->so_retaincnt, so->so_state,
			    so->so_flags, solockhistory_nr(so));
			/* NOTREACHED */
		} else {
			panic("%s: inp %p invalid wantcnt %d no socket\n",
			    __func__, inp, inp->inp_wantcnt);
			/* NOTREACHED */
		}
	}

	LCK_RW_ASSERT(ipi->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

	inp->inp_gencnt = ++ipi->ipi_gencnt;
	/* access ipi in in_pcbremlists */
	in_pcbremlists(inp);

	if (so != NULL) {
		if (so->so_proto->pr_flags & PR_PCBLOCK) {
			sofreelastref(so, 0);
			if (so->so_rcv.sb_cc > 0 || so->so_snd.sb_cc > 0) {
				/*
				 * selthreadclear() already called
				 * during sofreelastref() above.
				 */
				sbrelease(&so->so_rcv);
				sbrelease(&so->so_snd);
			}
			if (so->so_head != NULL) {
				panic("%s: so=%p head still exist\n",
				    __func__, so);
				/* NOTREACHED */
			}
			lck_mtx_unlock(&inp->inpcb_mtx);

#if NECP
			necp_inpcb_remove_cb(inp);
#endif /* NECP */

			lck_mtx_destroy(&inp->inpcb_mtx, ipi->ipi_lock_grp);
		}
		/* makes sure we're not called twice from so_close */
		so->so_flags |= SOF_PCBCLEARING;
		so->so_saved_pcb = (caddr_t)inp;
		so->so_pcb = NULL;
		inp->inp_socket = NULL;
#if CONFIG_MACF_NET
		mac_inpcb_label_destroy(inp);
#endif /* CONFIG_MACF_NET */
#if NECP
		necp_inpcb_dispose(inp);
#endif /* NECP */
		/*
		 * In case there is a route cached after a detach (possible
		 * in the TCP case), make sure that it is freed before
		 * we deallocate the structure.
		 */
		ROUTE_RELEASE(&inp->inp_route);
		if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
			zfree(ipi->ipi_zone, inp);
		}
		sodealloc(so);
	}
}

/*
 * The calling convention of in_getsockaddr() and in_getpeeraddr() was
 * modified to match the pru_sockaddr() and pru_peeraddr() entry points
 * in struct pr_usrreqs, so that protocols can just reference them directly
 * without the need for a wrapper function.
 */
int
in_getsockaddr(struct socket *so, struct sockaddr **nam)
{
	struct inpcb *inp;
	struct sockaddr_in *sin;

	/*
	 * Do the malloc first in case it blocks.
	 */
	MALLOC(sin, struct sockaddr_in *, sizeof (*sin), M_SONAME, M_WAITOK);
	if (sin == NULL)
		return (ENOBUFS);
	bzero(sin, sizeof (*sin));
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof (*sin);

	if ((inp = sotoinpcb(so)) == NULL) {
		FREE(sin, M_SONAME);
		return (EINVAL);
	}
	sin->sin_port = inp->inp_lport;
	sin->sin_addr = inp->inp_laddr;

	*nam = (struct sockaddr *)sin;
	return (0);
}

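/*
 * Variant of in_getsockaddr() that fills in a caller-supplied
 * sockaddr_in instead of allocating one, so it cannot fail with
 * ENOBUFS and never blocks.
 */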
int
in_getsockaddr_s(struct socket *so, struct sockaddr_in *ss)
{
	struct sockaddr_in *sin = ss;
	struct inpcb *inp;

	VERIFY(ss != NULL);
	bzero(ss, sizeof (*ss));

	sin->sin_family = AF_INET;
	sin->sin_len = sizeof (*sin);

	if ((inp = sotoinpcb(so)) == NULL)
		return (EINVAL);

	sin->sin_port = inp->inp_lport;
	sin->sin_addr = inp->inp_laddr;
	return (0);
}

int
in_getpeeraddr(struct socket *so, struct sockaddr **nam)
{
	struct inpcb *inp;
	struct sockaddr_in *sin;

	/*
	 * Do the malloc first in case it blocks.
	 */
	MALLOC(sin, struct sockaddr_in *, sizeof (*sin), M_SONAME, M_WAITOK);
	if (sin == NULL)
		return (ENOBUFS);
	bzero((caddr_t)sin, sizeof (*sin));
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof (*sin);

	if ((inp = sotoinpcb(so)) == NULL) {
		FREE(sin, M_SONAME);
		return (EINVAL);
	}
	sin->sin_port = inp->inp_fport;
	sin->sin_addr = inp->inp_faddr;

	*nam = (struct sockaddr *)sin;
	return (0);
}

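/*
 * Walk every IPv4 PCB in the given pcbinfo and invoke "notify" with
 * "errno" on those connected to the foreign address "faddr"; typically
 * used to propagate ICMP errors and routing changes to matching sockets.
 */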
1802 void
1803 in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1804 int errno, void (*notify)(struct inpcb *, int))
1805 {
1806 struct inpcb *inp;
1807
1808 lck_rw_lock_shared(pcbinfo->ipi_lock);
1809
1810 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1811 #if INET6
1812 if (!(inp->inp_vflag & INP_IPV4))
1813 continue;
1814 #endif /* INET6 */
1815 if (inp->inp_faddr.s_addr != faddr.s_addr ||
1816 inp->inp_socket == NULL)
1817 continue;
1818 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
1819 continue;
1820 socket_lock(inp->inp_socket, 1);
1821 (*notify)(inp, errno);
1822 (void) in_pcb_checkstate(inp, WNT_RELEASE, 1);
1823 socket_unlock(inp->inp_socket, 1);
1824 }
1825 lck_rw_done(pcbinfo->ipi_lock);
1826 }
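/*
 * Illustrative note: a typical caller is a protocol ctlinput routine
 * reacting to an ICMP error, along the lines of (cmd assumed to be a
 * PRC_* code):
 *
 *	in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], tcp_notify);
 */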
1827
1828 /*
1829 * Check for alternatives when higher level complains
1830 * about service problems. For now, invalidate cached
1831 * routing information. If the route was created dynamically
1832 * (by a redirect), time to try a default gateway again.
1833 */
1834 void
1835 in_losing(struct inpcb *inp)
1836 {
1837 boolean_t release = FALSE;
1838 struct rtentry *rt;
1839
1840 if ((rt = inp->inp_route.ro_rt) != NULL) {
1841 struct in_ifaddr *ia = NULL;
1842
1843 RT_LOCK(rt);
1844 if (rt->rt_flags & RTF_DYNAMIC) {
1845 /*
1846 * Prevent another thread from modifying rt_key,
1847 * rt_gateway via rt_setgate() after rt_lock is
1848 * dropped by marking the route as defunct.
1849 */
1850 rt->rt_flags |= RTF_CONDEMNED;
1851 RT_UNLOCK(rt);
1852 (void) rtrequest(RTM_DELETE, rt_key(rt),
1853 rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
1854 } else {
1855 RT_UNLOCK(rt);
1856 }
1857 /* if the address is gone keep the old route in the pcb */
1858 if (inp->inp_laddr.s_addr != INADDR_ANY &&
1859 (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
1860 /*
1861 * Address is around; ditch the route. A new route
1862 * can be allocated the next time output is attempted.
1863 */
1864 release = TRUE;
1865 }
1866 if (ia != NULL)
1867 IFA_REMREF(&ia->ia_ifa);
1868 }
1869 if (rt == NULL || release)
1870 ROUTE_RELEASE(&inp->inp_route);
1871 }
1872
1873 /*
1874 * After a routing change, flush old routing
1875 * and allocate a (hopefully) better one.
1876 */
1877 void
1878 in_rtchange(struct inpcb *inp, int errno)
1879 {
1880 #pragma unused(errno)
1881 boolean_t release = FALSE;
1882 struct rtentry *rt;
1883
1884 if ((rt = inp->inp_route.ro_rt) != NULL) {
1885 struct in_ifaddr *ia = NULL;
1886
1887 /* if address is gone, keep the old route */
1888 if (inp->inp_laddr.s_addr != INADDR_ANY &&
1889 (ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
1890 /*
1891 * Address is around; ditch the route. A new route
1892 * can be allocated the next time output is attempted.
1893 */
1894 release = TRUE;
1895 }
1896 if (ia != NULL)
1897 IFA_REMREF(&ia->ia_ifa);
1898 }
1899 if (rt == NULL || release)
1900 ROUTE_RELEASE(&inp->inp_route);
1901 }
1902
1903 /*
1904 * Lookup a PCB based on the local address and port.
1905 */
1906 struct inpcb *
1907 in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
1908 unsigned int lport_arg, int wild_okay)
1909 {
1910 struct inpcb *inp;
1911 int matchwild = 3, wildcard;
1912 u_short lport = lport_arg;
1913
1914 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0, 0, 0, 0, 0);
1915
1916 if (!wild_okay) {
1917 struct inpcbhead *head;
1918 /*
1919 * Look for an unconnected (wildcard foreign addr) PCB that
1920 * matches the local address and port we're looking for.
1921 */
1922 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
1923 pcbinfo->ipi_hashmask)];
1924 LIST_FOREACH(inp, head, inp_hash) {
1925 #if INET6
1926 if (!(inp->inp_vflag & INP_IPV4))
1927 continue;
1928 #endif /* INET6 */
1929 if (inp->inp_faddr.s_addr == INADDR_ANY &&
1930 inp->inp_laddr.s_addr == laddr.s_addr &&
1931 inp->inp_lport == lport) {
1932 /*
1933 * Found.
1934 */
1935 return (inp);
1936 }
1937 }
1938 /*
1939 * Not found.
1940 */
1941 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0, 0, 0, 0, 0);
1942 return (NULL);
1943 } else {
1944 struct inpcbporthead *porthash;
1945 struct inpcbport *phd;
1946 struct inpcb *match = NULL;
1947 /*
1948 * Best fit PCB lookup.
1949 *
1950 * First see if this local port is in use by looking on the
1951 * port hash list.
1952 */
1953 porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
1954 pcbinfo->ipi_porthashmask)];
1955 LIST_FOREACH(phd, porthash, phd_hash) {
1956 if (phd->phd_port == lport)
1957 break;
1958 }
1959 if (phd != NULL) {
1960 /*
1961 * Port is in use by one or more PCBs. Look for best
1962 * fit.
1963 */
1964 LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
1965 wildcard = 0;
1966 #if INET6
1967 if (!(inp->inp_vflag & INP_IPV4))
1968 continue;
1969 #endif /* INET6 */
1970 if (inp->inp_faddr.s_addr != INADDR_ANY)
1971 wildcard++;
1972 if (inp->inp_laddr.s_addr != INADDR_ANY) {
1973 if (laddr.s_addr == INADDR_ANY)
1974 wildcard++;
1975 else if (inp->inp_laddr.s_addr !=
1976 laddr.s_addr)
1977 continue;
1978 } else {
1979 if (laddr.s_addr != INADDR_ANY)
1980 wildcard++;
1981 }
1982 if (wildcard < matchwild) {
1983 match = inp;
1984 matchwild = wildcard;
1985 if (matchwild == 0) {
1986 break;
1987 }
1988 }
1989 }
1990 }
1991 KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,
1992 0, 0, 0, 0);
1993 return (match);
1994 }
1995 }
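/*
 * Illustrative note on the best-fit scoring above: a candidate PCB
 * accrues one "wildcard" point per address left unspecified relative
 * to the query, so for a lookup with laddr 10.0.0.1 (an assumed
 * example address) the scores work out as:
 *
 *	PCB bound to {laddr 10.0.0.1, faddr set}	wildcard 1
 *	PCB bound to {laddr 10.0.0.1, faddr any}	wildcard 0 (exact)
 *	PCB bound to {laddr any, faddr any}		wildcard 1
 *
 * The lowest score wins; matchwild starts at 3, above the maximum
 * possible score of 2, so any candidate beats "no match", and a
 * score of 0 ends the scan early.
 */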
1996
1997 /*
1998 * Check if PCB exists in hash list.
1999 */
2000 int
2001 in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2002 u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
2003 uid_t *uid, gid_t *gid, struct ifnet *ifp)
2004 {
2005 struct inpcbhead *head;
2006 struct inpcb *inp;
2007 u_short fport = fport_arg, lport = lport_arg;
2008 int found = 0;
2009 struct inpcb *local_wild = NULL;
2010 #if INET6
2011 struct inpcb *local_wild_mapped = NULL;
2012 #endif /* INET6 */
2013
2014 *uid = UID_MAX;
2015 *gid = GID_MAX;
2016
2017 /*
2018 * We may have found the pcb in the last lookup - check this first.
2019 */
2020
2021 lck_rw_lock_shared(pcbinfo->ipi_lock);
2022
2023 /*
2024 * First look for an exact match.
2025 */
2026 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
2027 pcbinfo->ipi_hashmask)];
2028 LIST_FOREACH(inp, head, inp_hash) {
2029 #if INET6
2030 if (!(inp->inp_vflag & INP_IPV4))
2031 continue;
2032 #endif /* INET6 */
2033 if (inp_restricted_recv(inp, ifp))
2034 continue;
2035
2036 if (inp->inp_faddr.s_addr == faddr.s_addr &&
2037 inp->inp_laddr.s_addr == laddr.s_addr &&
2038 inp->inp_fport == fport &&
2039 inp->inp_lport == lport) {
2040 if ((found = (inp->inp_socket != NULL))) {
2041 /*
2042 * Found.
2043 */
2044 *uid = kauth_cred_getuid(
2045 inp->inp_socket->so_cred);
2046 *gid = kauth_cred_getgid(
2047 inp->inp_socket->so_cred);
2048 }
2049 lck_rw_done(pcbinfo->ipi_lock);
2050 return (found);
2051 }
2052 }
2053
2054 if (!wildcard) {
2055 /*
2056 * Not found.
2057 */
2058 lck_rw_done(pcbinfo->ipi_lock);
2059 return (0);
2060 }
2061
2062 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2063 pcbinfo->ipi_hashmask)];
2064 LIST_FOREACH(inp, head, inp_hash) {
2065 #if INET6
2066 if (!(inp->inp_vflag & INP_IPV4))
2067 continue;
2068 #endif /* INET6 */
2069 if (inp_restricted_recv(inp, ifp))
2070 continue;
2071
2072 if (inp->inp_faddr.s_addr == INADDR_ANY &&
2073 inp->inp_lport == lport) {
2074 if (inp->inp_laddr.s_addr == laddr.s_addr) {
2075 if ((found = (inp->inp_socket != NULL))) {
2076 *uid = kauth_cred_getuid(
2077 inp->inp_socket->so_cred);
2078 *gid = kauth_cred_getgid(
2079 inp->inp_socket->so_cred);
2080 }
2081 lck_rw_done(pcbinfo->ipi_lock);
2082 return (found);
2083 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
2084 #if INET6
2085 if (inp->inp_socket &&
2086 SOCK_CHECK_DOM(inp->inp_socket, PF_INET6))
2087 local_wild_mapped = inp;
2088 else
2089 #endif /* INET6 */
2090 local_wild = inp;
2091 }
2092 }
2093 }
2094 if (local_wild == NULL) {
2095 #if INET6
2096 if (local_wild_mapped != NULL) {
2097 if ((found = (local_wild_mapped->inp_socket != NULL))) {
2098 *uid = kauth_cred_getuid(
2099 local_wild_mapped->inp_socket->so_cred);
2100 *gid = kauth_cred_getgid(
2101 local_wild_mapped->inp_socket->so_cred);
2102 }
2103 lck_rw_done(pcbinfo->ipi_lock);
2104 return (found);
2105 }
2106 #endif /* INET6 */
2107 lck_rw_done(pcbinfo->ipi_lock);
2108 return (0);
2109 }
2110 if ((found = (local_wild->inp_socket != NULL))) {
2111 *uid = kauth_cred_getuid(
2112 local_wild->inp_socket->so_cred);
2113 *gid = kauth_cred_getgid(
2114 local_wild->inp_socket->so_cred);
2115 }
2116 lck_rw_done(pcbinfo->ipi_lock);
2117 return (found);
2118 }
2119
2120 /*
2121 * Lookup PCB in hash list.
2122 */
2123 struct inpcb *
2124 in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
2125 u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
2126 struct ifnet *ifp)
2127 {
2128 struct inpcbhead *head;
2129 struct inpcb *inp;
2130 u_short fport = fport_arg, lport = lport_arg;
2131 struct inpcb *local_wild = NULL;
2132 #if INET6
2133 struct inpcb *local_wild_mapped = NULL;
2134 #endif /* INET6 */
2135
2136 /*
2137 * We may have found the pcb in the last lookup - check this first.
2138 */
2139
2140 lck_rw_lock_shared(pcbinfo->ipi_lock);
2141
2142 /*
2143 * First look for an exact match.
2144 */
2145 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
2146 pcbinfo->ipi_hashmask)];
2147 LIST_FOREACH(inp, head, inp_hash) {
2148 #if INET6
2149 if (!(inp->inp_vflag & INP_IPV4))
2150 continue;
2151 #endif /* INET6 */
2152 if (inp_restricted_recv(inp, ifp))
2153 continue;
2154
2155 if (inp->inp_faddr.s_addr == faddr.s_addr &&
2156 inp->inp_laddr.s_addr == laddr.s_addr &&
2157 inp->inp_fport == fport &&
2158 inp->inp_lport == lport) {
2159 /*
2160 * Found.
2161 */
2162 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2163 WNT_STOPUSING) {
2164 lck_rw_done(pcbinfo->ipi_lock);
2165 return (inp);
2166 } else {
2167 /* it's there but dead, say it isn't found */
2168 lck_rw_done(pcbinfo->ipi_lock);
2169 return (NULL);
2170 }
2171 }
2172 }
2173
2174 if (!wildcard) {
2175 /*
2176 * Not found.
2177 */
2178 lck_rw_done(pcbinfo->ipi_lock);
2179 return (NULL);
2180 }
2181
2182 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
2183 pcbinfo->ipi_hashmask)];
2184 LIST_FOREACH(inp, head, inp_hash) {
2185 #if INET6
2186 if (!(inp->inp_vflag & INP_IPV4))
2187 continue;
2188 #endif /* INET6 */
2189 if (inp_restricted_recv(inp, ifp))
2190 continue;
2191
2192 if (inp->inp_faddr.s_addr == INADDR_ANY &&
2193 inp->inp_lport == lport) {
2194 if (inp->inp_laddr.s_addr == laddr.s_addr) {
2195 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
2196 WNT_STOPUSING) {
2197 lck_rw_done(pcbinfo->ipi_lock);
2198 return (inp);
2199 } else {
2200 /* it's dead; say it isn't found */
2201 lck_rw_done(pcbinfo->ipi_lock);
2202 return (NULL);
2203 }
2204 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
2205 #if INET6
2206 if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6))
2207 local_wild_mapped = inp;
2208 else
2209 #endif /* INET6 */
2210 local_wild = inp;
2211 }
2212 }
2213 }
2214 if (local_wild == NULL) {
2215 #if INET6
2216 if (local_wild_mapped != NULL) {
2217 if (in_pcb_checkstate(local_wild_mapped,
2218 WNT_ACQUIRE, 0) != WNT_STOPUSING) {
2219 lck_rw_done(pcbinfo->ipi_lock);
2220 return (local_wild_mapped);
2221 } else {
2222 /* it's dead; say it isn't found */
2223 lck_rw_done(pcbinfo->ipi_lock);
2224 return (NULL);
2225 }
2226 }
2227 #endif /* INET6 */
2228 lck_rw_done(pcbinfo->ipi_lock);
2229 return (NULL);
2230 }
2231 if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
2232 lck_rw_done(pcbinfo->ipi_lock);
2233 return (local_wild);
2234 }
2235 /*
2236 * It's either not found or is already dead.
2237 */
2238 lck_rw_done(pcbinfo->ipi_lock);
2239 return (NULL);
2240 }
2241
2242 /*
2243 * @brief Insert PCB onto various hash lists.
2244 *
2245 * @param inp Pointer to internet protocol control block
2246 * @param locked Indicates whether ipi_lock (which protects the
2247 * pcb list) is already held by the caller.
2248 *
2249 * @return int error code on failure and 0 on success
2250 */
2251 int
2252 in_pcbinshash(struct inpcb *inp, int locked)
2253 {
2254 struct inpcbhead *pcbhash;
2255 struct inpcbporthead *pcbporthash;
2256 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2257 struct inpcbport *phd;
2258 u_int32_t hashkey_faddr;
2259
2260 if (!locked) {
2261 if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
2262 /*
2263 * Lock inversion issue, mostly with UDP
2264 * multicast packets.
2265 */
2266 socket_unlock(inp->inp_socket, 0);
2267 lck_rw_lock_exclusive(pcbinfo->ipi_lock);
2268 socket_lock(inp->inp_socket, 0);
2269 }
2270 }
2271
2272 /*
2273 * This routine or its caller may have given up the
2274 * socket's protocol lock briefly.
2275 * During that time the socket may have been dropped.
2276 * Safeguard against that here.
2277 */
2278 if (inp->inp_state == INPCB_STATE_DEAD) {
2279 if (!locked) {
2280 lck_rw_done(pcbinfo->ipi_lock);
2281 }
2282 return (ECONNABORTED);
2283 }
2284
2285
2286 #if INET6
2287 if (inp->inp_vflag & INP_IPV6)
2288 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2289 else
2290 #endif /* INET6 */
2291 hashkey_faddr = inp->inp_faddr.s_addr;
2292
2293 inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
2294 inp->inp_fport, pcbinfo->ipi_hashmask);
2295
2296 pcbhash = &pcbinfo->ipi_hashbase[inp->inp_hash_element];
2297
2298 pcbporthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(inp->inp_lport,
2299 pcbinfo->ipi_porthashmask)];
2300
2301 /*
2302 * Go through port list and look for a head for this lport.
2303 */
2304 LIST_FOREACH(phd, pcbporthash, phd_hash) {
2305 if (phd->phd_port == inp->inp_lport)
2306 break;
2307 }
2308
2309 /*
2310 * If none exists, malloc one and tack it on.
2311 */
2312 if (phd == NULL) {
2313 MALLOC(phd, struct inpcbport *, sizeof (struct inpcbport),
2314 M_PCB, M_WAITOK);
2315 if (phd == NULL) {
2316 if (!locked)
2317 lck_rw_done(pcbinfo->ipi_lock);
2318 return (ENOBUFS); /* XXX */
2319 }
2320 phd->phd_port = inp->inp_lport;
2321 LIST_INIT(&phd->phd_pcblist);
2322 LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
2323 }
2324
2325 VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2326
2327
2328 inp->inp_phd = phd;
2329 LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
2330 LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
2331 inp->inp_flags2 |= INP2_INHASHLIST;
2332
2333 if (!locked)
2334 lck_rw_done(pcbinfo->ipi_lock);
2335
2336 #if NECP
2337 // This call catches the original setting of the local address
2338 inp_update_necp_policy(inp, NULL, NULL, 0);
2339 #endif /* NECP */
2340
2341 return (0);
2342 }
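/*
 * Illustrative layout of the two structures populated above: one
 * inpcbport head per local port on the port hash, one chain entry per
 * {faddr, fport, laddr, lport} tuple on the connection hash (port 80
 * and the inp names are hypothetical):
 *
 *	ipi_porthashbase[i] --> phd(lport 80) --> phd_pcblist: inp1, inp2
 *	ipi_hashbase[j]     --> inp_hash chain: inp1 --> inp3 --> ...
 */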
2343
2344 /*
2345 * Move PCB to the proper hash bucket when { faddr, fport } have been
2346 * changed. NOTE: This does not handle the case of the lport changing (the
2347 * hashed port list would have to be updated as well), so the lport must
2348 * not change after in_pcbinshash() has been called.
2349 */
2350 void
2351 in_pcbrehash(struct inpcb *inp)
2352 {
2353 struct inpcbhead *head;
2354 u_int32_t hashkey_faddr;
2355
2356 #if INET6
2357 if (inp->inp_vflag & INP_IPV6)
2358 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2359 else
2360 #endif /* INET6 */
2361 hashkey_faddr = inp->inp_faddr.s_addr;
2362
2363 inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
2364 inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask);
2365 head = &inp->inp_pcbinfo->ipi_hashbase[inp->inp_hash_element];
2366
2367 if (inp->inp_flags2 & INP2_INHASHLIST) {
2368 LIST_REMOVE(inp, inp_hash);
2369 inp->inp_flags2 &= ~INP2_INHASHLIST;
2370 }
2371
2372 VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2373 LIST_INSERT_HEAD(head, inp, inp_hash);
2374 inp->inp_flags2 |= INP2_INHASHLIST;
2375
2376 #if NECP
2377 // This call catches updates to the remote addresses
2378 inp_update_necp_policy(inp, NULL, NULL, 0);
2379 #endif /* NECP */
2380 }
2381
2382 /*
2383 * Remove PCB from various lists.
2384 * Must be called with the pcbinfo lock held in exclusive mode.
2385 */
2386 void
2387 in_pcbremlists(struct inpcb *inp)
2388 {
2389 inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt;
2390
2391 /*
2392 * Check if it's in hashlist -- an inp is placed in hashlist when
2393 * its local port gets assigned. So it should also be present
2394 * in the port list.
2395 */
2396 if (inp->inp_flags2 & INP2_INHASHLIST) {
2397 struct inpcbport *phd = inp->inp_phd;
2398
2399 VERIFY(phd != NULL && inp->inp_lport > 0);
2400
2401 LIST_REMOVE(inp, inp_hash);
2402 inp->inp_hash.le_next = NULL;
2403 inp->inp_hash.le_prev = NULL;
2404
2405 LIST_REMOVE(inp, inp_portlist);
2406 inp->inp_portlist.le_next = NULL;
2407 inp->inp_portlist.le_prev = NULL;
2408 if (LIST_EMPTY(&phd->phd_pcblist)) {
2409 LIST_REMOVE(phd, phd_hash);
2410 FREE(phd, M_PCB);
2411 }
2412 inp->inp_phd = NULL;
2413 inp->inp_flags2 &= ~INP2_INHASHLIST;
2414 }
2415 VERIFY(!(inp->inp_flags2 & INP2_INHASHLIST));
2416
2417 if (inp->inp_flags2 & INP2_TIMEWAIT) {
2418 /* Remove from time-wait queue */
2419 tcp_remove_from_time_wait(inp);
2420 inp->inp_flags2 &= ~INP2_TIMEWAIT;
2421 VERIFY(inp->inp_pcbinfo->ipi_twcount != 0);
2422 inp->inp_pcbinfo->ipi_twcount--;
2423 } else {
2424 /* Remove from global inp list if it is not time-wait */
2425 LIST_REMOVE(inp, inp_list);
2426 }
2427
2428 if (inp->inp_flags2 & INP2_IN_FCTREE) {
2429 inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED|INPFC_REMOVE));
2430 VERIFY(!(inp->inp_flags2 & INP2_IN_FCTREE));
2431 }
2432
2433 inp->inp_pcbinfo->ipi_count--;
2434 }
2435
2436 /*
2437 * Mechanism used to defer the memory release of PCBs.
2438 * The pcb list will contain the pcb until the reaper can clean it
2439 * up, if the following conditions are met:
2440 * 1) state "DEAD", 2) wantcnt is STOPUSING, 3) usecount is 0.
2441 * This function is called to either mark the pcb as ready for
2442 * recycling (WNT_STOPUSING), or to acquire (WNT_ACQUIRE) or
2443 * release (WNT_RELEASE) a reference on it.
2444 */
2445 int
2446 in_pcb_checkstate(struct inpcb *pcb, int mode, int locked)
2447 {
2448 volatile UInt32 *wantcnt = (volatile UInt32 *)&pcb->inp_wantcnt;
2449 UInt32 origwant;
2450 UInt32 newwant;
2451
2452 switch (mode) {
2453 case WNT_STOPUSING:
2454 /*
2455 * Try to mark the pcb as ready for recycling. CAS the want
2456 * count with STOPUSING; if that succeeds we're done, and if
2457 * the pcb is still in use it will be marked later.
2458 */
2459 if (locked == 0)
2460 socket_lock(pcb->inp_socket, 1);
2461 pcb->inp_state = INPCB_STATE_DEAD;
2462
2463 stopusing:
2464 if (pcb->inp_socket->so_usecount < 0) {
2465 panic("%s: pcb=%p so=%p usecount is negative\n",
2466 __func__, pcb, pcb->inp_socket);
2467 /* NOTREACHED */
2468 }
2469 if (locked == 0)
2470 socket_unlock(pcb->inp_socket, 1);
2471
2472 inpcb_gc_sched(pcb->inp_pcbinfo, INPCB_TIMER_FAST);
2473
2474 origwant = *wantcnt;
2475 if ((UInt16) origwant == 0xffff) /* should stop using */
2476 return (WNT_STOPUSING);
2477 newwant = 0xffff;
2478 if ((UInt16) origwant == 0) {
2479 /* try to mark it as unusable now */
2480 OSCompareAndSwap(origwant, newwant, wantcnt);
2481 }
2482 return (WNT_STOPUSING);
2483
2484 case WNT_ACQUIRE:
2485 /*
2486 * Try to take a reference on the pcb. If the want count is
2487 * already at WNT_STOPUSING, bail out and report that;
2488 * otherwise increase the count.
2489 */
2490 do {
2491 origwant = *wantcnt;
2492 if ((UInt16) origwant == 0xffff) {
2493 /* should stop using */
2494 return (WNT_STOPUSING);
2495 }
2496 newwant = origwant + 1;
2497 } while (!OSCompareAndSwap(origwant, newwant, wantcnt));
2498 return (WNT_ACQUIRE);
2499
2500 case WNT_RELEASE:
2501 /*
2502 * Release a reference. If the pcb state is DEAD after the
2503 * drop, try to move the want count to STOPUSING.
2504 */
2505 if (locked == 0)
2506 socket_lock(pcb->inp_socket, 1);
2507
2508 do {
2509 origwant = *wantcnt;
2510 if ((UInt16) origwant == 0x0) {
2511 panic("%s: pcb=%p release with zero count",
2512 __func__, pcb);
2513 /* NOTREACHED */
2514 }
2515 if ((UInt16) origwant == 0xffff) {
2516 /* should stop using */
2517 if (locked == 0)
2518 socket_unlock(pcb->inp_socket, 1);
2519 return (WNT_STOPUSING);
2520 }
2521 newwant = origwant - 1;
2522 } while (!OSCompareAndSwap(origwant, newwant, wantcnt));
2523
2524 if (pcb->inp_state == INPCB_STATE_DEAD)
2525 goto stopusing;
2526 if (pcb->inp_socket->so_usecount < 0) {
2527 panic("%s: RELEASE pcb=%p so=%p usecount is negative\n",
2528 __func__, pcb, pcb->inp_socket);
2529 /* NOTREACHED */
2530 }
2531
2532 if (locked == 0)
2533 socket_unlock(pcb->inp_socket, 1);
2534 return (WNT_RELEASE);
2535
2536 default:
2537 panic("%s: so=%p not a valid state =%x\n", __func__,
2538 pcb->inp_socket, mode);
2539 /* NOTREACHED */
2540 }
2541
2542 /* NOTREACHED */
2543 return (mode);
2544 }
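/*
 * Illustrative usage, mirroring the pattern in in_pcbnotifyall()
 * above:
 *
 *	if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
 *		continue;	(pcb is being reclaimed; skip it)
 *	socket_lock(inp->inp_socket, 1);
 *	...use the pcb...
 *	(void) in_pcb_checkstate(inp, WNT_RELEASE, 1);
 *	socket_unlock(inp->inp_socket, 1);
 */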
2545
2546 /*
2547 * inpcb_to_compat copies specific bits of an inpcb to an inpcb_compat.
2548 * The inpcb_compat data structure is passed to user space and must
2549 * not change. We intentionally avoid copying pointers.
2550 */
2551 void
2552 inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat)
2553 {
2554 bzero(inp_compat, sizeof (*inp_compat));
2555 inp_compat->inp_fport = inp->inp_fport;
2556 inp_compat->inp_lport = inp->inp_lport;
2557 inp_compat->nat_owner = 0;
2558 inp_compat->nat_cookie = 0;
2559 inp_compat->inp_gencnt = inp->inp_gencnt;
2560 inp_compat->inp_flags = inp->inp_flags;
2561 inp_compat->inp_flow = inp->inp_flow;
2562 inp_compat->inp_vflag = inp->inp_vflag;
2563 inp_compat->inp_ip_ttl = inp->inp_ip_ttl;
2564 inp_compat->inp_ip_p = inp->inp_ip_p;
2565 inp_compat->inp_dependfaddr.inp6_foreign =
2566 inp->inp_dependfaddr.inp6_foreign;
2567 inp_compat->inp_dependladdr.inp6_local =
2568 inp->inp_dependladdr.inp6_local;
2569 inp_compat->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
2570 inp_compat->inp_depend6.inp6_hlim = 0;
2571 inp_compat->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
2572 inp_compat->inp_depend6.inp6_ifindex = 0;
2573 inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
2574 }
2575
2576 #if !CONFIG_EMBEDDED
2577 void
2578 inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp)
2579 {
2580 xinp->inp_fport = inp->inp_fport;
2581 xinp->inp_lport = inp->inp_lport;
2582 xinp->inp_gencnt = inp->inp_gencnt;
2583 xinp->inp_flags = inp->inp_flags;
2584 xinp->inp_flow = inp->inp_flow;
2585 xinp->inp_vflag = inp->inp_vflag;
2586 xinp->inp_ip_ttl = inp->inp_ip_ttl;
2587 xinp->inp_ip_p = inp->inp_ip_p;
2588 xinp->inp_dependfaddr.inp6_foreign = inp->inp_dependfaddr.inp6_foreign;
2589 xinp->inp_dependladdr.inp6_local = inp->inp_dependladdr.inp6_local;
2590 xinp->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
2591 xinp->inp_depend6.inp6_hlim = 0;
2592 xinp->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
2593 xinp->inp_depend6.inp6_ifindex = 0;
2594 xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
2595 }
2596 #endif /* !CONFIG_EMBEDDED */
2597
2598 /*
2599 * The following routines implement this scheme:
2600 *
2601 * Callers of ip_output() that intend to cache the route in the inpcb pass
2602 * a local copy of the struct route to ip_output(). Using a local copy of
2603 * the cached route significantly simplifies things as IP no longer has to
2604 * worry about having exclusive access to the passed in struct route, since
2605 * it's defined in the caller's stack; in essence, this allows for a lock-
2606 * less operation when updating the struct route at the IP level and below,
2607 * whenever necessary. The scheme works as follows:
2608 *
2609 * Prior to dropping the socket's lock and calling ip_output(), the caller
2610 * copies the struct route from the inpcb into its stack, and adds a reference
2611 * to the cached route entry, if there was any. The socket's lock is then
2612 * dropped and ip_output() is called with a pointer to the copy of struct
2613 * route defined on the stack (not to the one in the inpcb.)
2614 *
2615 * Upon returning from ip_output(), the caller then acquires the socket's
2616 * lock and synchronizes the cache; if there is no route cached in the inpcb,
2617 * it copies the local copy of struct route (which may or may not contain any
2618 * route) back into the cache; otherwise, if the inpcb has a route cached in
2619 * it, the one in the local copy will be freed, if there's any. Trashing the
2620 * cached route in the inpcb can be avoided because ip_output() is single-
2621 * threaded per-PCB (i.e. multiple transmits on a PCB are always serialized
2622 * by the socket/transport layer.)
2623 */
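/*
 * A minimal sketch of that caller pattern (illustrative; the actual
 * transmit paths, e.g. udp_output(), follow this shape):
 *
 *	struct route ro;
 *
 *	inp_route_copyout(inp, &ro);	copy cached route + ref to stack
 *	socket_unlock(so, 0);
 *	error = ip_output(m, opts, &ro, flags, mopts, &ipoa);
 *	socket_lock(so, 0);
 *	inp_route_copyin(inp, &ro);	sync cache with the stack copy
 */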
2624 void
2625 inp_route_copyout(struct inpcb *inp, struct route *dst)
2626 {
2627 struct route *src = &inp->inp_route;
2628
2629 socket_lock_assert_owned(inp->inp_socket);
2630
2631 /*
2632 * If the route in the PCB is stale or not for IPv4, blow it away;
2633 * the latter is possible in the IPv4-mapped address case.
2634 */
2635 if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET)
2636 ROUTE_RELEASE(src);
2637
2638 route_copyout(dst, src, sizeof (*dst));
2639 }
2640
2641 void
2642 inp_route_copyin(struct inpcb *inp, struct route *src)
2643 {
2644 struct route *dst = &inp->inp_route;
2645
2646 socket_lock_assert_owned(inp->inp_socket);
2647
2648 /* Minor sanity check */
2649 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET)
2650 panic("%s: wrong or corrupted route: %p", __func__, src);
2651
2652 route_copyin(src, dst, sizeof (*src));
2653 }
2654
2655 /*
2656 * Handler for setting IP_BOUND_IF/IPV6_BOUND_IF socket option.
2657 */
2658 int
2659 inp_bindif(struct inpcb *inp, unsigned int ifscope, struct ifnet **pifp)
2660 {
2661 struct ifnet *ifp = NULL;
2662
2663 ifnet_head_lock_shared();
2664 if ((ifscope > (unsigned)if_index) || (ifscope != IFSCOPE_NONE &&
2665 (ifp = ifindex2ifnet[ifscope]) == NULL)) {
2666 ifnet_head_done();
2667 return (ENXIO);
2668 }
2669 ifnet_head_done();
2670
2671 VERIFY(ifp != NULL || ifscope == IFSCOPE_NONE);
2672
2673 /*
2674 * A zero interface scope value indicates an "unbind".
2675 * Otherwise, take in whatever value the app desires;
2676 * the app may already know the scope (or force itself
2677 * to such a scope) ahead of time before the interface
2678 * gets attached. It doesn't matter either way; any
2679 * route lookup from this point on will require an
2680 * exact match for the embedded interface scope.
2681 */
2682 inp->inp_boundifp = ifp;
2683 if (inp->inp_boundifp == NULL)
2684 inp->inp_flags &= ~INP_BOUND_IF;
2685 else
2686 inp->inp_flags |= INP_BOUND_IF;
2687
2688 /* Blow away any cached route in the PCB */
2689 ROUTE_RELEASE(&inp->inp_route);
2690
2691 if (pifp != NULL)
2692 *pifp = ifp;
2693
2694 return (0);
2695 }
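/*
 * Illustrative userland path into this handler ("en0" is an assumed
 * example interface):
 *
 *	int idx = if_nametoindex("en0");
 *	setsockopt(s, IPPROTO_IP, IP_BOUND_IF, &idx, sizeof (idx));
 *
 * Passing an index of 0 (IFSCOPE_NONE) performs the "unbind".
 */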
2696
2697 /*
2698 * Handler for setting IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
2699 * as well as for setting PROC_UUID_NO_CELLULAR policy.
2700 */
2701 void
2702 inp_set_nocellular(struct inpcb *inp)
2703 {
2704 inp->inp_flags |= INP_NO_IFT_CELLULAR;
2705
2706 /* Blow away any cached route in the PCB */
2707 ROUTE_RELEASE(&inp->inp_route);
2708 }
2709
2710 /*
2711 * Handler for clearing IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
2712 * as well as for clearing PROC_UUID_NO_CELLULAR policy.
2713 */
2714 void
2715 inp_clear_nocellular(struct inpcb *inp)
2716 {
2717 struct socket *so = inp->inp_socket;
2718
2719 /*
2720 * SO_RESTRICT_DENY_CELLULAR socket restriction issued on the socket
2721 * has a higher precedence than INP_NO_IFT_CELLULAR. Clear the flag
2722 * if and only if the socket is unrestricted.
2723 */
2724 if (so != NULL && !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
2725 inp->inp_flags &= ~INP_NO_IFT_CELLULAR;
2726
2727 /* Blow away any cached route in the PCB */
2728 ROUTE_RELEASE(&inp->inp_route);
2729 }
2730 }
2731
2732 void
2733 inp_set_noexpensive(struct inpcb *inp)
2734 {
2735 inp->inp_flags2 |= INP2_NO_IFF_EXPENSIVE;
2736
2737 /* Blow away any cached route in the PCB */
2738 ROUTE_RELEASE(&inp->inp_route);
2739 }
2740
2741 void
2742 inp_set_awdl_unrestricted(struct inpcb *inp)
2743 {
2744 inp->inp_flags2 |= INP2_AWDL_UNRESTRICTED;
2745
2746 /* Blow away any cached route in the PCB */
2747 ROUTE_RELEASE(&inp->inp_route);
2748 }
2749
2750 boolean_t
2751 inp_get_awdl_unrestricted(struct inpcb *inp)
2752 {
2753 return (inp->inp_flags2 & INP2_AWDL_UNRESTRICTED) ? TRUE : FALSE;
2754 }
2755
2756 void
2757 inp_clear_awdl_unrestricted(struct inpcb *inp)
2758 {
2759 inp->inp_flags2 &= ~INP2_AWDL_UNRESTRICTED;
2760
2761 /* Blow away any cached route in the PCB */
2762 ROUTE_RELEASE(&inp->inp_route);
2763 }
2764
2765 void
2766 inp_set_intcoproc_allowed(struct inpcb *inp)
2767 {
2768 inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
2769
2770 /* Blow away any cached route in the PCB */
2771 ROUTE_RELEASE(&inp->inp_route);
2772 }
2773
2774 boolean_t
2775 inp_get_intcoproc_allowed(struct inpcb *inp)
2776 {
2777 return (inp->inp_flags2 & INP2_INTCOPROC_ALLOWED) ? TRUE : FALSE;
2778 }
2779
2780 void
2781 inp_clear_intcoproc_allowed(struct inpcb *inp)
2782 {
2783 inp->inp_flags2 &= ~INP2_INTCOPROC_ALLOWED;
2784
2785 /* Blow away any cached route in the PCB */
2786 ROUTE_RELEASE(&inp->inp_route);
2787 }
2788
2789 #if NECP
2790 /*
2791 * Called when PROC_UUID_NECP_APP_POLICY is set.
2792 */
2793 void
2794 inp_set_want_app_policy(struct inpcb *inp)
2795 {
2796 inp->inp_flags2 |= INP2_WANT_APP_POLICY;
2797 }
2798
2799 /*
2800 * Called when PROC_UUID_NECP_APP_POLICY is cleared.
2801 */
2802 void
2803 inp_clear_want_app_policy(struct inpcb *inp)
2804 {
2805 inp->inp_flags2 &= ~INP2_WANT_APP_POLICY;
2806 }
2807 #endif /* NECP */
2808
2809 /*
2810 * Calculate flow hash for an inp, used by an interface to identify a
2811 * flow. When an interface provides a flow control advisory, this flow
2812 * hash is used as an identifier.
2813 */
2814 u_int32_t
2815 inp_calc_flowhash(struct inpcb *inp)
2816 {
2817 struct inp_flowhash_key fh __attribute__((aligned(8)));
2818 u_int32_t flowhash = 0;
2819 struct inpcb *tmp_inp = NULL;
2820
2821 if (inp_hash_seed == 0)
2822 inp_hash_seed = RandomULong();
2823
2824 bzero(&fh, sizeof (fh));
2825
2826 bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof (fh.infh_laddr));
2827 bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof (fh.infh_faddr));
2828
2829 fh.infh_lport = inp->inp_lport;
2830 fh.infh_fport = inp->inp_fport;
2831 fh.infh_af = (inp->inp_vflag & INP_IPV6) ? AF_INET6 : AF_INET;
2832 fh.infh_proto = inp->inp_ip_p;
2833 fh.infh_rand1 = RandomULong();
2834 fh.infh_rand2 = RandomULong();
2835
2836 try_again:
2837 flowhash = net_flowhash(&fh, sizeof (fh), inp_hash_seed);
2838 if (flowhash == 0) {
2839 /* try to get a non-zero flowhash */
2840 inp_hash_seed = RandomULong();
2841 goto try_again;
2842 }
2843
2844 inp->inp_flowhash = flowhash;
2845
2846 /* Insert the inp into inp_fc_tree */
2847 lck_mtx_lock_spin(&inp_fc_lck);
2848 tmp_inp = RB_FIND(inp_fc_tree, &inp_fc_tree, inp);
2849 if (tmp_inp != NULL) {
2850 /*
2851 * There is a different inp with the same flowhash.
2852 * There can be a collision on flow hash but the
2853 * probability is low. Let's recompute the
2854 * flowhash.
2855 */
2856 lck_mtx_unlock(&inp_fc_lck);
2857 /* recompute hash seed */
2858 inp_hash_seed = RandomULong();
2859 goto try_again;
2860 }
2861
2862 RB_INSERT(inp_fc_tree, &inp_fc_tree, inp);
2863 inp->inp_flags2 |= INP2_IN_FCTREE;
2864 lck_mtx_unlock(&inp_fc_lck);
2865
2866 return (flowhash);
2867 }
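/*
 * The returned hash doubles as the inp_fc_tree key, so a queueing
 * layer that wants to issue a flow advisory later needs only the
 * 32-bit value, e.g. (illustrative):
 *
 *	inp_flowadv(flowhash);	wakes up the matching inp, if any
 */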
2868
2869 void
2870 inp_flowadv(uint32_t flowhash)
2871 {
2872 struct inpcb *inp;
2873
2874 inp = inp_fc_getinp(flowhash, 0);
2875
2876 if (inp == NULL)
2877 return;
2878 inp_fc_feedback(inp);
2879 }
2880
2881 /*
2882 * Function to compare inp_fc_entries in inp flow control tree
2883 */
2884 static inline int
2885 infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
2886 {
2887 return (memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
2888 sizeof(inp1->inp_flowhash)));
2889 }
2890
2891 static struct inpcb *
2892 inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
2893 {
2894 struct inpcb *inp = NULL;
2895 int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;
2896
2897 lck_mtx_lock_spin(&inp_fc_lck);
2898 key_inp.inp_flowhash = flowhash;
2899 inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
2900 if (inp == NULL) {
2901 /* inp is not present, return */
2902 lck_mtx_unlock(&inp_fc_lck);
2903 return (NULL);
2904 }
2905
2906 if (flags & INPFC_REMOVE) {
2907 RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
2908 lck_mtx_unlock(&inp_fc_lck);
2909
2910 bzero(&(inp->infc_link), sizeof (inp->infc_link));
2911 inp->inp_flags2 &= ~INP2_IN_FCTREE;
2912 return (NULL);
2913 }
2914
2915 if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
2916 inp = NULL;
2917 lck_mtx_unlock(&inp_fc_lck);
2918
2919 return (inp);
2920 }
2921
2922 static void
2923 inp_fc_feedback(struct inpcb *inp)
2924 {
2925 struct socket *so = inp->inp_socket;
2926
2927 /* we already hold a want_cnt on this inp, socket can't be null */
2928 VERIFY(so != NULL);
2929 socket_lock(so, 1);
2930
2931 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2932 socket_unlock(so, 1);
2933 return;
2934 }
2935
2936 if (inp->inp_sndinprog_cnt > 0)
2937 inp->inp_flags |= INP_FC_FEEDBACK;
2938
2939 /*
2940 * Return if the connection is not in flow-controlled state.
2941 * This can happen if the connection experienced
2942 * loss while it was in the flow-controlled state.
2943 */
2944 if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
2945 socket_unlock(so, 1);
2946 return;
2947 }
2948 inp_reset_fc_state(inp);
2949
2950 if (SOCK_TYPE(so) == SOCK_STREAM)
2951 inp_fc_unthrottle_tcp(inp);
2952
2953 socket_unlock(so, 1);
2954 }
2955
2956 void
2957 inp_reset_fc_state(struct inpcb *inp)
2958 {
2959 struct socket *so = inp->inp_socket;
2960 int suspended = (INP_IS_FLOW_SUSPENDED(inp)) ? 1 : 0;
2961 int needwakeup = (INP_WAIT_FOR_IF_FEEDBACK(inp)) ? 1 : 0;
2962
2963 inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
2964
2965 if (suspended) {
2966 so->so_flags &= ~(SOF_SUSPENDED);
2967 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
2968 }
2969
2970 /* Give a write wakeup to unblock the socket */
2971 if (needwakeup)
2972 sowwakeup(so);
2973 }
2974
2975 int
2976 inp_set_fc_state(struct inpcb *inp, int advcode)
2977 {
2978 struct inpcb *tmp_inp = NULL;
2979 /*
2980 * If there was feedback from the interface while a send
2981 * operation was in progress, we should ignore this flow
2982 * advisory to avoid a race between setting the
2983 * flow-controlled state and receiving feedback from
2984 * the interface.
2985 */
2986 if (inp->inp_flags & INP_FC_FEEDBACK)
2987 return (0);
2988
2989 inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
2990 if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
2991 INPFC_SOLOCKED)) != NULL) {
2992 if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING)
2993 return (0);
2994 VERIFY(tmp_inp == inp);
2995 switch (advcode) {
2996 case FADV_FLOW_CONTROLLED:
2997 inp->inp_flags |= INP_FLOW_CONTROLLED;
2998 break;
2999 case FADV_SUSPENDED:
3000 inp->inp_flags |= INP_FLOW_SUSPENDED;
3001 soevent(inp->inp_socket,
3002 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));
3003
3004 /* Record the fact that suspend event was sent */
3005 inp->inp_socket->so_flags |= SOF_SUSPENDED;
3006 break;
3007 }
3008 return (1);
3009 }
3010 return (0);
3011 }
3012
3013 /*
3014 * Handler for SO_FLUSH socket option.
3015 */
3016 int
3017 inp_flush(struct inpcb *inp, int optval)
3018 {
3019 u_int32_t flowhash = inp->inp_flowhash;
3020 struct ifnet *rtifp, *oifp;
3021
3022 /* Either all classes or one of the valid ones */
3023 if (optval != SO_TC_ALL && !SO_VALID_TC(optval))
3024 return (EINVAL);
3025
3026 /* We need a flow hash for identification */
3027 if (flowhash == 0)
3028 return (0);
3029
3030 /* Grab the interfaces from the route and pcb */
3031 rtifp = ((inp->inp_route.ro_rt != NULL) ?
3032 inp->inp_route.ro_rt->rt_ifp : NULL);
3033 oifp = inp->inp_last_outifp;
3034
3035 if (rtifp != NULL)
3036 if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3037 if (oifp != NULL && oifp != rtifp)
3038 if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
3039
3040 return (0);
3041 }
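/*
 * Illustrative userland trigger for this handler (SO_FLUSH takes
 * SO_TC_ALL or a single valid traffic class as its argument):
 *
 *	int tc = SO_TC_ALL;
 *	setsockopt(s, SOL_SOCKET, SO_FLUSH, &tc, sizeof (tc));
 */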
3042
3043 /*
3044 * Clear the INP_INADDR_ANY flag (special case for PPP only)
3045 */
3046 void
3047 inp_clear_INP_INADDR_ANY(struct socket *so)
3048 {
3049 struct inpcb *inp = NULL;
3050
3051 socket_lock(so, 1);
3052 inp = sotoinpcb(so);
3053 if (inp) {
3054 inp->inp_flags &= ~INP_INADDR_ANY;
3055 }
3056 socket_unlock(so, 1);
3057 }
3058
3059 void
3060 inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
3061 {
3062 struct socket *so = inp->inp_socket;
3063
3064 soprocinfo->spi_pid = so->last_pid;
3065 if (so->last_pid != 0)
3066 uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
3067 /*
3068 * When not delegated, the effective pid is the same as the real pid
3069 */
3070 if (so->so_flags & SOF_DELEGATED) {
3071 soprocinfo->spi_delegated = 1;
3072 soprocinfo->spi_epid = so->e_pid;
3073 uuid_copy(soprocinfo->spi_euuid, so->e_uuid);
3074 } else {
3075 soprocinfo->spi_delegated = 0;
3076 soprocinfo->spi_epid = so->last_pid;
3077 }
3078 }
3079
3080 int
3081 inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
3082 struct so_procinfo *soprocinfo)
3083 {
3084 struct inpcb *inp = NULL;
3085 int found = 0;
3086
3087 bzero(soprocinfo, sizeof (struct so_procinfo));
3088
3089 if (!flowhash)
3090 return (-1);
3091
3092 lck_rw_lock_shared(pcbinfo->ipi_lock);
3093 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
3094 if (inp->inp_state != INPCB_STATE_DEAD &&
3095 inp->inp_socket != NULL &&
3096 inp->inp_flowhash == flowhash) {
3097 found = 1;
3098 inp_get_soprocinfo(inp, soprocinfo);
3099 break;
3100 }
3101 }
3102 lck_rw_done(pcbinfo->ipi_lock);
3103
3104 return (found);
3105 }
3106
3107 #if CONFIG_PROC_UUID_POLICY
3108 static void
3109 inp_update_cellular_policy(struct inpcb *inp, boolean_t set)
3110 {
3111 struct socket *so = inp->inp_socket;
3112 int before, after;
3113
3114 VERIFY(so != NULL);
3115 VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3116
3117 before = INP_NO_CELLULAR(inp);
3118 if (set) {
3119 inp_set_nocellular(inp);
3120 } else {
3121 inp_clear_nocellular(inp);
3122 }
3123 after = INP_NO_CELLULAR(inp);
3124 if (net_io_policy_log && (before != after)) {
3125 static const char *ok = "OK";
3126 static const char *nok = "NOACCESS";
3127 uuid_string_t euuid_buf;
3128 pid_t epid;
3129
3130 if (so->so_flags & SOF_DELEGATED) {
3131 uuid_unparse(so->e_uuid, euuid_buf);
3132 epid = so->e_pid;
3133 } else {
3134 uuid_unparse(so->last_uuid, euuid_buf);
3135 epid = so->last_pid;
3136 }
3137
3138 /* allow this socket to generate another notification event */
3139 so->so_ifdenied_notifies = 0;
3140
3141 log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
3142 "euuid %s%s %s->%s\n", __func__,
3143 (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
3144 SOCK_TYPE(so), epid, euuid_buf,
3145 (so->so_flags & SOF_DELEGATED) ?
3146 " [delegated]" : "",
3147 ((before < after) ? ok : nok),
3148 ((before < after) ? nok : ok));
3149 }
3150 }
3151
3152 #if NECP
3153 static void
3154 inp_update_necp_want_app_policy(struct inpcb *inp, boolean_t set)
3155 {
3156 struct socket *so = inp->inp_socket;
3157 int before, after;
3158
3159 VERIFY(so != NULL);
3160 VERIFY(inp->inp_state != INPCB_STATE_DEAD);
3161
3162 before = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
3163 if (set) {
3164 inp_set_want_app_policy(inp);
3165 } else {
3166 inp_clear_want_app_policy(inp);
3167 }
3168 after = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
3169 if (net_io_policy_log && (before != after)) {
3170 static const char *wanted = "WANTED";
3171 static const char *unwanted = "UNWANTED";
3172 uuid_string_t euuid_buf;
3173 pid_t epid;
3174
3175 if (so->so_flags & SOF_DELEGATED) {
3176 uuid_unparse(so->e_uuid, euuid_buf);
3177 epid = so->e_pid;
3178 } else {
3179 uuid_unparse(so->last_uuid, euuid_buf);
3180 epid = so->last_pid;
3181 }
3182
3183 log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
3184 "euuid %s%s %s->%s\n", __func__,
3185 (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
3186 SOCK_TYPE(so), epid, euuid_buf,
3187 (so->so_flags & SOF_DELEGATED) ?
3188 " [delegated]" : "",
3189 ((before < after) ? unwanted : wanted),
3190 ((before < after) ? wanted : unwanted));
3191 }
3192 }
3193 #endif /* NECP */
3194 #endif /* CONFIG_PROC_UUID_POLICY */
3195
3196 #if NECP
3197 void
3198 inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int override_bound_interface)
3199 {
3200 necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface);
3201 if (necp_socket_should_rescope(inp) &&
3202 inp->inp_lport == 0 &&
3203 inp->inp_laddr.s_addr == INADDR_ANY &&
3204 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
3205 // If we should rescope, and the socket is not yet bound
3206 inp_bindif(inp, necp_socket_get_rescope_if_index(inp), NULL);
3207 }
3208 }
3209 #endif /* NECP */
3210
3211 int
3212 inp_update_policy(struct inpcb *inp)
3213 {
3214 #if CONFIG_PROC_UUID_POLICY
3215 struct socket *so = inp->inp_socket;
3216 uint32_t pflags = 0;
3217 int32_t ogencnt;
3218 int err = 0;
3219
3220 if (!net_io_policy_uuid ||
3221 so == NULL || inp->inp_state == INPCB_STATE_DEAD)
3222 return (0);
3223
3224 /*
3225 * Kernel-created sockets that aren't delegating other sockets
3226 * are currently exempted from UUID policy checks.
3227 */
3228 if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED))
3229 return (0);
3230
3231 ogencnt = so->so_policy_gencnt;
3232 err = proc_uuid_policy_lookup(((so->so_flags & SOF_DELEGATED) ?
3233 so->e_uuid : so->last_uuid), &pflags, &so->so_policy_gencnt);
3234
3235 /*
3236 * Discard cached generation count if the entry is gone (ENOENT),
3237 * so that we go through the checks below.
3238 */
3239 if (err == ENOENT && ogencnt != 0)
3240 so->so_policy_gencnt = 0;
3241
3242 /*
3243 * If the generation count has changed, inspect the policy flags
3244 * and act accordingly. If a policy flag was previously set and
3245 * the UUID is no longer present in the table (ENOENT), treat it
3246 * as if the flag has been cleared.
3247 */
3248 if ((err == 0 || err == ENOENT) && ogencnt != so->so_policy_gencnt) {
3249 /* update cellular policy for this socket */
3250 if (err == 0 && (pflags & PROC_UUID_NO_CELLULAR)) {
3251 inp_update_cellular_policy(inp, TRUE);
3252 } else if (!(pflags & PROC_UUID_NO_CELLULAR)) {
3253 inp_update_cellular_policy(inp, FALSE);
3254 }
3255 #if NECP
3256 /* update necp want app policy for this socket */
3257 if (err == 0 && (pflags & PROC_UUID_NECP_APP_POLICY)) {
3258 inp_update_necp_want_app_policy(inp, TRUE);
3259 } else if (!(pflags & PROC_UUID_NECP_APP_POLICY)) {
3260 inp_update_necp_want_app_policy(inp, FALSE);
3261 }
3262 #endif /* NECP */
3263 }
3264
3265 return ((err == ENOENT) ? 0 : err);
3266 #else /* !CONFIG_PROC_UUID_POLICY */
3267 #pragma unused(inp)
3268 return (0);
3269 #endif /* !CONFIG_PROC_UUID_POLICY */
3270 }
3271
3272 static unsigned int log_restricted;
3273 SYSCTL_DECL(_net_inet);
3274 SYSCTL_INT(_net_inet, OID_AUTO, log_restricted,
3275 CTLFLAG_RW | CTLFLAG_LOCKED, &log_restricted, 0,
3276 "Log network restrictions");
3277 /*
3278 * Called when we need to enforce policy restrictions in the input path.
3279 *
3280 * Returns TRUE if we're not allowed to receive data, otherwise FALSE.
3281 */
3282 static boolean_t
3283 _inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
3284 {
3285 VERIFY(inp != NULL);
3286
3287 /*
3288 * Inbound restrictions.
3289 */
3290 if (!sorestrictrecv)
3291 return (FALSE);
3292
3293 if (ifp == NULL)
3294 return (FALSE);
3295
3296 if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp))
3297 return (TRUE);
3298
3299 if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp))
3300 return (TRUE);
3301
3302 if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp))
3303 return (TRUE);
3304
3305 if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV))
3306 return (FALSE);
3307
3308 if (inp->inp_flags & INP_RECV_ANYIF)
3309 return (FALSE);
3310
3311 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp)
3312 return (FALSE);
3313
3314 if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp))
3315 return (TRUE);
3316
3317 return (TRUE);
3318 }
3319
3320 boolean_t
3321 inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
3322 {
3323 boolean_t ret;
3324
3325 ret = _inp_restricted_recv(inp, ifp);
3326 if (ret == TRUE && log_restricted) {
3327 printf("pid %d (%s) is unable to receive packets on %s\n",
3328 current_proc()->p_pid, proc_best_name(current_proc()),
3329 ifp->if_xname);
3330 }
3331 return (ret);
3332 }
3333
3334 /*
3335 * Called when we need to enforce policy restrictions in the output path.
3336 *
3337 * Returns TRUE if we're not allowed to send data out, otherwise FALSE.
3338 */
3339 static boolean_t
3340 _inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
3341 {
3342 VERIFY(inp != NULL);
3343
3344 /*
3345 * Outbound restrictions.
3346 */
3347 if (!sorestrictsend)
3348 return (FALSE);
3349
3350 if (ifp == NULL)
3351 return (FALSE);
3352
3353 if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp))
3354 return (TRUE);
3355
3356 if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp))
3357 return (TRUE);
3358
3359 if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp))
3360 return (TRUE);
3361
3362 if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp))
3363 return (TRUE);
3364
3365 return (FALSE);
3366 }
3367
3368 boolean_t
3369 inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
3370 {
3371 boolean_t ret;
3372
3373 ret = _inp_restricted_send(inp, ifp);
3374 if (ret == TRUE && log_restricted) {
3375 printf("pid %d (%s) is unable to transmit packets on %s\n",
3376 current_proc()->p_pid, proc_best_name(current_proc()),
3377 ifp->if_xname);
3378 }
3379 return (ret);
3380 }
3381
3382 inline void
3383 inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack)
3384 {
3385 struct ifnet *ifp = inp->inp_last_outifp;
3386 struct socket *so = inp->inp_socket;
3387 if (ifp != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
3388 (ifp->if_type == IFT_CELLULAR ||
3389 ifp->if_subfamily == IFNET_SUBFAMILY_WIFI)) {
3390 int32_t unsent;
3391
3392 so->so_snd.sb_flags |= SB_SNDBYTE_CNT;
3393
3394 /*
3395 * There can be data outstanding before the connection
3396 * becomes established -- TFO case
3397 */
3398 if (so->so_snd.sb_cc > 0)
3399 inp_incr_sndbytes_total(so, so->so_snd.sb_cc);
3400
3401 unsent = inp_get_sndbytes_allunsent(so, th_ack);
3402 if (unsent > 0)
3403 inp_incr_sndbytes_unsent(so, unsent);
3404 }
3405 }
3406
3407 inline void
3408 inp_incr_sndbytes_total(struct socket *so, int32_t len)
3409 {
3410 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3411 struct ifnet *ifp = inp->inp_last_outifp;
3412
3413 if (ifp != NULL) {
3414 VERIFY(ifp->if_sndbyte_total >= 0);
3415 OSAddAtomic64(len, &ifp->if_sndbyte_total);
3416 }
3417 }
3418
3419 inline void
3420 inp_decr_sndbytes_total(struct socket *so, int32_t len)
3421 {
3422 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3423 struct ifnet *ifp = inp->inp_last_outifp;
3424
3425 if (ifp != NULL) {
3426 VERIFY(ifp->if_sndbyte_total >= len);
3427 OSAddAtomic64(-len, &ifp->if_sndbyte_total);
3428 }
3429 }
3430
3431 inline void
3432 inp_incr_sndbytes_unsent(struct socket *so, int32_t len)
3433 {
3434 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3435 struct ifnet *ifp = inp->inp_last_outifp;
3436
3437 if (ifp != NULL) {
3438 VERIFY(ifp->if_sndbyte_unsent >= 0);
3439 OSAddAtomic64(len, &ifp->if_sndbyte_unsent);
3440 }
3441 }
3442
3443 inline void
3444 inp_decr_sndbytes_unsent(struct socket *so, int32_t len)
3445 {
3446 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3447 struct ifnet *ifp = inp->inp_last_outifp;
3448
3449 if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT))
3450 return;
3451
3452 if (ifp != NULL) {
3453 if (ifp->if_sndbyte_unsent >= len)
3454 OSAddAtomic64(-len, &ifp->if_sndbyte_unsent);
3455 else
3456 ifp->if_sndbyte_unsent = 0;
3457 }
3458 }
3459
3460 inline void
3461 inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
3462 {
3463 int32_t len;
3464
3465 if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT))
3466 return;
3467
3468 len = inp_get_sndbytes_allunsent(so, th_ack);
3469 inp_decr_sndbytes_unsent(so, len);
3470 }
3471
3472
3473 inline void
3474 inp_set_activity_bitmap(struct inpcb *inp)
3475 {
3476 in_stat_set_activity_bitmap(&inp->inp_nw_activity, net_uptime());
3477 }
3478
3479 inline void
3480 inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab)
3481 {
3482 bcopy(&inp->inp_nw_activity, ab, sizeof (*ab));
3483 }