2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1988, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * @(#)rtsock.c 8.5 (Berkeley) 11/2/94
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kauth.h>
66 #include <sys/kernel.h>
67 #include <sys/sysctl.h>
69 #include <sys/malloc.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/domain.h>
74 #include <sys/protosw.h>
75 #include <sys/syslog.h>
76 #include <sys/mcache.h>
77 #include <kern/locks.h>
78 #include <sys/codesign.h>
81 #include <net/route.h>
83 #include <net/raw_cb.h>
84 #include <netinet/in.h>
85 #include <netinet/in_var.h>
86 #include <netinet/in_arp.h>
87 #include <netinet/ip.h>
88 #include <netinet/ip6.h>
89 #include <netinet6/nd6.h>
91 extern struct rtstat rtstat
;
92 extern struct domain routedomain_s
;
93 static struct domain
*routedomain
= NULL
;
95 MALLOC_DEFINE(M_RTABLE
, "routetbl", "routing tables");
97 static struct sockaddr route_dst
= { .sa_len
= 2, .sa_family
= PF_ROUTE
, .sa_data
= { 0, } };
98 static struct sockaddr route_src
= { .sa_len
= 2, .sa_family
= PF_ROUTE
, .sa_data
= { 0, } };
99 static struct sockaddr sa_zero
= { .sa_len
= sizeof(sa_zero
), .sa_family
= AF_INET
, .sa_data
= { 0, } };
102 u_int32_t ip_count
; /* attached w/ AF_INET */
103 u_int32_t ip6_count
; /* attached w/ AF_INET6 */
104 u_int32_t any_count
; /* total attached */
107 static struct route_cb route_cb
;
113 struct sysctl_req
*w_req
;
116 static void route_dinit(struct domain
*);
117 static int rts_abort(struct socket
*);
118 static int rts_attach(struct socket
*, int, struct proc
*);
119 static int rts_bind(struct socket
*, struct sockaddr
*, struct proc
*);
120 static int rts_connect(struct socket
*, struct sockaddr
*, struct proc
*);
121 static int rts_detach(struct socket
*);
122 static int rts_disconnect(struct socket
*);
123 static int rts_peeraddr(struct socket
*, struct sockaddr
**);
124 static int rts_send(struct socket
*, int, struct mbuf
*, struct sockaddr
*,
125 struct mbuf
*, struct proc
*);
126 static int rts_shutdown(struct socket
*);
127 static int rts_sockaddr(struct socket
*, struct sockaddr
**);
129 static int route_output(struct mbuf
*, struct socket
*);
130 static int rt_setmetrics(u_int32_t
, struct rt_metrics
*, struct rtentry
*);
131 static void rt_getmetrics(struct rtentry
*, struct rt_metrics
*);
132 static void rt_setif(struct rtentry
*, struct sockaddr
*, struct sockaddr
*,
133 struct sockaddr
*, unsigned int);
134 static int rt_xaddrs(caddr_t
, caddr_t
, struct rt_addrinfo
*);
135 static struct mbuf
*rt_msg1(u_char
, struct rt_addrinfo
*);
136 static int rt_msg2(u_char
, struct rt_addrinfo
*, caddr_t
, struct walkarg
*,
138 static int sysctl_dumpentry(struct radix_node
*rn
, void *vw
);
139 static int sysctl_dumpentry_ext(struct radix_node
*rn
, void *vw
);
140 static int sysctl_iflist(int af
, struct walkarg
*w
);
141 static int sysctl_iflist2(int af
, struct walkarg
*w
);
142 static int sysctl_rtstat(struct sysctl_req
*);
143 static int sysctl_rttrash(struct sysctl_req
*);
144 static int sysctl_rtsock SYSCTL_HANDLER_ARGS
;
146 SYSCTL_NODE(_net
, PF_ROUTE
, routetable
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
149 SYSCTL_NODE(_net
, OID_AUTO
, route
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "routing");
151 /* Align x to 1024 (only power of 2) assuming x is positive */
152 #define ALIGN_BYTES(x) do { \
153 x = (uint32_t)P2ALIGN(x, 1024); \
156 #define ROUNDUP32(a) \
157 ((a) > 0 ? (1 + (((a) - 1) | (sizeof (uint32_t) - 1))) : \
160 #define ADVANCE32(x, n) \
161 (x += ROUNDUP32((n)->sa_len))
163 #define RT_HAS_IFADDR(rt) \
164 ((rt)->rt_ifa != NULL && (rt)->rt_ifa->ifa_addr != NULL)
167 * It really doesn't make any sense at all for this code to share much
168 * with raw_usrreq.c, since its functionality is so restricted. XXX
171 rts_abort(struct socket
*so
)
173 return raw_usrreqs
.pru_abort(so
);
176 /* pru_accept is EOPNOTSUPP */
179 rts_attach(struct socket
*so
, int proto
, struct proc
*p
)
185 VERIFY(so
->so_pcb
== NULL
);
187 MALLOC(rp
, struct rawcb
*, sizeof(*rp
), M_PCB
, M_WAITOK
| M_ZERO
);
192 so
->so_pcb
= (caddr_t
)rp
;
193 /* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */
194 error
= raw_attach(so
, proto
);
199 so
->so_flags
|= SOF_PCBCLEARING
;
203 switch (rp
->rcb_proto
.sp_protocol
) {
205 atomic_add_32(&route_cb
.ip_count
, 1);
208 atomic_add_32(&route_cb
.ip6_count
, 1);
211 rp
->rcb_faddr
= &route_src
;
212 atomic_add_32(&route_cb
.any_count
, 1);
213 /* the socket is already locked when we enter rts_attach */
215 so
->so_options
|= SO_USELOOPBACK
;
220 rts_bind(struct socket
*so
, struct sockaddr
*nam
, struct proc
*p
)
222 return raw_usrreqs
.pru_bind(so
, nam
, p
); /* xxx just EINVAL */
226 rts_connect(struct socket
*so
, struct sockaddr
*nam
, struct proc
*p
)
228 return raw_usrreqs
.pru_connect(so
, nam
, p
); /* XXX just EINVAL */
231 /* pru_connect2 is EOPNOTSUPP */
232 /* pru_control is EOPNOTSUPP */
235 rts_detach(struct socket
*so
)
237 struct rawcb
*rp
= sotorawcb(so
);
241 switch (rp
->rcb_proto
.sp_protocol
) {
243 atomic_add_32(&route_cb
.ip_count
, -1);
246 atomic_add_32(&route_cb
.ip6_count
, -1);
249 atomic_add_32(&route_cb
.any_count
, -1);
250 return raw_usrreqs
.pru_detach(so
);
254 rts_disconnect(struct socket
*so
)
256 return raw_usrreqs
.pru_disconnect(so
);
259 /* pru_listen is EOPNOTSUPP */
262 rts_peeraddr(struct socket
*so
, struct sockaddr
**nam
)
264 return raw_usrreqs
.pru_peeraddr(so
, nam
);
267 /* pru_rcvd is EOPNOTSUPP */
268 /* pru_rcvoob is EOPNOTSUPP */
271 rts_send(struct socket
*so
, int flags
, struct mbuf
*m
, struct sockaddr
*nam
,
272 struct mbuf
*control
, struct proc
*p
)
274 return raw_usrreqs
.pru_send(so
, flags
, m
, nam
, control
, p
);
277 /* pru_sense is null */
280 rts_shutdown(struct socket
*so
)
282 return raw_usrreqs
.pru_shutdown(so
);
286 rts_sockaddr(struct socket
*so
, struct sockaddr
**nam
)
288 return raw_usrreqs
.pru_sockaddr(so
, nam
);
291 static struct pr_usrreqs route_usrreqs
= {
292 .pru_abort
= rts_abort
,
293 .pru_attach
= rts_attach
,
294 .pru_bind
= rts_bind
,
295 .pru_connect
= rts_connect
,
296 .pru_detach
= rts_detach
,
297 .pru_disconnect
= rts_disconnect
,
298 .pru_peeraddr
= rts_peeraddr
,
299 .pru_send
= rts_send
,
300 .pru_shutdown
= rts_shutdown
,
301 .pru_sockaddr
= rts_sockaddr
,
302 .pru_sosend
= sosend
,
303 .pru_soreceive
= soreceive
,
308 route_output(struct mbuf
*m
, struct socket
*so
)
310 struct rt_msghdr
*rtm
= NULL
;
311 struct rtentry
*rt
= NULL
;
312 struct rtentry
*saved_nrt
= NULL
;
313 struct radix_node_head
*rnh
;
314 struct rt_addrinfo info
;
316 sa_family_t dst_sa_family
= 0;
317 struct ifnet
*ifp
= NULL
;
318 struct sockaddr_in dst_in
, gate_in
;
319 int sendonlytoself
= 0;
320 unsigned int ifscope
= IFSCOPE_NONE
;
321 struct rawcb
*rp
= NULL
;
322 boolean_t is_router
= FALSE
;
323 #define senderr(e) { error = (e); goto flush; }
324 if (m
== NULL
|| ((m
->m_len
< sizeof(intptr_t)) &&
325 (m
= m_pullup(m
, sizeof(intptr_t))) == NULL
)) {
328 VERIFY(m
->m_flags
& M_PKTHDR
);
331 * Unlock the socket (but keep a reference) it won't be
332 * accessed until raw_input appends to it.
334 socket_unlock(so
, 0);
335 lck_mtx_lock(rnh_lock
);
337 len
= m
->m_pkthdr
.len
;
338 if (len
< sizeof(*rtm
) ||
339 len
!= mtod(m
, struct rt_msghdr
*)->rtm_msglen
) {
340 info
.rti_info
[RTAX_DST
] = NULL
;
343 R_Malloc(rtm
, struct rt_msghdr
*, len
);
345 info
.rti_info
[RTAX_DST
] = NULL
;
348 m_copydata(m
, 0, len
, (caddr_t
)rtm
);
349 if (rtm
->rtm_version
!= RTM_VERSION
) {
350 info
.rti_info
[RTAX_DST
] = NULL
;
351 senderr(EPROTONOSUPPORT
);
355 * Silent version of RTM_GET for Reachability APIs. We may change
356 * all RTM_GETs to be silent in the future, so this is private for now.
358 if (rtm
->rtm_type
== RTM_GET_SILENT
) {
359 if (!(so
->so_options
& SO_USELOOPBACK
)) {
363 rtm
->rtm_type
= RTM_GET
;
367 * Perform permission checking, only privileged sockets
368 * may perform operations other than RTM_GET
370 if (rtm
->rtm_type
!= RTM_GET
&& !(so
->so_state
& SS_PRIV
)) {
371 info
.rti_info
[RTAX_DST
] = NULL
;
375 rtm
->rtm_pid
= proc_selfpid();
376 info
.rti_addrs
= rtm
->rtm_addrs
;
377 if (rt_xaddrs((caddr_t
)(rtm
+ 1), len
+ (caddr_t
)rtm
, &info
)) {
378 info
.rti_info
[RTAX_DST
] = NULL
;
381 if (info
.rti_info
[RTAX_DST
] == NULL
||
382 info
.rti_info
[RTAX_DST
]->sa_family
>= AF_MAX
||
383 (info
.rti_info
[RTAX_GATEWAY
] != NULL
&&
384 info
.rti_info
[RTAX_GATEWAY
]->sa_family
>= AF_MAX
)) {
388 if (info
.rti_info
[RTAX_DST
]->sa_family
== AF_INET
&&
389 info
.rti_info
[RTAX_DST
]->sa_len
!= sizeof(struct sockaddr_in
)) {
390 /* At minimum, we need up to sin_addr */
391 if (info
.rti_info
[RTAX_DST
]->sa_len
<
392 offsetof(struct sockaddr_in
, sin_zero
)) {
395 bzero(&dst_in
, sizeof(dst_in
));
396 dst_in
.sin_len
= sizeof(dst_in
);
397 dst_in
.sin_family
= AF_INET
;
398 dst_in
.sin_port
= SIN(info
.rti_info
[RTAX_DST
])->sin_port
;
399 dst_in
.sin_addr
= SIN(info
.rti_info
[RTAX_DST
])->sin_addr
;
400 info
.rti_info
[RTAX_DST
] = (struct sockaddr
*)&dst_in
;
401 dst_sa_family
= info
.rti_info
[RTAX_DST
]->sa_family
;
402 } else if (info
.rti_info
[RTAX_DST
]->sa_family
== AF_INET6
&&
403 info
.rti_info
[RTAX_DST
]->sa_len
< sizeof(struct sockaddr_in6
)) {
407 if (info
.rti_info
[RTAX_GATEWAY
] != NULL
) {
408 if (info
.rti_info
[RTAX_GATEWAY
]->sa_family
== AF_INET
&&
409 info
.rti_info
[RTAX_GATEWAY
]->sa_len
!= sizeof(struct sockaddr_in
)) {
410 /* At minimum, we need up to sin_addr */
411 if (info
.rti_info
[RTAX_GATEWAY
]->sa_len
<
412 offsetof(struct sockaddr_in
, sin_zero
)) {
415 bzero(&gate_in
, sizeof(gate_in
));
416 gate_in
.sin_len
= sizeof(gate_in
);
417 gate_in
.sin_family
= AF_INET
;
418 gate_in
.sin_port
= SIN(info
.rti_info
[RTAX_GATEWAY
])->sin_port
;
419 gate_in
.sin_addr
= SIN(info
.rti_info
[RTAX_GATEWAY
])->sin_addr
;
420 info
.rti_info
[RTAX_GATEWAY
] = (struct sockaddr
*)&gate_in
;
421 } else if (info
.rti_info
[RTAX_GATEWAY
]->sa_family
== AF_INET6
&&
422 info
.rti_info
[RTAX_GATEWAY
]->sa_len
< sizeof(struct sockaddr_in6
)) {
427 if (info
.rti_info
[RTAX_GENMASK
]) {
428 struct radix_node
*t
;
429 t
= rn_addmask((caddr_t
)info
.rti_info
[RTAX_GENMASK
], 0, 1);
430 if (t
!= NULL
&& Bcmp(info
.rti_info
[RTAX_GENMASK
],
431 t
->rn_key
, *(u_char
*)info
.rti_info
[RTAX_GENMASK
]) == 0) {
432 info
.rti_info
[RTAX_GENMASK
] =
433 (struct sockaddr
*)(t
->rn_key
);
440 * If RTF_IFSCOPE flag is set, then rtm_index specifies the scope.
442 if (rtm
->rtm_flags
& RTF_IFSCOPE
) {
443 if (info
.rti_info
[RTAX_DST
]->sa_family
!= AF_INET
&&
444 info
.rti_info
[RTAX_DST
]->sa_family
!= AF_INET6
) {
447 ifscope
= rtm
->rtm_index
;
450 * Block changes on INTCOPROC interfaces.
453 unsigned int intcoproc_scope
= 0;
454 ifnet_head_lock_shared();
455 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
456 if (IFNET_IS_INTCOPROC(ifp
)) {
457 intcoproc_scope
= ifp
->if_index
;
462 if (intcoproc_scope
== ifscope
&& current_proc()->p_pid
!= 0) {
468 * RTF_PROXY can only be set internally from within the kernel.
470 if (rtm
->rtm_flags
& RTF_PROXY
) {
475 * For AF_INET, always zero out the embedded scope ID. If this is
476 * a scoped request, it must be done explicitly by setting RTF_IFSCOPE
477 * flag and the corresponding rtm_index value. This is to prevent
478 * false interpretation of the scope ID because it's using the sin_zero
479 * field, which might not be properly cleared by the requestor.
481 if (info
.rti_info
[RTAX_DST
]->sa_family
== AF_INET
) {
482 sin_set_ifscope(info
.rti_info
[RTAX_DST
], IFSCOPE_NONE
);
484 if (info
.rti_info
[RTAX_GATEWAY
] != NULL
&&
485 info
.rti_info
[RTAX_GATEWAY
]->sa_family
== AF_INET
) {
486 sin_set_ifscope(info
.rti_info
[RTAX_GATEWAY
], IFSCOPE_NONE
);
488 switch (rtm
->rtm_type
) {
490 if (info
.rti_info
[RTAX_GATEWAY
] == NULL
) {
494 error
= rtrequest_scoped_locked(RTM_ADD
,
495 info
.rti_info
[RTAX_DST
], info
.rti_info
[RTAX_GATEWAY
],
496 info
.rti_info
[RTAX_NETMASK
], rtm
->rtm_flags
, &saved_nrt
,
498 if (error
== 0 && saved_nrt
!= NULL
) {
501 * If the route request specified an interface with
502 * IFA and/or IFP, we set the requested interface on
503 * the route with rt_setif. It would be much better
504 * to do this inside rtrequest, but that would
505 * require passing the desired interface, in some
506 * form, to rtrequest. Since rtrequest is called in
507 * so many places (roughly 40 in our source), adding
508 * a parameter is too much for us to swallow; this is
509 * something for the FreeBSD developers to tackle.
510 * Instead, we let rtrequest compute whatever
511 * interface it wants, then come in behind it and
512 * stick in the interface that we really want. This
513 * works reasonably well except when rtrequest can't
514 * figure out what interface to use (with
515 * ifa_withroute) and returns ENETUNREACH. Ideally
516 * it shouldn't matter if rtrequest can't figure out
517 * the interface if we're going to explicitly set it
518 * ourselves anyway. But practically we can't
519 * recover here because rtrequest will not do any of
520 * the work necessary to add the route if it can't
521 * find an interface. As long as there is a default
522 * route that leads to some interface, rtrequest will
523 * find an interface, so this problem should be
524 * rarely encountered.
528 info
.rti_info
[RTAX_IFP
], info
.rti_info
[RTAX_IFA
],
529 info
.rti_info
[RTAX_GATEWAY
], ifscope
);
530 (void)rt_setmetrics(rtm
->rtm_inits
, &rtm
->rtm_rmx
, saved_nrt
);
531 saved_nrt
->rt_rmx
.rmx_locks
&= ~(rtm
->rtm_inits
);
532 saved_nrt
->rt_rmx
.rmx_locks
|=
533 (rtm
->rtm_inits
& rtm
->rtm_rmx
.rmx_locks
);
534 saved_nrt
->rt_genmask
= info
.rti_info
[RTAX_GENMASK
];
535 RT_REMREF_LOCKED(saved_nrt
);
536 RT_UNLOCK(saved_nrt
);
541 error
= rtrequest_scoped_locked(RTM_DELETE
,
542 info
.rti_info
[RTAX_DST
], info
.rti_info
[RTAX_GATEWAY
],
543 info
.rti_info
[RTAX_NETMASK
], rtm
->rtm_flags
, &saved_nrt
,
555 rnh
= rt_tables
[info
.rti_info
[RTAX_DST
]->sa_family
];
557 senderr(EAFNOSUPPORT
);
560 * Lookup the best match based on the key-mask pair;
561 * callee adds a reference and checks for root node.
563 rt
= rt_lookup(TRUE
, info
.rti_info
[RTAX_DST
],
564 info
.rti_info
[RTAX_NETMASK
], rnh
, ifscope
);
571 * Holding rnh_lock here prevents the possibility of
572 * ifa from changing (e.g. in_ifinit), so it is safe
573 * to access its ifa_addr (down below) without locking.
575 switch (rtm
->rtm_type
) {
581 cred
= kauth_cred_proc_ref(current_proc());
585 RT_LOCK_ASSERT_HELD(rt
);
586 info
.rti_info
[RTAX_DST
] = rt_key(rt
);
587 dst_sa_family
= info
.rti_info
[RTAX_DST
]->sa_family
;
588 info
.rti_info
[RTAX_GATEWAY
] = rt
->rt_gateway
;
589 info
.rti_info
[RTAX_NETMASK
] = rt_mask(rt
);
590 info
.rti_info
[RTAX_GENMASK
] = rt
->rt_genmask
;
591 if (rtm
->rtm_addrs
& (RTA_IFP
| RTA_IFA
)) {
594 ifnet_lock_shared(ifp
);
595 ifa2
= ifp
->if_lladdr
;
596 info
.rti_info
[RTAX_IFP
] =
599 ifnet_lock_done(ifp
);
600 info
.rti_info
[RTAX_IFA
] =
601 rt
->rt_ifa
->ifa_addr
;
602 rtm
->rtm_index
= ifp
->if_index
;
604 info
.rti_info
[RTAX_IFP
] = NULL
;
605 info
.rti_info
[RTAX_IFA
] = NULL
;
607 } else if ((ifp
= rt
->rt_ifp
) != NULL
) {
608 rtm
->rtm_index
= ifp
->if_index
;
613 len
= rt_msg2(rtm
->rtm_type
, &info
, NULL
, NULL
, credp
);
617 struct rt_msghdr
*out_rtm
;
618 R_Malloc(out_rtm
, struct rt_msghdr
*, len
);
619 if (out_rtm
== NULL
) {
626 Bcopy(rtm
, out_rtm
, sizeof(struct rt_msghdr
));
630 (void) rt_msg2(out_rtm
->rtm_type
, &info
, (caddr_t
)out_rtm
,
637 rtm
->rtm_flags
= rt
->rt_flags
;
638 rt_getmetrics(rt
, &rtm
->rtm_rmx
);
639 rtm
->rtm_addrs
= info
.rti_addrs
;
644 kauth_cred_unref(&cred
);
649 is_router
= (rt
->rt_flags
& RTF_ROUTER
) ? TRUE
: FALSE
;
651 if (info
.rti_info
[RTAX_GATEWAY
] != NULL
&&
652 (error
= rt_setgate(rt
, rt_key(rt
),
653 info
.rti_info
[RTAX_GATEWAY
]))) {
659 * If they tried to change things but didn't specify
660 * the required gateway, then just use the old one.
661 * This can happen if the user tries to change the
662 * flags on the default route without changing the
663 * default gateway. Changing flags still doesn't work.
665 if ((rt
->rt_flags
& RTF_GATEWAY
) &&
666 info
.rti_info
[RTAX_GATEWAY
] == NULL
) {
667 info
.rti_info
[RTAX_GATEWAY
] = rt
->rt_gateway
;
671 * On Darwin, we call rt_setif which contains the
672 * equivalent to the code found at this very spot
676 info
.rti_info
[RTAX_IFP
], info
.rti_info
[RTAX_IFA
],
677 info
.rti_info
[RTAX_GATEWAY
], ifscope
);
679 if ((error
= rt_setmetrics(rtm
->rtm_inits
,
680 &rtm
->rtm_rmx
, rt
))) {
685 if (info
.rti_info
[RTAX_GENMASK
]) {
686 rt
->rt_genmask
= info
.rti_info
[RTAX_GENMASK
];
690 * Enqueue work item to invoke callback for this route entry
691 * This may not be needed always, but for now issue it anytime
692 * RTM_CHANGE gets called.
694 route_event_enqueue_nwk_wq_entry(rt
, NULL
, ROUTE_ENTRY_REFRESH
, NULL
, TRUE
);
696 * If the route is for a router, walk the tree to send refresh
697 * event to protocol cloned entries
700 struct route_event rt_ev
;
701 route_event_init(&rt_ev
, rt
, NULL
, ROUTE_ENTRY_REFRESH
);
703 (void) rnh
->rnh_walktree(rnh
, route_event_walktree
, (void *)&rt_ev
);
708 rt
->rt_rmx
.rmx_locks
&= ~(rtm
->rtm_inits
);
709 rt
->rt_rmx
.rmx_locks
|=
710 (rtm
->rtm_inits
& rtm
->rtm_rmx
.rmx_locks
);
721 rtm
->rtm_errno
= error
;
723 rtm
->rtm_flags
|= RTF_DONE
;
727 RT_LOCK_ASSERT_NOTHELD(rt
);
730 lck_mtx_unlock(rnh_lock
);
732 /* relock the socket now */
735 * Check to see if we don't want our own messages.
737 if (!(so
->so_options
& SO_USELOOPBACK
)) {
738 if (route_cb
.any_count
<= 1) {
745 /* There is another listener, so construct message */
749 m_copyback(m
, 0, rtm
->rtm_msglen
, (caddr_t
)rtm
);
750 if (m
->m_pkthdr
.len
< rtm
->rtm_msglen
) {
753 } else if (m
->m_pkthdr
.len
> rtm
->rtm_msglen
) {
754 m_adj(m
, rtm
->rtm_msglen
- m
->m_pkthdr
.len
);
758 if (sendonlytoself
&& m
!= NULL
) {
760 if (sbappendaddr(&so
->so_rcv
, &route_src
, m
,
761 NULL
, &error
) != 0) {
768 struct sockproto route_proto
= { .sp_family
= PF_ROUTE
, .sp_protocol
= 0 };
770 rp
->rcb_proto
.sp_family
= 0; /* Avoid us */
772 if (dst_sa_family
!= 0) {
773 route_proto
.sp_protocol
= dst_sa_family
;
776 socket_unlock(so
, 0);
777 raw_input(m
, &route_proto
, &route_src
, &route_dst
);
781 rp
->rcb_proto
.sp_family
= PF_ROUTE
;
788 rt_setexpire(struct rtentry
*rt
, uint64_t expiry
)
790 /* set both rt_expire and rmx_expire */
791 rt
->rt_expire
= expiry
;
793 rt
->rt_rmx
.rmx_expire
=
794 (int32_t)(expiry
+ rt
->base_calendartime
-
797 rt
->rt_rmx
.rmx_expire
= 0;
802 rt_setmetrics(u_int32_t which
, struct rt_metrics
*in
, struct rtentry
*out
)
804 if (!(which
& RTV_REFRESH_HOST
)) {
805 struct timeval caltime
;
806 getmicrotime(&caltime
);
807 #define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
808 metric(RTV_RPIPE
, rmx_recvpipe
);
809 metric(RTV_SPIPE
, rmx_sendpipe
);
810 metric(RTV_SSTHRESH
, rmx_ssthresh
);
811 metric(RTV_RTT
, rmx_rtt
);
812 metric(RTV_RTTVAR
, rmx_rttvar
);
813 metric(RTV_HOPCOUNT
, rmx_hopcount
);
814 metric(RTV_MTU
, rmx_mtu
);
815 metric(RTV_EXPIRE
, rmx_expire
);
817 if (out
->rt_rmx
.rmx_expire
> 0) {
818 /* account for system time change */
819 getmicrotime(&caltime
);
820 out
->base_calendartime
+=
821 NET_CALCULATE_CLOCKSKEW(caltime
,
822 out
->base_calendartime
,
823 net_uptime(), out
->base_uptime
);
825 out
->rt_rmx
.rmx_expire
-
826 out
->base_calendartime
+
829 rt_setexpire(out
, 0);
832 VERIFY(out
->rt_expire
== 0 || out
->rt_rmx
.rmx_expire
!= 0);
833 VERIFY(out
->rt_expire
!= 0 || out
->rt_rmx
.rmx_expire
== 0);
835 /* Only RTV_REFRESH_HOST must be set */
836 if ((which
& ~RTV_REFRESH_HOST
) ||
837 (out
->rt_flags
& RTF_STATIC
) ||
838 !(out
->rt_flags
& RTF_LLINFO
)) {
842 if (out
->rt_llinfo_refresh
== NULL
) {
846 out
->rt_llinfo_refresh(out
);
852 rt_getmetrics(struct rtentry
*in
, struct rt_metrics
*out
)
854 struct timeval caltime
;
856 VERIFY(in
->rt_expire
== 0 || in
->rt_rmx
.rmx_expire
!= 0);
857 VERIFY(in
->rt_expire
!= 0 || in
->rt_rmx
.rmx_expire
== 0);
861 if (in
->rt_expire
!= 0) {
862 /* account for system time change */
863 getmicrotime(&caltime
);
865 in
->base_calendartime
+=
866 NET_CALCULATE_CLOCKSKEW(caltime
,
867 in
->base_calendartime
, net_uptime(), in
->base_uptime
);
869 out
->rmx_expire
= (int32_t)(in
->base_calendartime
+
870 in
->rt_expire
- in
->base_uptime
);
877 * Set route's interface given info.rti_info[RTAX_IFP],
878 * info.rti_info[RTAX_IFA], and gateway.
881 rt_setif(struct rtentry
*rt
, struct sockaddr
*Ifpaddr
, struct sockaddr
*Ifaaddr
,
882 struct sockaddr
*Gate
, unsigned int ifscope
)
884 struct ifaddr
*ifa
= NULL
;
885 struct ifnet
*ifp
= NULL
;
886 void (*ifa_rtrequest
)(int, struct rtentry
*, struct sockaddr
*);
888 LCK_MTX_ASSERT(rnh_lock
, LCK_MTX_ASSERT_OWNED
);
890 RT_LOCK_ASSERT_HELD(rt
);
892 /* Don't update a defunct route */
893 if (rt
->rt_flags
& RTF_CONDEMNED
) {
897 /* Add an extra ref for ourselves */
898 RT_ADDREF_LOCKED(rt
);
900 /* Become a regular mutex, just in case */
904 * New gateway could require new ifaddr, ifp; flags may also
905 * be different; ifp may be specified by ll sockaddr when
906 * protocol address is ambiguous.
908 if (Ifpaddr
&& (ifa
= ifa_ifwithnet_scoped(Ifpaddr
, ifscope
)) &&
909 (ifp
= ifa
->ifa_ifp
) && (Ifaaddr
|| Gate
)) {
911 ifa
= ifaof_ifpforaddr(Ifaaddr
? Ifaaddr
: Gate
, ifp
);
917 if (Ifpaddr
&& (ifp
= if_withname(Ifpaddr
))) {
919 ifa
= ifaof_ifpforaddr(Gate
, ifp
);
921 ifnet_lock_shared(ifp
);
922 ifa
= TAILQ_FIRST(&ifp
->if_addrhead
);
926 ifnet_lock_done(ifp
);
928 } else if (Ifaaddr
&&
929 (ifa
= ifa_ifwithaddr_scoped(Ifaaddr
, ifscope
))) {
931 } else if (Gate
!= NULL
) {
933 * Safe to drop rt_lock and use rt_key, since holding
934 * rnh_lock here prevents another thread from calling
935 * rt_setgate() on this route. We cannot hold the
936 * lock across ifa_ifwithroute since the lookup done
937 * by that routine may point to the same route.
940 if ((ifa
= ifa_ifwithroute_scoped_locked(rt
->rt_flags
,
941 rt_key(rt
), Gate
, ifscope
)) != NULL
) {
945 /* Don't update a defunct route */
946 if (rt
->rt_flags
& RTF_CONDEMNED
) {
950 /* Release extra ref */
951 RT_REMREF_LOCKED(rt
);
957 /* trigger route cache reevaluation */
958 if (rt_key(rt
)->sa_family
== AF_INET
) {
959 routegenid_inet_update();
960 } else if (rt_key(rt
)->sa_family
== AF_INET6
) {
961 routegenid_inet6_update();
965 struct ifaddr
*oifa
= rt
->rt_ifa
;
969 ifa_rtrequest
= oifa
->ifa_rtrequest
;
971 if (ifa_rtrequest
!= NULL
) {
972 ifa_rtrequest(RTM_DELETE
, rt
, Gate
);
977 if (rt
->rt_ifp
!= ifp
) {
979 * Purge any link-layer info caching.
981 if (rt
->rt_llinfo_purge
!= NULL
) {
982 rt
->rt_llinfo_purge(rt
);
986 * Adjust route ref count for the interfaces.
988 if (rt
->rt_if_ref_fn
!= NULL
) {
989 rt
->rt_if_ref_fn(ifp
, 1);
990 rt
->rt_if_ref_fn(rt
->rt_ifp
, -1);
995 * If this is the (non-scoped) default route, record
996 * the interface index used for the primary ifscope.
998 if (rt_primary_default(rt
, rt_key(rt
))) {
999 set_primary_ifscope(rt_key(rt
)->sa_family
,
1000 rt
->rt_ifp
->if_index
);
1003 * If rmx_mtu is not locked, update it
1004 * to the MTU used by the new interface.
1006 if (!(rt
->rt_rmx
.rmx_locks
& RTV_MTU
)) {
1007 rt
->rt_rmx
.rmx_mtu
= rt
->rt_ifp
->if_mtu
;
1008 if (rt_key(rt
)->sa_family
== AF_INET
&&
1009 INTF_ADJUST_MTU_FOR_CLAT46(ifp
)) {
1010 rt
->rt_rmx
.rmx_mtu
= IN6_LINKMTU(rt
->rt_ifp
);
1011 /* Further adjust the size for CLAT46 expansion */
1012 rt
->rt_rmx
.rmx_mtu
-= CLAT46_HDR_EXPANSION_OVERHD
;
1016 if (rt
->rt_ifa
!= NULL
) {
1017 IFA_LOCK_SPIN(rt
->rt_ifa
);
1018 ifa_rtrequest
= rt
->rt_ifa
->ifa_rtrequest
;
1019 IFA_UNLOCK(rt
->rt_ifa
);
1020 if (ifa_rtrequest
!= NULL
) {
1021 ifa_rtrequest(RTM_ADD
, rt
, Gate
);
1025 /* Release extra ref */
1026 RT_REMREF_LOCKED(rt
);
1033 /* XXX: to reset gateway to correct value, at RTM_CHANGE */
1034 if (rt
->rt_ifa
!= NULL
) {
1035 IFA_LOCK_SPIN(rt
->rt_ifa
);
1036 ifa_rtrequest
= rt
->rt_ifa
->ifa_rtrequest
;
1037 IFA_UNLOCK(rt
->rt_ifa
);
1038 if (ifa_rtrequest
!= NULL
) {
1039 ifa_rtrequest(RTM_ADD
, rt
, Gate
);
1044 * Workaround for local address routes pointing to the loopback
1045 * interface added by configd, until <rdar://problem/12970142>.
1047 if ((rt
->rt_ifp
->if_flags
& IFF_LOOPBACK
) &&
1048 (rt
->rt_flags
& RTF_HOST
) && rt
->rt_ifa
->ifa_ifp
== rt
->rt_ifp
) {
1049 ifa
= ifa_ifwithaddr(rt_key(rt
));
1051 if (ifa
!= rt
->rt_ifa
) {
1058 /* Release extra ref */
1059 RT_REMREF_LOCKED(rt
);
1063 * Extract the addresses of the passed sockaddrs.
1064 * Do a little sanity checking so as to avoid bad memory references.
1065 * This data is derived straight from userland.
1068 rt_xaddrs(caddr_t cp
, caddr_t cplim
, struct rt_addrinfo
*rtinfo
)
1070 struct sockaddr
*sa
;
1073 bzero(rtinfo
->rti_info
, sizeof(rtinfo
->rti_info
));
1074 for (i
= 0; (i
< RTAX_MAX
) && (cp
< cplim
); i
++) {
1075 if ((rtinfo
->rti_addrs
& (1 << i
)) == 0) {
1078 sa
= (struct sockaddr
*)cp
;
1082 if ((cp
+ sa
->sa_len
) > cplim
) {
1085 if (sa
->sa_len
> sizeof(struct sockaddr_storage
)) {
1089 * there are no more.. quit now
1090 * If there are more bits, they are in error.
1091 * I've seen this. route(1) can evidently generate these.
1092 * This causes kernel to core dump.
1093 * for compatibility, If we see this, point to a safe address.
1095 if (sa
->sa_len
== 0) {
1096 rtinfo
->rti_info
[i
] = &sa_zero
;
1097 return 0; /* should be EINVAL but for compat */
1099 if (sa
->sa_len
< offsetof(struct sockaddr
, sa_data
)) {
1103 rtinfo
->rti_info
[i
] = sa
;
1109 static struct mbuf
*
1110 rt_msg1(u_char type
, struct rt_addrinfo
*rtinfo
)
1112 struct rt_msghdr
*rtm
;
1120 len
= sizeof(struct ifa_msghdr
);
1125 len
= sizeof(struct ifma_msghdr
);
1129 len
= sizeof(struct if_msghdr
);
1133 len
= sizeof(struct rt_msghdr
);
1135 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
1136 if (m
&& len
> MHLEN
) {
1137 MCLGET(m
, M_DONTWAIT
);
1138 if (!(m
->m_flags
& M_EXT
)) {
1146 m
->m_pkthdr
.len
= m
->m_len
= len
;
1147 m
->m_pkthdr
.rcvif
= NULL
;
1148 rtm
= mtod(m
, struct rt_msghdr
*);
1149 bzero((caddr_t
)rtm
, len
);
1151 for (i
= 0; i
< RTAX_MAX
; i
++) {
1152 struct sockaddr
*sa
, *hint
;
1153 uint8_t ssbuf
[SOCK_MAXADDRLEN
+ 1];
1156 * Make sure to accommodate the largest possible size of sa_len.
1158 _CASSERT(sizeof(ssbuf
) == (SOCK_MAXADDRLEN
+ 1));
1160 if ((sa
= rtinfo
->rti_info
[i
]) == NULL
) {
1167 if ((hint
= rtinfo
->rti_info
[RTAX_DST
]) == NULL
) {
1168 hint
= rtinfo
->rti_info
[RTAX_IFA
];
1171 /* Scrub away any trace of embedded interface scope */
1172 sa
= rtm_scrub(type
, i
, hint
, sa
, &ssbuf
,
1173 sizeof(ssbuf
), NULL
);
1180 rtinfo
->rti_addrs
|= (1 << i
);
1182 m_copyback(m
, off
, dlen
, (caddr_t
)sa
);
1184 off
+= ROUNDUP32(dlen
);
1186 if (m
->m_pkthdr
.len
!= len
) {
1190 rtm
->rtm_msglen
= (u_short
)len
;
1191 rtm
->rtm_version
= RTM_VERSION
;
1192 rtm
->rtm_type
= type
;
1197 rt_msg2(u_char type
, struct rt_addrinfo
*rtinfo
, caddr_t cp
, struct walkarg
*w
,
1198 kauth_cred_t
* credp
)
1201 int len
, dlen
, rlen
, second_time
= 0;
1204 rtinfo
->rti_addrs
= 0;
1209 len
= sizeof(struct ifa_msghdr
);
1214 len
= sizeof(struct ifma_msghdr
);
1218 len
= sizeof(struct if_msghdr
);
1222 len
= sizeof(struct if_msghdr2
);
1226 len
= sizeof(struct ifma_msghdr2
);
1230 len
= sizeof(struct rt_msghdr_ext
);
1234 len
= sizeof(struct rt_msghdr2
);
1238 len
= sizeof(struct rt_msghdr
);
1244 for (i
= 0; i
< RTAX_MAX
; i
++) {
1245 struct sockaddr
*sa
, *hint
;
1246 uint8_t ssbuf
[SOCK_MAXADDRLEN
+ 1];
1249 * Make sure to accommodate the largest possible size of sa_len.
1251 _CASSERT(sizeof(ssbuf
) == (SOCK_MAXADDRLEN
+ 1));
1253 if ((sa
= rtinfo
->rti_info
[i
]) == NULL
) {
1260 if ((hint
= rtinfo
->rti_info
[RTAX_DST
]) == NULL
) {
1261 hint
= rtinfo
->rti_info
[RTAX_IFA
];
1264 /* Scrub away any trace of embedded interface scope */
1265 sa
= rtm_scrub(type
, i
, hint
, sa
, &ssbuf
,
1266 sizeof(ssbuf
), NULL
);
1270 sa
= rtm_scrub(type
, i
, NULL
, sa
, &ssbuf
,
1271 sizeof(ssbuf
), credp
);
1278 rtinfo
->rti_addrs
|= (1 << i
);
1280 rlen
= ROUNDUP32(dlen
);
1282 bcopy((caddr_t
)sa
, cp
, (size_t)dlen
);
1284 bzero(cp
+ dlen
, rlen
- dlen
);
1290 if (cp
== NULL
&& w
!= NULL
&& !second_time
) {
1291 struct walkarg
*rw
= w
;
1293 if (rw
->w_req
!= NULL
) {
1294 if (rw
->w_tmemsize
< len
) {
1295 if (rw
->w_tmem
!= NULL
) {
1296 FREE(rw
->w_tmem
, M_RTABLE
);
1298 rw
->w_tmem
= _MALLOC(len
, M_RTABLE
, M_ZERO
| M_WAITOK
);
1299 if (rw
->w_tmem
!= NULL
) {
1300 rw
->w_tmemsize
= len
;
1303 if (rw
->w_tmem
!= NULL
) {
1311 struct rt_msghdr
*rtm
= (struct rt_msghdr
*)(void *)cp0
;
1313 rtm
->rtm_version
= RTM_VERSION
;
1314 rtm
->rtm_type
= type
;
1315 rtm
->rtm_msglen
= (u_short
)len
;
1321 * This routine is called to generate a message from the routing
1322 * socket indicating that a redirect has occurred, a routing lookup
1323 * has failed, or that a protocol has detected timeouts to a particular
1327 rt_missmsg(u_char type
, struct rt_addrinfo
*rtinfo
, int flags
, int error
)
1329 struct rt_msghdr
*rtm
;
1331 struct sockaddr
*sa
= rtinfo
->rti_info
[RTAX_DST
];
1332 struct sockproto route_proto
= { .sp_family
= PF_ROUTE
, .sp_protocol
= 0 };
1334 if (route_cb
.any_count
== 0) {
1337 m
= rt_msg1(type
, rtinfo
);
1341 rtm
= mtod(m
, struct rt_msghdr
*);
1342 rtm
->rtm_flags
= RTF_DONE
| flags
;
1343 rtm
->rtm_errno
= error
;
1344 rtm
->rtm_addrs
= rtinfo
->rti_addrs
;
1345 route_proto
.sp_family
= sa
? sa
->sa_family
: 0;
1346 raw_input(m
, &route_proto
, &route_src
, &route_dst
);
1350 * This routine is called to generate a message from the routing
1351 * socket indicating that the status of a network interface has changed.
1354 rt_ifmsg(struct ifnet
*ifp
)
1356 struct if_msghdr
*ifm
;
1358 struct rt_addrinfo info
;
1359 struct sockproto route_proto
= { .sp_family
= PF_ROUTE
, .sp_protocol
= 0 };
1361 if (route_cb
.any_count
== 0) {
1364 bzero((caddr_t
)&info
, sizeof(info
));
1365 m
= rt_msg1(RTM_IFINFO
, &info
);
1369 ifm
= mtod(m
, struct if_msghdr
*);
1370 ifm
->ifm_index
= ifp
->if_index
;
1371 ifm
->ifm_flags
= (u_short
)ifp
->if_flags
;
1372 if_data_internal_to_if_data(ifp
, &ifp
->if_data
, &ifm
->ifm_data
);
1374 raw_input(m
, &route_proto
, &route_src
, &route_dst
);
1378 * This is called to generate messages from the routing socket
1379 * indicating a network interface has had addresses associated with it.
1380 * if we ever reverse the logic and replace messages TO the routing
1381 * socket indicate a request to configure interfaces, then it will
1382 * be unnecessary as the routing socket will automatically generate
1385 * Since this is coming from the interface, it is expected that the
1386 * interface will be locked. Caller must hold rnh_lock and rt_lock.
1389 rt_newaddrmsg(u_char cmd
, struct ifaddr
*ifa
, int error
, struct rtentry
*rt
)
1391 struct rt_addrinfo info
;
1392 struct sockaddr
*sa
= 0;
1395 struct ifnet
*ifp
= ifa
->ifa_ifp
;
1396 struct sockproto route_proto
= { .sp_family
= PF_ROUTE
, .sp_protocol
= 0 };
1398 LCK_MTX_ASSERT(rnh_lock
, LCK_MTX_ASSERT_OWNED
);
1399 RT_LOCK_ASSERT_HELD(rt
);
1401 if (route_cb
.any_count
== 0) {
1405 /* Become a regular mutex, just in case */
1406 RT_CONVERT_LOCK(rt
);
1407 for (pass
= 1; pass
< 3; pass
++) {
1408 bzero((caddr_t
)&info
, sizeof(info
));
1409 if ((cmd
== RTM_ADD
&& pass
== 1) ||
1410 (cmd
== RTM_DELETE
&& pass
== 2)) {
1411 struct ifa_msghdr
*ifam
;
1412 u_char ncmd
= cmd
== RTM_ADD
? RTM_NEWADDR
: RTM_DELADDR
;
1414 /* Lock ifp for if_lladdr */
1415 ifnet_lock_shared(ifp
);
1417 info
.rti_info
[RTAX_IFA
] = sa
= ifa
->ifa_addr
;
1419 * Holding ifnet lock here prevents the link address
1420 * from changing contents, so no need to hold its
1421 * lock. The link address is always present; it's
1424 info
.rti_info
[RTAX_IFP
] = ifp
->if_lladdr
->ifa_addr
;
1425 info
.rti_info
[RTAX_NETMASK
] = ifa
->ifa_netmask
;
1426 info
.rti_info
[RTAX_BRD
] = ifa
->ifa_dstaddr
;
1427 if ((m
= rt_msg1(ncmd
, &info
)) == NULL
) {
1429 ifnet_lock_done(ifp
);
1433 ifnet_lock_done(ifp
);
1434 ifam
= mtod(m
, struct ifa_msghdr
*);
1435 ifam
->ifam_index
= ifp
->if_index
;
1437 ifam
->ifam_metric
= ifa
->ifa_metric
;
1438 ifam
->ifam_flags
= ifa
->ifa_flags
;
1440 ifam
->ifam_addrs
= info
.rti_addrs
;
1442 if ((cmd
== RTM_ADD
&& pass
== 2) ||
1443 (cmd
== RTM_DELETE
&& pass
== 1)) {
1444 struct rt_msghdr
*rtm
;
1449 info
.rti_info
[RTAX_NETMASK
] = rt_mask(rt
);
1450 info
.rti_info
[RTAX_DST
] = sa
= rt_key(rt
);
1451 info
.rti_info
[RTAX_GATEWAY
] = rt
->rt_gateway
;
1452 if ((m
= rt_msg1(cmd
, &info
)) == NULL
) {
1455 rtm
= mtod(m
, struct rt_msghdr
*);
1456 rtm
->rtm_index
= ifp
->if_index
;
1457 rtm
->rtm_flags
|= rt
->rt_flags
;
1458 rtm
->rtm_errno
= error
;
1459 rtm
->rtm_addrs
= info
.rti_addrs
;
1461 route_proto
.sp_protocol
= sa
? sa
->sa_family
: 0;
1462 raw_input(m
, &route_proto
, &route_src
, &route_dst
);
1467 * This is the analogue to the rt_newaddrmsg which performs the same
1468 * function but for multicast group memberhips. This is easier since
1469 * there is no route state to worry about.
1472 rt_newmaddrmsg(u_char cmd
, struct ifmultiaddr
*ifma
)
1474 struct rt_addrinfo info
;
1476 struct ifnet
*ifp
= ifma
->ifma_ifp
;
1477 struct ifma_msghdr
*ifmam
;
1478 struct sockproto route_proto
= { .sp_family
= PF_ROUTE
, .sp_protocol
= 0 };
1480 if (route_cb
.any_count
== 0) {
1484 /* Lock ifp for if_lladdr */
1485 ifnet_lock_shared(ifp
);
1486 bzero((caddr_t
)&info
, sizeof(info
));
1488 info
.rti_info
[RTAX_IFA
] = ifma
->ifma_addr
;
1489 /* lladdr doesn't need lock */
1490 info
.rti_info
[RTAX_IFP
] = ifp
->if_lladdr
->ifa_addr
;
1493 * If a link-layer address is present, present it as a ``gateway''
1494 * (similarly to how ARP entries, e.g., are presented).
1496 info
.rti_info
[RTAX_GATEWAY
] = (ifma
->ifma_ll
!= NULL
) ?
1497 ifma
->ifma_ll
->ifma_addr
: NULL
;
1498 if ((m
= rt_msg1(cmd
, &info
)) == NULL
) {
1500 ifnet_lock_done(ifp
);
1503 ifmam
= mtod(m
, struct ifma_msghdr
*);
1504 ifmam
->ifmam_index
= ifp
->if_index
;
1505 ifmam
->ifmam_addrs
= info
.rti_addrs
;
1506 route_proto
.sp_protocol
= ifma
->ifma_addr
->sa_family
;
1508 ifnet_lock_done(ifp
);
1509 raw_input(m
, &route_proto
, &route_src
, &route_dst
);
1515 const char *c
= "RTM_?";
1566 case RTM_GET_SILENT
:
1567 c
= "RTM_GET_SILENT";
1573 c
= "RTM_NEWMADDR2";
1587 * This is used in dumping the kernel table via sysctl().
1590 sysctl_dumpentry(struct radix_node
*rn
, void *vw
)
1592 struct walkarg
*w
= vw
;
1593 struct rtentry
*rt
= (struct rtentry
*)rn
;
1594 int error
= 0, size
;
1595 struct rt_addrinfo info
;
1597 kauth_cred_t
*credp
;
1599 cred
= kauth_cred_proc_ref(current_proc());
1603 if ((w
->w_op
== NET_RT_FLAGS
|| w
->w_op
== NET_RT_FLAGS_PRIV
) &&
1604 !(rt
->rt_flags
& w
->w_arg
)) {
1609 * If the matching route has RTF_LLINFO set, then we can skip scrubbing the MAC
1610 * only if the outgoing interface is not loopback and the process has entitlement
1611 * for neighbor cache read.
1613 if (w
->w_op
== NET_RT_FLAGS_PRIV
&& (rt
->rt_flags
& RTF_LLINFO
)) {
1614 if (rt
->rt_ifp
!= lo_ifp
&&
1615 (route_op_entitlement_check(NULL
, cred
, ROUTE_OP_READ
, TRUE
) == 0)) {
1620 bzero((caddr_t
)&info
, sizeof(info
));
1621 info
.rti_info
[RTAX_DST
] = rt_key(rt
);
1622 info
.rti_info
[RTAX_GATEWAY
] = rt
->rt_gateway
;
1623 info
.rti_info
[RTAX_NETMASK
] = rt_mask(rt
);
1624 info
.rti_info
[RTAX_GENMASK
] = rt
->rt_genmask
;
1625 if (RT_HAS_IFADDR(rt
)) {
1626 info
.rti_info
[RTAX_IFA
] = rt
->rt_ifa
->ifa_addr
;
1629 if (w
->w_op
!= NET_RT_DUMP2
) {
1630 size
= rt_msg2(RTM_GET
, &info
, NULL
, w
, credp
);
1631 if (w
->w_req
!= NULL
&& w
->w_tmem
!= NULL
) {
1632 struct rt_msghdr
*rtm
=
1633 (struct rt_msghdr
*)(void *)w
->w_tmem
;
1635 rtm
->rtm_flags
= rt
->rt_flags
;
1636 rtm
->rtm_use
= rt
->rt_use
;
1637 rt_getmetrics(rt
, &rtm
->rtm_rmx
);
1638 rtm
->rtm_index
= rt
->rt_ifp
->if_index
;
1642 rtm
->rtm_addrs
= info
.rti_addrs
;
1643 error
= SYSCTL_OUT(w
->w_req
, (caddr_t
)rtm
, size
);
1646 size
= rt_msg2(RTM_GET2
, &info
, NULL
, w
, credp
);
1647 if (w
->w_req
!= NULL
&& w
->w_tmem
!= NULL
) {
1648 struct rt_msghdr2
*rtm
=
1649 (struct rt_msghdr2
*)(void *)w
->w_tmem
;
1651 rtm
->rtm_flags
= rt
->rt_flags
;
1652 rtm
->rtm_use
= rt
->rt_use
;
1653 rt_getmetrics(rt
, &rtm
->rtm_rmx
);
1654 rtm
->rtm_index
= rt
->rt_ifp
->if_index
;
1655 rtm
->rtm_refcnt
= rt
->rt_refcnt
;
1656 if (rt
->rt_parent
) {
1657 rtm
->rtm_parentflags
= rt
->rt_parent
->rt_flags
;
1659 rtm
->rtm_parentflags
= 0;
1661 rtm
->rtm_reserved
= 0;
1662 rtm
->rtm_addrs
= info
.rti_addrs
;
1663 error
= SYSCTL_OUT(w
->w_req
, (caddr_t
)rtm
, size
);
1669 kauth_cred_unref(&cred
);
1674 * This is used for dumping extended information from route entries.
1677 sysctl_dumpentry_ext(struct radix_node
*rn
, void *vw
)
1679 struct walkarg
*w
= vw
;
1680 struct rtentry
*rt
= (struct rtentry
*)rn
;
1681 int error
= 0, size
;
1682 struct rt_addrinfo info
;
1685 cred
= kauth_cred_proc_ref(current_proc());
1688 if (w
->w_op
== NET_RT_DUMPX_FLAGS
&& !(rt
->rt_flags
& w
->w_arg
)) {
1691 bzero(&info
, sizeof(info
));
1692 info
.rti_info
[RTAX_DST
] = rt_key(rt
);
1693 info
.rti_info
[RTAX_GATEWAY
] = rt
->rt_gateway
;
1694 info
.rti_info
[RTAX_NETMASK
] = rt_mask(rt
);
1695 info
.rti_info
[RTAX_GENMASK
] = rt
->rt_genmask
;
1697 size
= rt_msg2(RTM_GET_EXT
, &info
, NULL
, w
, &cred
);
1698 if (w
->w_req
!= NULL
&& w
->w_tmem
!= NULL
) {
1699 struct rt_msghdr_ext
*ertm
=
1700 (struct rt_msghdr_ext
*)(void *)w
->w_tmem
;
1702 ertm
->rtm_flags
= rt
->rt_flags
;
1703 ertm
->rtm_use
= rt
->rt_use
;
1704 rt_getmetrics(rt
, &ertm
->rtm_rmx
);
1705 ertm
->rtm_index
= rt
->rt_ifp
->if_index
;
1708 ertm
->rtm_errno
= 0;
1709 ertm
->rtm_addrs
= info
.rti_addrs
;
1710 if (rt
->rt_llinfo_get_ri
== NULL
) {
1711 bzero(&ertm
->rtm_ri
, sizeof(ertm
->rtm_ri
));
1712 ertm
->rtm_ri
.ri_rssi
= IFNET_RSSI_UNKNOWN
;
1713 ertm
->rtm_ri
.ri_lqm
= IFNET_LQM_THRESH_OFF
;
1714 ertm
->rtm_ri
.ri_npm
= IFNET_NPM_THRESH_UNKNOWN
;
1716 rt
->rt_llinfo_get_ri(rt
, &ertm
->rtm_ri
);
1718 error
= SYSCTL_OUT(w
->w_req
, (caddr_t
)ertm
, size
);
1723 kauth_cred_unref(&cred
);
1729 * To avoid to call copyout() while holding locks and to cause problems
1730 * in the paging path, sysctl_iflist() and sysctl_iflist2() contstruct
1731 * the list in two passes. In the first pass we compute the total
1732 * length of the data we are going to copyout, then we release
1733 * all locks to allocate a temporary buffer that gets filled
1734 * in the second pass.
1736 * Note that we are verifying the assumption that _MALLOC returns a buffer
1737 * that is at least 32 bits aligned and that the messages and addresses are
1741 sysctl_iflist(int af
, struct walkarg
*w
)
1745 struct rt_addrinfo info
;
1746 int len
= 0, error
= 0;
1748 int total_len
= 0, current_len
= 0;
1749 char *total_buffer
= NULL
, *cp
= NULL
;
1752 cred
= kauth_cred_proc_ref(current_proc());
1754 bzero((caddr_t
)&info
, sizeof(info
));
1756 for (pass
= 0; pass
< 2; pass
++) {
1757 ifnet_head_lock_shared();
1759 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
1763 if (w
->w_arg
&& w
->w_arg
!= ifp
->if_index
) {
1766 ifnet_lock_shared(ifp
);
1768 * Holding ifnet lock here prevents the link address
1769 * from changing contents, so no need to hold the ifa
1770 * lock. The link address is always present; it's
1773 ifa
= ifp
->if_lladdr
;
1774 info
.rti_info
[RTAX_IFP
] = ifa
->ifa_addr
;
1775 len
= rt_msg2(RTM_IFINFO
, &info
, NULL
, NULL
, &cred
);
1779 struct if_msghdr
*ifm
;
1781 if (current_len
+ len
> total_len
) {
1782 ifnet_lock_done(ifp
);
1786 info
.rti_info
[RTAX_IFP
] = ifa
->ifa_addr
;
1787 len
= rt_msg2(RTM_IFINFO
, &info
,
1788 (caddr_t
)cp
, NULL
, &cred
);
1789 info
.rti_info
[RTAX_IFP
] = NULL
;
1791 ifm
= (struct if_msghdr
*)(void *)cp
;
1792 ifm
->ifm_index
= ifp
->if_index
;
1793 ifm
->ifm_flags
= (u_short
)ifp
->if_flags
;
1794 if_data_internal_to_if_data(ifp
, &ifp
->if_data
,
1796 ifm
->ifm_addrs
= info
.rti_addrs
;
1798 * <rdar://problem/32940901>
1799 * Round bytes only for non-platform
1801 if (!csproc_get_platform_binary(w
->w_req
->p
)) {
1802 ALIGN_BYTES(ifm
->ifm_data
.ifi_ibytes
);
1803 ALIGN_BYTES(ifm
->ifm_data
.ifi_obytes
);
1807 VERIFY(IS_P2ALIGNED(cp
, sizeof(u_int32_t
)));
1810 while ((ifa
= ifa
->ifa_link
.tqe_next
) != NULL
) {
1812 if (af
&& af
!= ifa
->ifa_addr
->sa_family
) {
1816 if (ifa
->ifa_addr
->sa_family
== AF_INET6
&&
1817 (((struct in6_ifaddr
*)ifa
)->ia6_flags
&
1818 IN6_IFF_CLAT46
) != 0) {
1822 info
.rti_info
[RTAX_IFA
] = ifa
->ifa_addr
;
1823 info
.rti_info
[RTAX_NETMASK
] = ifa
->ifa_netmask
;
1824 info
.rti_info
[RTAX_BRD
] = ifa
->ifa_dstaddr
;
1825 len
= rt_msg2(RTM_NEWADDR
, &info
, NULL
, NULL
,
1830 struct ifa_msghdr
*ifam
;
1832 if (current_len
+ len
> total_len
) {
1837 len
= rt_msg2(RTM_NEWADDR
, &info
,
1838 (caddr_t
)cp
, NULL
, &cred
);
1840 ifam
= (struct ifa_msghdr
*)(void *)cp
;
1842 ifa
->ifa_ifp
->if_index
;
1843 ifam
->ifam_flags
= ifa
->ifa_flags
;
1844 ifam
->ifam_metric
= ifa
->ifa_metric
;
1845 ifam
->ifam_addrs
= info
.rti_addrs
;
1848 VERIFY(IS_P2ALIGNED(cp
,
1849 sizeof(u_int32_t
)));
1854 ifnet_lock_done(ifp
);
1855 info
.rti_info
[RTAX_IFA
] = info
.rti_info
[RTAX_NETMASK
] =
1856 info
.rti_info
[RTAX_BRD
] = NULL
;
1862 if (error
== ENOBUFS
) {
1863 printf("%s: current_len (%d) + len (%d) > "
1864 "total_len (%d)\n", __func__
, current_len
,
1871 /* Better to return zero length buffer than ENOBUFS */
1872 if (total_len
== 0) {
1875 total_len
+= total_len
>> 3;
1876 total_buffer
= _MALLOC(total_len
, M_RTABLE
,
1878 if (total_buffer
== NULL
) {
1879 printf("%s: _MALLOC(%d) failed\n", __func__
,
1885 VERIFY(IS_P2ALIGNED(cp
, sizeof(u_int32_t
)));
1887 error
= SYSCTL_OUT(w
->w_req
, total_buffer
, current_len
);
1894 if (total_buffer
!= NULL
) {
1895 _FREE(total_buffer
, M_RTABLE
);
1898 kauth_cred_unref(&cred
);
1903 sysctl_iflist2(int af
, struct walkarg
*w
)
1907 struct rt_addrinfo info
;
1908 int len
= 0, error
= 0;
1910 int total_len
= 0, current_len
= 0;
1911 char *total_buffer
= NULL
, *cp
= NULL
;
1914 cred
= kauth_cred_proc_ref(current_proc());
1916 bzero((caddr_t
)&info
, sizeof(info
));
1918 for (pass
= 0; pass
< 2; pass
++) {
1919 struct ifmultiaddr
*ifma
;
1921 ifnet_head_lock_shared();
1923 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
1927 if (w
->w_arg
&& w
->w_arg
!= ifp
->if_index
) {
1930 ifnet_lock_shared(ifp
);
1932 * Holding ifnet lock here prevents the link address
1933 * from changing contents, so no need to hold the ifa
1934 * lock. The link address is always present; it's
1937 ifa
= ifp
->if_lladdr
;
1938 info
.rti_info
[RTAX_IFP
] = ifa
->ifa_addr
;
1939 len
= rt_msg2(RTM_IFINFO2
, &info
, NULL
, NULL
, &cred
);
1943 struct if_msghdr2
*ifm
;
1945 if (current_len
+ len
> total_len
) {
1946 ifnet_lock_done(ifp
);
1950 info
.rti_info
[RTAX_IFP
] = ifa
->ifa_addr
;
1951 len
= rt_msg2(RTM_IFINFO2
, &info
,
1952 (caddr_t
)cp
, NULL
, &cred
);
1953 info
.rti_info
[RTAX_IFP
] = NULL
;
1955 ifm
= (struct if_msghdr2
*)(void *)cp
;
1956 ifm
->ifm_addrs
= info
.rti_addrs
;
1957 ifm
->ifm_flags
= (u_short
)ifp
->if_flags
;
1958 ifm
->ifm_index
= ifp
->if_index
;
1959 ifm
->ifm_snd_len
= IFCQ_LEN(&ifp
->if_snd
);
1960 ifm
->ifm_snd_maxlen
= IFCQ_MAXLEN(&ifp
->if_snd
);
1961 ifm
->ifm_snd_drops
=
1962 (int)ifp
->if_snd
.ifcq_dropcnt
.packets
;
1963 ifm
->ifm_timer
= ifp
->if_timer
;
1964 if_data_internal_to_if_data64(ifp
,
1965 &ifp
->if_data
, &ifm
->ifm_data
);
1967 * <rdar://problem/32940901>
1968 * Round bytes only for non-platform
1970 if (!csproc_get_platform_binary(w
->w_req
->p
)) {
1971 ALIGN_BYTES(ifm
->ifm_data
.ifi_ibytes
);
1972 ALIGN_BYTES(ifm
->ifm_data
.ifi_obytes
);
1976 VERIFY(IS_P2ALIGNED(cp
, sizeof(u_int32_t
)));
1979 while ((ifa
= ifa
->ifa_link
.tqe_next
) != NULL
) {
1981 if (af
&& af
!= ifa
->ifa_addr
->sa_family
) {
1985 if (ifa
->ifa_addr
->sa_family
== AF_INET6
&&
1986 (((struct in6_ifaddr
*)ifa
)->ia6_flags
&
1987 IN6_IFF_CLAT46
) != 0) {
1992 info
.rti_info
[RTAX_IFA
] = ifa
->ifa_addr
;
1993 info
.rti_info
[RTAX_NETMASK
] = ifa
->ifa_netmask
;
1994 info
.rti_info
[RTAX_BRD
] = ifa
->ifa_dstaddr
;
1995 len
= rt_msg2(RTM_NEWADDR
, &info
, NULL
, NULL
,
2000 struct ifa_msghdr
*ifam
;
2002 if (current_len
+ len
> total_len
) {
2007 len
= rt_msg2(RTM_NEWADDR
, &info
,
2008 (caddr_t
)cp
, NULL
, &cred
);
2010 ifam
= (struct ifa_msghdr
*)(void *)cp
;
2012 ifa
->ifa_ifp
->if_index
;
2013 ifam
->ifam_flags
= ifa
->ifa_flags
;
2014 ifam
->ifam_metric
= ifa
->ifa_metric
;
2015 ifam
->ifam_addrs
= info
.rti_addrs
;
2018 VERIFY(IS_P2ALIGNED(cp
,
2019 sizeof(u_int32_t
)));
2025 ifnet_lock_done(ifp
);
2029 for (ifma
= LIST_FIRST(&ifp
->if_multiaddrs
);
2030 ifma
!= NULL
; ifma
= LIST_NEXT(ifma
, ifma_link
)) {
2031 struct ifaddr
*ifa0
;
2034 if (af
&& af
!= ifma
->ifma_addr
->sa_family
) {
2038 bzero((caddr_t
)&info
, sizeof(info
));
2039 info
.rti_info
[RTAX_IFA
] = ifma
->ifma_addr
;
2041 * Holding ifnet lock here prevents the link
2042 * address from changing contents, so no need
2043 * to hold the ifa0 lock. The link address is
2044 * always present; it's never freed.
2046 ifa0
= ifp
->if_lladdr
;
2047 info
.rti_info
[RTAX_IFP
] = ifa0
->ifa_addr
;
2048 if (ifma
->ifma_ll
!= NULL
) {
2049 info
.rti_info
[RTAX_GATEWAY
] =
2050 ifma
->ifma_ll
->ifma_addr
;
2052 len
= rt_msg2(RTM_NEWMADDR2
, &info
, NULL
, NULL
,
2057 struct ifma_msghdr2
*ifmam
;
2059 if (current_len
+ len
> total_len
) {
2064 len
= rt_msg2(RTM_NEWMADDR2
, &info
,
2065 (caddr_t
)cp
, NULL
, &cred
);
2068 (struct ifma_msghdr2
*)(void *)cp
;
2069 ifmam
->ifmam_addrs
= info
.rti_addrs
;
2070 ifmam
->ifmam_flags
= 0;
2071 ifmam
->ifmam_index
=
2072 ifma
->ifma_ifp
->if_index
;
2073 ifmam
->ifmam_refcount
=
2077 VERIFY(IS_P2ALIGNED(cp
,
2078 sizeof(u_int32_t
)));
2083 ifnet_lock_done(ifp
);
2084 info
.rti_info
[RTAX_IFA
] = info
.rti_info
[RTAX_NETMASK
] =
2085 info
.rti_info
[RTAX_BRD
] = NULL
;
2090 if (error
== ENOBUFS
) {
2091 printf("%s: current_len (%d) + len (%d) > "
2092 "total_len (%d)\n", __func__
, current_len
,
2099 /* Better to return zero length buffer than ENOBUFS */
2100 if (total_len
== 0) {
2103 total_len
+= total_len
>> 3;
2104 total_buffer
= _MALLOC(total_len
, M_RTABLE
,
2106 if (total_buffer
== NULL
) {
2107 printf("%s: _MALLOC(%d) failed\n", __func__
,
2113 VERIFY(IS_P2ALIGNED(cp
, sizeof(u_int32_t
)));
2115 error
= SYSCTL_OUT(w
->w_req
, total_buffer
, current_len
);
2122 if (total_buffer
!= NULL
) {
2123 _FREE(total_buffer
, M_RTABLE
);
2126 kauth_cred_unref(&cred
);
2132 sysctl_rtstat(struct sysctl_req
*req
)
2134 return SYSCTL_OUT(req
, &rtstat
, sizeof(struct rtstat
));
2138 sysctl_rttrash(struct sysctl_req
*req
)
2140 return SYSCTL_OUT(req
, &rttrash
, sizeof(rttrash
));
2144 sysctl_rtsock SYSCTL_HANDLER_ARGS
2146 #pragma unused(oidp)
2147 int *name
= (int *)arg1
;
2148 u_int namelen
= arg2
;
2149 struct radix_node_head
*rnh
;
2150 int i
, error
= EINVAL
;
2162 af
= (u_char
)name
[0];
2163 Bzero(&w
, sizeof(w
));
2172 case NET_RT_FLAGS_PRIV
:
2173 lck_mtx_lock(rnh_lock
);
2174 for (i
= 1; i
<= AF_MAX
; i
++) {
2175 if ((rnh
= rt_tables
[i
]) && (af
== 0 || af
== i
) &&
2176 (error
= rnh
->rnh_walktree(rnh
,
2177 sysctl_dumpentry
, &w
))) {
2181 lck_mtx_unlock(rnh_lock
);
2184 case NET_RT_DUMPX_FLAGS
:
2185 lck_mtx_lock(rnh_lock
);
2186 for (i
= 1; i
<= AF_MAX
; i
++) {
2187 if ((rnh
= rt_tables
[i
]) && (af
== 0 || af
== i
) &&
2188 (error
= rnh
->rnh_walktree(rnh
,
2189 sysctl_dumpentry_ext
, &w
))) {
2193 lck_mtx_unlock(rnh_lock
);
2196 error
= sysctl_iflist(af
, &w
);
2198 case NET_RT_IFLIST2
:
2199 error
= sysctl_iflist2(af
, &w
);
2202 error
= sysctl_rtstat(req
);
2205 error
= sysctl_rttrash(req
);
2208 if (w
.w_tmem
!= NULL
) {
2209 FREE(w
.w_tmem
, M_RTABLE
);
2215 * Definitions of protocols supported in the ROUTE domain.
2217 static struct protosw routesw
[] = {
2219 .pr_type
= SOCK_RAW
,
2221 .pr_flags
= PR_ATOMIC
| PR_ADDR
,
2222 .pr_output
= route_output
,
2223 .pr_ctlinput
= raw_ctlinput
,
2224 .pr_init
= raw_init
,
2225 .pr_usrreqs
= &route_usrreqs
,
2229 static int route_proto_count
= (sizeof(routesw
) / sizeof(struct protosw
));
2231 struct domain routedomain_s
= {
2232 .dom_family
= PF_ROUTE
,
2233 .dom_name
= "route",
2234 .dom_init
= route_dinit
,
2238 route_dinit(struct domain
*dp
)
2243 VERIFY(!(dp
->dom_flags
& DOM_INITIALIZED
));
2244 VERIFY(routedomain
== NULL
);
2248 for (i
= 0, pr
= &routesw
[0]; i
< route_proto_count
; i
++, pr
++) {
2249 net_add_proto(pr
, dp
, 1);