/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)rtsock.c	8.5 (Berkeley) 11/2/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <kern/locks.h>

#include <net/route.h>
#include <net/raw_cb.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_arp.h>
#include <netinet6/nd6.h>
extern struct rtstat rtstat;
extern struct domain routedomain_s;
static struct domain *routedomain = NULL;

MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");

static struct sockaddr route_dst = { 2, PF_ROUTE, { 0, } };
static struct sockaddr route_src = { 2, PF_ROUTE, { 0, } };
static struct sockaddr sa_zero = { sizeof (sa_zero), AF_INET, { 0, } };

struct route_cb {
    u_int32_t   ip_count;   /* attached w/ AF_INET */
    u_int32_t   ip6_count;  /* attached w/ AF_INET6 */
    u_int32_t   any_count;  /* total attached */
};

static struct route_cb route_cb;

struct walkarg {
    int     w_tmemsize;
    int     w_op, w_arg;
    caddr_t w_tmem;
    struct sysctl_req *w_req;
};
static void route_dinit(struct domain *);
static int rts_abort(struct socket *);
static int rts_attach(struct socket *, int, struct proc *);
static int rts_bind(struct socket *, struct sockaddr *, struct proc *);
static int rts_connect(struct socket *, struct sockaddr *, struct proc *);
static int rts_detach(struct socket *);
static int rts_disconnect(struct socket *);
static int rts_peeraddr(struct socket *, struct sockaddr **);
static int rts_send(struct socket *, int, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static int rts_shutdown(struct socket *);
static int rts_sockaddr(struct socket *, struct sockaddr **);

static int route_output(struct mbuf *, struct socket *);
static void rt_setmetrics(u_int32_t, struct rt_metrics *, struct rtentry *);
static void rt_getmetrics(struct rtentry *, struct rt_metrics *);
static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
    struct sockaddr *, unsigned int);
static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *);
static struct mbuf *rt_msg1(int, struct rt_addrinfo *);
static int rt_msg2(int, struct rt_addrinfo *, caddr_t, struct walkarg *,
    kauth_cred_t *);
static int sysctl_dumpentry(struct radix_node *rn, void *vw);
static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw);
static int sysctl_iflist(int af, struct walkarg *w);
static int sysctl_iflist2(int af, struct walkarg *w);
static int sysctl_rtstat(struct sysctl_req *);
static int sysctl_rttrash(struct sysctl_req *);
static int sysctl_rtsock SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_rtsock, "");

SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "routing");
#define ROUNDUP32(a) \
    ((a) > 0 ? (1 + (((a) - 1) | (sizeof (uint32_t) - 1))) : \
    sizeof (uint32_t))

#define ADVANCE32(x, n) \
    (x += ROUNDUP32((n)->sa_len))
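/*
 * Illustrative sketch of how these macros are meant to be used when walking
 * the sockaddrs packed after a routing message header: each sockaddr is
 * padded to a 32-bit boundary, so the cursor advances by ROUNDUP32(sa_len).
 * The function name and parameters below are hypothetical and the block is
 * not compiled.
 */
#if 0
static void
walk_packed_sockaddrs(caddr_t cp, caddr_t cplim)
{
    struct sockaddr *sa;

    while (cp < cplim) {
        sa = (struct sockaddr *)cp;
        if (sa->sa_len == 0)
            break;              /* zero-length entry: stop */
        /* ... examine sa->sa_family / sa_data here ... */
        ADVANCE32(cp, sa);      /* cp += ROUNDUP32(sa->sa_len) */
    }
}
#endif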
/*
 * It really doesn't make any sense at all for this code to share much
 * with raw_usrreq.c, since its functionality is so restricted.  XXX
 */
static int
rts_abort(struct socket *so)
{
    return (raw_usrreqs.pru_abort(so));
}

/* pru_accept is EOPNOTSUPP */

static int
rts_attach(struct socket *so, int proto, struct proc *p)
{
    struct rawcb *rp;
    int error;

    VERIFY(so->so_pcb == NULL);

    MALLOC(rp, struct rawcb *, sizeof (*rp), M_PCB, M_WAITOK | M_ZERO);
    if (rp == NULL)
        return (ENOBUFS);

    so->so_pcb = (caddr_t)rp;
    /* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */
    error = raw_attach(so, proto);
    rp = sotorawcb(so);
    if (error) {
        FREE(rp, M_PCB);
        so->so_pcb = NULL;
        so->so_flags |= SOF_PCBCLEARING;
        return (error);
    }

    switch (rp->rcb_proto.sp_protocol) {
    case AF_INET:
        atomic_add_32(&route_cb.ip_count, 1);
        break;
    case AF_INET6:
        atomic_add_32(&route_cb.ip6_count, 1);
        break;
    }
    rp->rcb_faddr = &route_src;
    atomic_add_32(&route_cb.any_count, 1);
    /* the socket is already locked when we enter rts_attach */
    soisconnected(so);
    so->so_options |= SO_USELOOPBACK;
    return (0);
}

static int
rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    return (raw_usrreqs.pru_bind(so, nam, p)); /* xxx just EINVAL */
}

static int
rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    return (raw_usrreqs.pru_connect(so, nam, p)); /* XXX just EINVAL */
}

/* pru_connect2 is EOPNOTSUPP */
/* pru_control is EOPNOTSUPP */

static int
rts_detach(struct socket *so)
{
    struct rawcb *rp = sotorawcb(so);

    switch (rp->rcb_proto.sp_protocol) {
    case AF_INET:
        atomic_add_32(&route_cb.ip_count, -1);
        break;
    case AF_INET6:
        atomic_add_32(&route_cb.ip6_count, -1);
        break;
    }
    atomic_add_32(&route_cb.any_count, -1);
    return (raw_usrreqs.pru_detach(so));
}

static int
rts_disconnect(struct socket *so)
{
    return (raw_usrreqs.pru_disconnect(so));
}

/* pru_listen is EOPNOTSUPP */

static int
rts_peeraddr(struct socket *so, struct sockaddr **nam)
{
    return (raw_usrreqs.pru_peeraddr(so, nam));
}

/* pru_rcvd is EOPNOTSUPP */
/* pru_rcvoob is EOPNOTSUPP */

static int
rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct proc *p)
{
    return (raw_usrreqs.pru_send(so, flags, m, nam, control, p));
}

/* pru_sense is null */

static int
rts_shutdown(struct socket *so)
{
    return (raw_usrreqs.pru_shutdown(so));
}

static int
rts_sockaddr(struct socket *so, struct sockaddr **nam)
{
    return (raw_usrreqs.pru_sockaddr(so, nam));
}

static struct pr_usrreqs route_usrreqs = {
    .pru_abort =        rts_abort,
    .pru_attach =       rts_attach,
    .pru_bind =         rts_bind,
    .pru_connect =      rts_connect,
    .pru_detach =       rts_detach,
    .pru_disconnect =   rts_disconnect,
    .pru_peeraddr =     rts_peeraddr,
    .pru_send =         rts_send,
    .pru_shutdown =     rts_shutdown,
    .pru_sockaddr =     rts_sockaddr,
    .pru_sosend =       sosend,
    .pru_soreceive =    soreceive,
};
static int
route_output(struct mbuf *m, struct socket *so)
{
    struct rt_msghdr *rtm = NULL;
    struct rtentry *rt = NULL;
    struct rtentry *saved_nrt = NULL;
    struct radix_node_head *rnh;
    struct rt_addrinfo info;
    int len, error = 0;
    sa_family_t dst_sa_family = 0;
    struct ifnet *ifp = NULL;
    struct sockaddr_in dst_in, gate_in;
    int sendonlytoself = 0;
    unsigned int ifscope = IFSCOPE_NONE;
    struct rawcb *rp = NULL;

#define senderr(e) { error = (e); goto flush; }
    if (m == NULL || ((m->m_len < sizeof (intptr_t)) &&
        (m = m_pullup(m, sizeof (intptr_t))) == NULL))
        return (ENOBUFS);
    VERIFY(m->m_flags & M_PKTHDR);

    /*
     * Unlock the socket (but keep a reference); it won't be
     * accessed until raw_input appends to it.
     */
    socket_unlock(so, 0);
    lck_mtx_lock(rnh_lock);

    len = m->m_pkthdr.len;
    if (len < sizeof (*rtm) ||
        len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
        info.rti_info[RTAX_DST] = NULL;
        senderr(EINVAL);
    }
    R_Malloc(rtm, struct rt_msghdr *, len);
    if (rtm == NULL) {
        info.rti_info[RTAX_DST] = NULL;
        senderr(ENOBUFS);
    }
    m_copydata(m, 0, len, (caddr_t)rtm);
    if (rtm->rtm_version != RTM_VERSION) {
        info.rti_info[RTAX_DST] = NULL;
        senderr(EPROTONOSUPPORT);
    }

    /*
     * Silent version of RTM_GET for Reachability APIs.  We may change
     * all RTM_GETs to be silent in the future, so this is private for now.
     */
    if (rtm->rtm_type == RTM_GET_SILENT) {
        if (!(so->so_options & SO_USELOOPBACK))
            senderr(EINVAL);
        sendonlytoself = 1;
        rtm->rtm_type = RTM_GET;
    }
    /*
     * Perform permission checking; only privileged sockets
     * may perform operations other than RTM_GET.
     */
    if (rtm->rtm_type != RTM_GET && !(so->so_state & SS_PRIV)) {
        info.rti_info[RTAX_DST] = NULL;
        senderr(EPERM);
    }

    rtm->rtm_pid = proc_selfpid();
    info.rti_addrs = rtm->rtm_addrs;
    if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) {
        info.rti_info[RTAX_DST] = NULL;
        senderr(EINVAL);
    }
    if (info.rti_info[RTAX_DST] == NULL ||
        info.rti_info[RTAX_DST]->sa_family >= AF_MAX ||
        (info.rti_info[RTAX_GATEWAY] != NULL &&
        info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX))
        senderr(EINVAL);

    if (info.rti_info[RTAX_DST]->sa_family == AF_INET &&
        info.rti_info[RTAX_DST]->sa_len != sizeof (dst_in)) {
        /* At minimum, we need up to sin_addr */
        if (info.rti_info[RTAX_DST]->sa_len <
            offsetof(struct sockaddr_in, sin_zero))
            senderr(EINVAL);
        bzero(&dst_in, sizeof (dst_in));
        dst_in.sin_len = sizeof (dst_in);
        dst_in.sin_family = AF_INET;
        dst_in.sin_port = SIN(info.rti_info[RTAX_DST])->sin_port;
        dst_in.sin_addr = SIN(info.rti_info[RTAX_DST])->sin_addr;
        info.rti_info[RTAX_DST] = (struct sockaddr *)&dst_in;
        dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
    }

    if (info.rti_info[RTAX_GATEWAY] != NULL &&
        info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET &&
        info.rti_info[RTAX_GATEWAY]->sa_len != sizeof (gate_in)) {
        /* At minimum, we need up to sin_addr */
        if (info.rti_info[RTAX_GATEWAY]->sa_len <
            offsetof(struct sockaddr_in, sin_zero))
            senderr(EINVAL);
        bzero(&gate_in, sizeof (gate_in));
        gate_in.sin_len = sizeof (gate_in);
        gate_in.sin_family = AF_INET;
        gate_in.sin_port = SIN(info.rti_info[RTAX_GATEWAY])->sin_port;
        gate_in.sin_addr = SIN(info.rti_info[RTAX_GATEWAY])->sin_addr;
        info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gate_in;
    }

    if (info.rti_info[RTAX_GENMASK]) {
        struct radix_node *t;
        t = rn_addmask((caddr_t)info.rti_info[RTAX_GENMASK], 0, 1);
        if (t != NULL && Bcmp(info.rti_info[RTAX_GENMASK],
            t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0)
            info.rti_info[RTAX_GENMASK] =
                (struct sockaddr *)(t->rn_key);
        else
            senderr(ENOBUFS);
    }

    /*
     * If RTF_IFSCOPE flag is set, then rtm_index specifies the scope.
     */
    if (rtm->rtm_flags & RTF_IFSCOPE) {
        if (info.rti_info[RTAX_DST]->sa_family != AF_INET &&
            info.rti_info[RTAX_DST]->sa_family != AF_INET6)
            senderr(EINVAL);
        ifscope = rtm->rtm_index;
    }

    /*
     * RTF_PROXY can only be set internally from within the kernel.
     */
    if (rtm->rtm_flags & RTF_PROXY)
        senderr(EINVAL);

    /*
     * For AF_INET, always zero out the embedded scope ID.  If this is
     * a scoped request, it must be done explicitly by setting RTF_IFSCOPE
     * flag and the corresponding rtm_index value.  This is to prevent
     * false interpretation of the scope ID because it's using the sin_zero
     * field, which might not be properly cleared by the requestor.
     */
    if (info.rti_info[RTAX_DST]->sa_family == AF_INET)
        sin_set_ifscope(info.rti_info[RTAX_DST], IFSCOPE_NONE);
    if (info.rti_info[RTAX_GATEWAY] != NULL &&
        info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET)
        sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE);
    switch (rtm->rtm_type) {
    case RTM_ADD:
        if (info.rti_info[RTAX_GATEWAY] == NULL)
            senderr(EINVAL);

        error = rtrequest_scoped_locked(RTM_ADD,
            info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
            info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
            ifscope);
        if (error == 0 && saved_nrt != NULL) {
            RT_LOCK(saved_nrt);
            /*
             * If the route request specified an interface with
             * IFA and/or IFP, we set the requested interface on
             * the route with rt_setif.  It would be much better
             * to do this inside rtrequest, but that would
             * require passing the desired interface, in some
             * form, to rtrequest.  Since rtrequest is called in
             * so many places (roughly 40 in our source), adding
             * a parameter is too much for us to swallow; this is
             * something for the FreeBSD developers to tackle.
             * Instead, we let rtrequest compute whatever
             * interface it wants, then come in behind it and
             * stick in the interface that we really want.  This
             * works reasonably well except when rtrequest can't
             * figure out what interface to use (with
             * ifa_withroute) and returns ENETUNREACH.  Ideally
             * it shouldn't matter if rtrequest can't figure out
             * the interface if we're going to explicitly set it
             * ourselves anyway.  But practically we can't
             * recover here because rtrequest will not do any of
             * the work necessary to add the route if it can't
             * find an interface.  As long as there is a default
             * route that leads to some interface, rtrequest will
             * find an interface, so this problem should be
             * rarely encountered.
             */
            rt_setif(saved_nrt,
                info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
                info.rti_info[RTAX_GATEWAY], ifscope);
            rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, saved_nrt);
            saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
            saved_nrt->rt_rmx.rmx_locks |=
                (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
            saved_nrt->rt_genmask = info.rti_info[RTAX_GENMASK];
            RT_REMREF_LOCKED(saved_nrt);
            RT_UNLOCK(saved_nrt);
        }
        break;

    case RTM_DELETE:
        error = rtrequest_scoped_locked(RTM_DELETE,
            info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
            info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
            ifscope);
        if (error == 0) {
            rt = saved_nrt;
            RT_LOCK(rt);
            goto report;
        }
        break;

    case RTM_GET:
    case RTM_CHANGE:
    case RTM_LOCK:
        rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family];
        if (rnh == NULL)
            senderr(EAFNOSUPPORT);
        /*
         * Lookup the best match based on the key-mask pair;
         * callee adds a reference and checks for root node.
         */
        rt = rt_lookup(TRUE, info.rti_info[RTAX_DST],
            info.rti_info[RTAX_NETMASK], rnh, ifscope);
        if (rt == NULL)
            senderr(ESRCH);
        RT_LOCK(rt);

        /*
         * Holding rnh_lock here prevents the possibility of
         * ifa from changing (e.g. in_ifinit), so it is safe
         * to access its ifa_addr (down below) without locking.
         */
        switch (rtm->rtm_type) {
        case RTM_GET: {
            struct ifaddr *ifa2;
report:
            ifa2 = NULL;
            RT_LOCK_ASSERT_HELD(rt);
            info.rti_info[RTAX_DST] = rt_key(rt);
            dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
            info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
            info.rti_info[RTAX_NETMASK] = rt_mask(rt);
            info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
            if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
                if ((ifp = rt->rt_ifp) != NULL) {
                    ifnet_lock_shared(ifp);
                    ifa2 = ifp->if_lladdr;
                    info.rti_info[RTAX_IFP] =
                        ifa2->ifa_addr;
                    IFA_ADDREF(ifa2);
                    ifnet_lock_done(ifp);
                    info.rti_info[RTAX_IFA] =
                        rt->rt_ifa->ifa_addr;
                    rtm->rtm_index = ifp->if_index;
                } else {
                    info.rti_info[RTAX_IFP] = NULL;
                    info.rti_info[RTAX_IFA] = NULL;
                }
            } else if ((ifp = rt->rt_ifp) != NULL) {
                rtm->rtm_index = ifp->if_index;
            }

            len = rt_msg2(rtm->rtm_type, &info, NULL, NULL, NULL);
            if (len > rtm->rtm_msglen) {
                struct rt_msghdr *new_rtm;
                R_Malloc(new_rtm, struct rt_msghdr *, len);
                if (new_rtm == NULL) {
                    RT_UNLOCK(rt);
                    if (ifa2 != NULL)
                        IFA_REMREF(ifa2);
                    senderr(ENOBUFS);
                }
                Bcopy(rtm, new_rtm, rtm->rtm_msglen);
                R_Free(rtm); rtm = new_rtm;
            }
            (void) rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
                NULL, NULL);
            rtm->rtm_flags = rt->rt_flags;
            rt_getmetrics(rt, &rtm->rtm_rmx);
            rtm->rtm_addrs = info.rti_addrs;
            if (ifa2 != NULL)
                IFA_REMREF(ifa2);
            break;
        }

        case RTM_CHANGE:
            if (info.rti_info[RTAX_GATEWAY] != NULL &&
                (error = rt_setgate(rt, rt_key(rt),
                info.rti_info[RTAX_GATEWAY]))) {
                int tmp = error;
                RT_UNLOCK(rt);
                senderr(tmp);
            }
            /*
             * If they tried to change things but didn't specify
             * the required gateway, then just use the old one.
             * This can happen if the user tries to change the
             * flags on the default route without changing the
             * default gateway.  Changing flags still doesn't work.
             */
            if ((rt->rt_flags & RTF_GATEWAY) &&
                info.rti_info[RTAX_GATEWAY] == NULL)
                info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;

            /*
             * On Darwin, we call rt_setif which contains the
             * equivalent to the code found at this very spot
             * in BSD.
             */
            rt_setif(rt,
                info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
                info.rti_info[RTAX_GATEWAY], ifscope);

            rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, rt);
            if (info.rti_info[RTAX_GENMASK])
                rt->rt_genmask = info.rti_info[RTAX_GENMASK];
            /* FALLTHRU */
        case RTM_LOCK:
            rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
            rt->rt_rmx.rmx_locks |=
                (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
            break;
        }
        RT_UNLOCK(rt);
        break;

    default:
        senderr(EOPNOTSUPP);
    }
flush:
    if (rtm != NULL) {
        if (error)
            rtm->rtm_errno = error;
        else
            rtm->rtm_flags |= RTF_DONE;
    }
    if (rt != NULL) {
        RT_LOCK_ASSERT_NOTHELD(rt);
        rtfree_locked(rt);
    }
    lck_mtx_unlock(rnh_lock);

    /* relock the socket now */
    socket_lock(so, 0);

    /*
     * Check to see if we don't want our own messages.
     */
    if (!(so->so_options & SO_USELOOPBACK)) {
        if (route_cb.any_count <= 1) {
            if (rtm != NULL)
                R_Free(rtm);
            m_freem(m);
            return (error);
        }
        /* There is another listener, so construct message */
        rp = sotorawcb(so);
    }
    if (rtm != NULL) {
        m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
        if (m->m_pkthdr.len < rtm->rtm_msglen) {
            m_freem(m);
            m = NULL;
        } else if (m->m_pkthdr.len > rtm->rtm_msglen) {
            m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
        }
        R_Free(rtm);
    }
    if (sendonlytoself && m != NULL) {
        error = 0;
        if (sbappendaddr(&so->so_rcv, &route_src, m,
            NULL, &error) != 0) {
            sorwakeup(so);
        }
        if (error)
            return (error);
    } else {
        struct sockproto route_proto = { PF_ROUTE, 0 };
        if (rp != NULL)
            rp->rcb_proto.sp_family = 0; /* Avoid us */
        if (dst_sa_family != 0)
            route_proto.sp_protocol = dst_sa_family;
        if (m != NULL) {
            socket_unlock(so, 0);
            raw_input(m, &route_proto, &route_src, &route_dst);
            socket_lock(so, 0);
        }
        if (rp != NULL)
            rp->rcb_proto.sp_family = PF_ROUTE;
    }
    return (error);
}
void
rt_setexpire(struct rtentry *rt, uint64_t expiry)
{
    /* set both rt_expire and rmx_expire */
    rt->rt_expire = expiry;
    if (expiry != 0)
        rt->rt_rmx.rmx_expire = expiry + rt->base_calendartime -
            rt->base_uptime;
    else
        rt->rt_rmx.rmx_expire = 0;
}
static void
rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out)
{
    struct timeval caltime;

    getmicrotime(&caltime);

#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
    metric(RTV_RPIPE, rmx_recvpipe);
    metric(RTV_SPIPE, rmx_sendpipe);
    metric(RTV_SSTHRESH, rmx_ssthresh);
    metric(RTV_RTT, rmx_rtt);
    metric(RTV_RTTVAR, rmx_rttvar);
    metric(RTV_HOPCOUNT, rmx_hopcount);
    metric(RTV_MTU, rmx_mtu);
    metric(RTV_EXPIRE, rmx_expire);
#undef metric

    if (out->rt_rmx.rmx_expire > 0) {
        /* account for system time change */
        getmicrotime(&caltime);
        out->base_calendartime +=
            NET_CALCULATE_CLOCKSKEW(caltime,
            out->base_calendartime,
            net_uptime(), out->base_uptime);
        rt_setexpire(out,
            out->rt_rmx.rmx_expire -
            out->base_calendartime +
            out->base_uptime);
    } else {
        rt_setexpire(out, 0);
    }

    VERIFY(out->rt_expire == 0 || out->rt_rmx.rmx_expire != 0);
    VERIFY(out->rt_expire != 0 || out->rt_rmx.rmx_expire == 0);
}
static void
rt_getmetrics(struct rtentry *in, struct rt_metrics *out)
{
    struct timeval caltime;

    VERIFY(in->rt_expire == 0 || in->rt_rmx.rmx_expire != 0);
    VERIFY(in->rt_expire != 0 || in->rt_rmx.rmx_expire == 0);

    *out = in->rt_rmx;

    if (in->rt_expire != 0) {
        /* account for system time change */
        getmicrotime(&caltime);

        in->base_calendartime +=
            NET_CALCULATE_CLOCKSKEW(caltime,
            in->base_calendartime, net_uptime(), in->base_uptime);

        out->rmx_expire = in->base_calendartime +
            in->rt_expire - in->base_uptime;
    } else {
        out->rmx_expire = 0;
    }
}
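/*
 * Sketch of the conversion the two routines above perform: rmx_expire is
 * expressed in calendar time for userland, while rt_expire is kept in
 * uptime.  The helpers below are hypothetical and only restate the
 * arithmetic used by rt_setmetrics()/rt_getmetrics(); the block is not
 * compiled.
 */
#if 0
static uint64_t
example_calendar_to_uptime(uint64_t rmx_expire, uint64_t base_calendartime,
    uint64_t base_uptime)
{
    /* calendar-based rmx_expire -> uptime-based expiry (rt_expire) */
    return (rmx_expire - base_calendartime + base_uptime);
}

static uint64_t
example_uptime_to_calendar(uint64_t rt_expire, uint64_t base_calendartime,
    uint64_t base_uptime)
{
    /* uptime-based expiry (rt_expire) -> calendar-based rmx_expire */
    return (base_calendartime + rt_expire - base_uptime);
}
#endif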
/*
 * Set route's interface given info.rti_info[RTAX_IFP],
 * info.rti_info[RTAX_IFA], and gateway.
 */
static void
rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr,
    struct sockaddr *Gate, unsigned int ifscope)
{
    struct ifaddr *ifa = NULL;
    struct ifnet *ifp = NULL;
    void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *);

    lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

    RT_LOCK_ASSERT_HELD(rt);

    /* Don't update a defunct route */
    if (rt->rt_flags & RTF_CONDEMNED)
        return;

    /* Add an extra ref for ourselves */
    RT_ADDREF_LOCKED(rt);

    /* Become a regular mutex, just in case */
    RT_CONVERT_LOCK(rt);

    /*
     * New gateway could require new ifaddr, ifp; flags may also
     * be different; ifp may be specified by ll sockaddr when
     * protocol address is ambiguous.
     */
    if (Ifpaddr && (ifa = ifa_ifwithnet_scoped(Ifpaddr, ifscope)) &&
        (ifp = ifa->ifa_ifp) && (Ifaaddr || Gate)) {
        IFA_REMREF(ifa);
        ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate, ifp);
    } else {
        if (ifa != NULL) {
            IFA_REMREF(ifa);
            ifa = NULL;
        }
        if (Ifpaddr && (ifp = if_withname(Ifpaddr))) {
            if (Gate) {
                ifa = ifaof_ifpforaddr(Gate, ifp);
            } else {
                ifnet_lock_shared(ifp);
                ifa = TAILQ_FIRST(&ifp->if_addrhead);
                if (ifa != NULL)
                    IFA_ADDREF(ifa);
                ifnet_lock_done(ifp);
            }
        } else if (Ifaaddr &&
            (ifa = ifa_ifwithaddr_scoped(Ifaaddr, ifscope))) {
            ifp = ifa->ifa_ifp;
        } else if (Gate != NULL) {
            /*
             * Safe to drop rt_lock and use rt_key, since holding
             * rnh_lock here prevents another thread from calling
             * rt_setgate() on this route.  We cannot hold the
             * lock across ifa_ifwithroute since the lookup done
             * by that routine may point to the same route.
             */
            RT_UNLOCK(rt);
            if ((ifa = ifa_ifwithroute_scoped_locked(rt->rt_flags,
                rt_key(rt), Gate, ifscope)) != NULL)
                ifp = ifa->ifa_ifp;
            RT_LOCK(rt);
            /* Don't update a defunct route */
            if (rt->rt_flags & RTF_CONDEMNED) {
                if (ifa != NULL)
                    IFA_REMREF(ifa);
                /* Release extra ref */
                RT_REMREF_LOCKED(rt);
                return;
            }
        }
    }

    /* trigger route cache reevaluation */
    if (rt_key(rt)->sa_family == AF_INET)
        routegenid_inet_update();
    else if (rt_key(rt)->sa_family == AF_INET6)
        routegenid_inet6_update();

    if (ifa != NULL) {
        struct ifaddr *oifa = rt->rt_ifa;
        if (oifa != ifa) {
            if (oifa != NULL) {
                IFA_LOCK_SPIN(oifa);
                ifa_rtrequest = oifa->ifa_rtrequest;
                IFA_UNLOCK(oifa);
                if (ifa_rtrequest != NULL)
                    ifa_rtrequest(RTM_DELETE, rt, Gate);
            }
            rtsetifa(rt, ifa);

            if (rt->rt_ifp != ifp) {
                /*
                 * Purge any link-layer info caching.
                 */
                if (rt->rt_llinfo_purge != NULL)
                    rt->rt_llinfo_purge(rt);

                /*
                 * Adjust route ref count for the interfaces.
                 */
                if (rt->rt_if_ref_fn != NULL) {
                    rt->rt_if_ref_fn(ifp, 1);
                    rt->rt_if_ref_fn(rt->rt_ifp, -1);
                }
            }
            rt->rt_ifp = ifp;
            /*
             * If this is the (non-scoped) default route, record
             * the interface index used for the primary ifscope.
             */
            if (rt_primary_default(rt, rt_key(rt))) {
                set_primary_ifscope(rt_key(rt)->sa_family,
                    rt->rt_ifp->if_index);
            }
            /*
             * If rmx_mtu is not locked, update it
             * to the MTU used by the new interface.
             */
            if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
                rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

            if (rt->rt_ifa != NULL) {
                IFA_LOCK_SPIN(rt->rt_ifa);
                ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
                IFA_UNLOCK(rt->rt_ifa);
                if (ifa_rtrequest != NULL)
                    ifa_rtrequest(RTM_ADD, rt, Gate);
            }
            IFA_REMREF(ifa);
            /* Release extra ref */
            RT_REMREF_LOCKED(rt);
            return;
        }
        IFA_REMREF(ifa);
        ifa = NULL;
    }

    /* XXX: to reset gateway to correct value, at RTM_CHANGE */
    if (rt->rt_ifa != NULL) {
        IFA_LOCK_SPIN(rt->rt_ifa);
        ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
        IFA_UNLOCK(rt->rt_ifa);
        if (ifa_rtrequest != NULL)
            ifa_rtrequest(RTM_ADD, rt, Gate);
    }

    /*
     * Workaround for local address routes pointing to the loopback
     * interface added by configd, until <rdar://problem/12970142>.
     */
    if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
        (rt->rt_flags & RTF_HOST) && rt->rt_ifa->ifa_ifp == rt->rt_ifp) {
        ifa = ifa_ifwithaddr(rt_key(rt));
        if (ifa != NULL) {
            if (ifa != rt->rt_ifa)
                rtsetifa(rt, ifa);
            IFA_REMREF(ifa);
        }
    }

    /* Release extra ref */
    RT_REMREF_LOCKED(rt);
}
/*
 * Extract the addresses of the passed sockaddrs.
 * Do a little sanity checking so as to avoid bad memory references.
 * This data is derived straight from userland.
 */
static int
rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo)
{
    struct sockaddr *sa;
    int i;

    bzero(rtinfo->rti_info, sizeof (rtinfo->rti_info));
    for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
        if ((rtinfo->rti_addrs & (1 << i)) == 0)
            continue;
        sa = (struct sockaddr *)cp;
        /*
         * It won't fit.
         */
        if ((cp + sa->sa_len) > cplim)
            return (EINVAL);
        /*
         * There are no more; quit now.
         * If there are more bits, they are in error.
         * I've seen this.  route(1) can evidently generate these.
         * This causes the kernel to core dump.
         * For compatibility, if we see this, point to a safe address.
         */
        if (sa->sa_len == 0) {
            rtinfo->rti_info[i] = &sa_zero;
            return (0); /* should be EINVAL but for compat */
        }
        /* accept it */
        rtinfo->rti_info[i] = sa;
        ADVANCE32(cp, sa);
    }
    return (0);
}
static struct mbuf *
rt_msg1(int type, struct rt_addrinfo *rtinfo)
{
    struct rt_msghdr *rtm;
    struct mbuf *m;
    int i;
    int len, dlen;

    switch (type) {

    case RTM_DELADDR:
    case RTM_NEWADDR:
        len = sizeof (struct ifa_msghdr);
        break;

    case RTM_DELMADDR:
    case RTM_NEWMADDR:
        len = sizeof (struct ifma_msghdr);
        break;

    case RTM_IFINFO:
        len = sizeof (struct if_msghdr);
        break;

    default:
        len = sizeof (struct rt_msghdr);
    }
    m = m_gethdr(M_DONTWAIT, MT_DATA);
    if (m && len > MHLEN) {
        MCLGET(m, M_DONTWAIT);
        if (!(m->m_flags & M_EXT)) {
            m_free(m);
            m = NULL;
        }
    }
    if (m == NULL)
        return (NULL);
    m->m_pkthdr.len = m->m_len = len;
    m->m_pkthdr.rcvif = NULL;
    rtm = mtod(m, struct rt_msghdr *);
    bzero((caddr_t)rtm, len);
    for (i = 0; i < RTAX_MAX; i++) {
        struct sockaddr *sa, *hint;
        uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

        /*
         * Make sure to accommodate the largest possible size of sa_len.
         */
        _CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));

        if ((sa = rtinfo->rti_info[i]) == NULL)
            continue;

        switch (i) {
        case RTAX_DST:
        case RTAX_NETMASK:
            if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
                hint = rtinfo->rti_info[RTAX_IFA];

            /* Scrub away any trace of embedded interface scope */
            sa = rtm_scrub(type, i, hint, sa, &ssbuf,
                sizeof (ssbuf), NULL);
            break;

        default:
            break;
        }

        rtinfo->rti_addrs |= (1 << i);
        dlen = ROUNDUP32(sa->sa_len);
        m_copyback(m, len, dlen, (caddr_t)sa);
        len += dlen;
    }
    if (m->m_pkthdr.len != len) {
        m_freem(m);
        return (NULL);
    }
    rtm->rtm_msglen = len;
    rtm->rtm_version = RTM_VERSION;
    rtm->rtm_type = type;
    return (m);
}
static int
rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w,
    kauth_cred_t *credp)
{
    int i;
    int len, dlen, second_time = 0;
    caddr_t cp0;

    rtinfo->rti_addrs = 0;
again:
    switch (type) {

    case RTM_DELADDR:
    case RTM_NEWADDR:
        len = sizeof (struct ifa_msghdr);
        break;

    case RTM_DELMADDR:
    case RTM_NEWMADDR:
        len = sizeof (struct ifma_msghdr);
        break;

    case RTM_IFINFO:
        len = sizeof (struct if_msghdr);
        break;

    case RTM_IFINFO2:
        len = sizeof (struct if_msghdr2);
        break;

    case RTM_NEWMADDR2:
        len = sizeof (struct ifma_msghdr2);
        break;

    case RTM_GET_EXT:
        len = sizeof (struct rt_msghdr_ext);
        break;

    case RTM_GET2:
        len = sizeof (struct rt_msghdr2);
        break;

    default:
        len = sizeof (struct rt_msghdr);
    }
    cp0 = cp;
    if (cp0)
        cp += len;
    for (i = 0; i < RTAX_MAX; i++) {
        struct sockaddr *sa, *hint;
        uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

        /*
         * Make sure to accommodate the largest possible size of sa_len.
         */
        _CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));

        if ((sa = rtinfo->rti_info[i]) == NULL)
            continue;

        switch (i) {
        case RTAX_DST:
        case RTAX_NETMASK:
            if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
                hint = rtinfo->rti_info[RTAX_IFA];

            /* Scrub away any trace of embedded interface scope */
            sa = rtm_scrub(type, i, hint, sa, &ssbuf,
                sizeof (ssbuf), NULL);
            break;

        case RTAX_IFP:
            sa = rtm_scrub(type, i, NULL, sa, &ssbuf,
                sizeof (ssbuf), credp);
            break;

        default:
            break;
        }

        rtinfo->rti_addrs |= (1 << i);
        dlen = ROUNDUP32(sa->sa_len);
        if (cp) {
            bcopy((caddr_t)sa, cp, (unsigned)dlen);
            cp += dlen;
        }
        len += dlen;
    }
    if (cp == NULL && w != NULL && !second_time) {
        struct walkarg *rw = w;

        if (rw->w_req != NULL) {
            if (rw->w_tmemsize < len) {
                if (rw->w_tmem != NULL)
                    FREE(rw->w_tmem, M_RTABLE);
                rw->w_tmem = _MALLOC(len, M_RTABLE, M_WAITOK);
                if (rw->w_tmem != NULL)
                    rw->w_tmemsize = len;
            }
            if (rw->w_tmem != NULL) {
                cp = rw->w_tmem;
                second_time = 1;
                goto again;
            }
        }
    }
    if (cp) {
        struct rt_msghdr *rtm = (struct rt_msghdr *)(void *)cp0;

        rtm->rtm_version = RTM_VERSION;
        rtm->rtm_type = type;
        rtm->rtm_msglen = len;
    }
    return (len);
}
/*
 * This routine is called to generate a message from the routing
 * socket indicating that a redirect has occurred, a routing lookup
 * has failed, or that a protocol has detected timeouts to a particular
 * destination.
 */
void
rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error)
{
    struct rt_msghdr *rtm;
    struct mbuf *m;
    struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
    struct sockproto route_proto = { PF_ROUTE, 0 };

    if (route_cb.any_count == 0)
        return;
    m = rt_msg1(type, rtinfo);
    if (m == NULL)
        return;
    rtm = mtod(m, struct rt_msghdr *);
    rtm->rtm_flags = RTF_DONE | flags;
    rtm->rtm_errno = error;
    rtm->rtm_addrs = rtinfo->rti_addrs;
    route_proto.sp_family = sa ? sa->sa_family : 0;
    raw_input(m, &route_proto, &route_src, &route_dst);
}
/*
 * This routine is called to generate a message from the routing
 * socket indicating that the status of a network interface has changed.
 */
void
rt_ifmsg(struct ifnet *ifp)
{
    struct if_msghdr *ifm;
    struct mbuf *m;
    struct rt_addrinfo info;
    struct sockproto route_proto = { PF_ROUTE, 0 };

    if (route_cb.any_count == 0)
        return;
    bzero((caddr_t)&info, sizeof (info));
    m = rt_msg1(RTM_IFINFO, &info);
    if (m == NULL)
        return;
    ifm = mtod(m, struct if_msghdr *);
    ifm->ifm_index = ifp->if_index;
    ifm->ifm_flags = (u_short)ifp->if_flags;
    if_data_internal_to_if_data(ifp, &ifp->if_data, &ifm->ifm_data);
    ifm->ifm_addrs = 0;
    raw_input(m, &route_proto, &route_src, &route_dst);
}
/*
 * This is called to generate messages from the routing socket
 * indicating a network interface has had addresses associated with it.
 * If we ever reverse the logic and make messages TO the routing
 * socket indicate a request to configure interfaces, then this will
 * be unnecessary, as the routing socket will automatically generate
 * copies of it.
 *
 * Since this is coming from the interface, it is expected that the
 * interface will be locked.  Caller must hold rnh_lock and rt_lock.
 */
void
rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
{
    struct rt_addrinfo info;
    struct sockaddr *sa = 0;
    int pass;
    struct mbuf *m = 0;
    struct ifnet *ifp = ifa->ifa_ifp;
    struct sockproto route_proto = { PF_ROUTE, 0 };

    lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
    RT_LOCK_ASSERT_HELD(rt);

    if (route_cb.any_count == 0)
        return;

    /* Become a regular mutex, just in case */
    RT_CONVERT_LOCK(rt);
    for (pass = 1; pass < 3; pass++) {
        bzero((caddr_t)&info, sizeof (info));
        if ((cmd == RTM_ADD && pass == 1) ||
            (cmd == RTM_DELETE && pass == 2)) {
            struct ifa_msghdr *ifam;
            int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;

            /* Lock ifp for if_lladdr */
            ifnet_lock_shared(ifp);
            info.rti_info[RTAX_IFA] = sa = ifa->ifa_addr;
            /*
             * Holding ifnet lock here prevents the link address
             * from changing contents, so no need to hold its
             * lock.  The link address is always present; it's
             * never freed.
             */
            info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;
            info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
            info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
            if ((m = rt_msg1(ncmd, &info)) == NULL) {
                ifnet_lock_done(ifp);
                continue;
            }
            ifnet_lock_done(ifp);
            ifam = mtod(m, struct ifa_msghdr *);
            ifam->ifam_index = ifp->if_index;
            ifam->ifam_metric = ifa->ifa_metric;
            ifam->ifam_flags = ifa->ifa_flags;
            ifam->ifam_addrs = info.rti_addrs;
        }
        if ((cmd == RTM_ADD && pass == 2) ||
            (cmd == RTM_DELETE && pass == 1)) {
            struct rt_msghdr *rtm;

            if (rt == NULL)
                continue;
            info.rti_info[RTAX_NETMASK] = rt_mask(rt);
            info.rti_info[RTAX_DST] = sa = rt_key(rt);
            info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
            if ((m = rt_msg1(cmd, &info)) == NULL)
                continue;
            rtm = mtod(m, struct rt_msghdr *);
            rtm->rtm_index = ifp->if_index;
            rtm->rtm_flags |= rt->rt_flags;
            rtm->rtm_errno = error;
            rtm->rtm_addrs = info.rti_addrs;
        }
        route_proto.sp_protocol = sa ? sa->sa_family : 0;
        raw_input(m, &route_proto, &route_src, &route_dst);
    }
}
/*
 * This is the analogue to rt_newaddrmsg which performs the same
 * function but for multicast group memberships.  This is easier since
 * there is no route state to worry about.
 */
void
rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma)
{
    struct rt_addrinfo info;
    struct mbuf *m = 0;
    struct ifnet *ifp = ifma->ifma_ifp;
    struct ifma_msghdr *ifmam;
    struct sockproto route_proto = { PF_ROUTE, 0 };

    if (route_cb.any_count == 0)
        return;

    /* Lock ifp for if_lladdr */
    ifnet_lock_shared(ifp);
    bzero((caddr_t)&info, sizeof (info));
    info.rti_info[RTAX_IFA] = ifma->ifma_addr;
    /* lladdr doesn't need lock */
    info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;

    /*
     * If a link-layer address is present, present it as a ``gateway''
     * (similarly to how ARP entries, e.g., are presented).
     */
    info.rti_info[RTAX_GATEWAY] = (ifma->ifma_ll != NULL) ?
        ifma->ifma_ll->ifma_addr : NULL;
    if ((m = rt_msg1(cmd, &info)) == NULL) {
        ifnet_lock_done(ifp);
        return;
    }
    ifmam = mtod(m, struct ifma_msghdr *);
    ifmam->ifmam_index = ifp->if_index;
    ifmam->ifmam_addrs = info.rti_addrs;
    route_proto.sp_protocol = ifma->ifma_addr->sa_family;
    ifnet_lock_done(ifp);
    raw_input(m, &route_proto, &route_src, &route_dst);
}
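/*
 * Illustrative userland sketch of consuming the messages generated above:
 * a process opens a PF_ROUTE socket and read(2)s RTM_NEWADDR/RTM_DELADDR
 * (and other RTM_*) messages as they are broadcast.  Hedged example only;
 * the function name is hypothetical and the block is not compiled.
 */
#if 0
#include <net/route.h>
#include <sys/socket.h>
#include <unistd.h>

static void
monitor_example(void)
{
    union {
        struct rt_msghdr hdr;
        char space[2048];
    } buf;
    ssize_t n;
    int s = socket(PF_ROUTE, SOCK_RAW, 0);

    if (s < 0)
        return;
    while ((n = read(s, &buf, sizeof (buf))) > 0) {
        if (buf.hdr.rtm_version != RTM_VERSION)
            continue;
        switch (buf.hdr.rtm_type) {
        case RTM_NEWADDR:   /* from rt_newaddrmsg() */
        case RTM_DELADDR:
        case RTM_NEWMADDR:  /* from rt_newmaddrmsg() */
            /* ... parse the ifa_msghdr/ifma_msghdr here ... */
            break;
        default:
            break;
        }
    }
    close(s);
}
#endif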
static const char *
rtm2str(int type)
{
    const char *c = "RTM_?";

    switch (type) {
    case RTM_GET_SILENT:
        c = "RTM_GET_SILENT";
        break;
    case RTM_NEWMADDR2:
        c = "RTM_NEWMADDR2";
        break;
    default:
        break;
    }

    return (c);
}
/*
 * This is used in dumping the kernel table via sysctl().
 */
static int
sysctl_dumpentry(struct radix_node *rn, void *vw)
{
    struct walkarg *w = vw;
    struct rtentry *rt = (struct rtentry *)rn;
    int error = 0, size;
    struct rt_addrinfo info;
    kauth_cred_t cred;

    cred = kauth_cred_proc_ref(current_proc());

    RT_LOCK(rt);
    if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
        goto done;
    bzero((caddr_t)&info, sizeof (info));
    info.rti_info[RTAX_DST] = rt_key(rt);
    info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
    info.rti_info[RTAX_NETMASK] = rt_mask(rt);
    info.rti_info[RTAX_GENMASK] = rt->rt_genmask;

    if (w->w_op != NET_RT_DUMP2) {
        size = rt_msg2(RTM_GET, &info, NULL, w, &cred);
        if (w->w_req != NULL && w->w_tmem != NULL) {
            struct rt_msghdr *rtm =
                (struct rt_msghdr *)(void *)w->w_tmem;

            rtm->rtm_flags = rt->rt_flags;
            rtm->rtm_use = rt->rt_use;
            rt_getmetrics(rt, &rtm->rtm_rmx);
            rtm->rtm_index = rt->rt_ifp->if_index;
            rtm->rtm_addrs = info.rti_addrs;
            error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
        }
    } else {
        size = rt_msg2(RTM_GET2, &info, NULL, w, &cred);
        if (w->w_req != NULL && w->w_tmem != NULL) {
            struct rt_msghdr2 *rtm =
                (struct rt_msghdr2 *)(void *)w->w_tmem;

            rtm->rtm_flags = rt->rt_flags;
            rtm->rtm_use = rt->rt_use;
            rt_getmetrics(rt, &rtm->rtm_rmx);
            rtm->rtm_index = rt->rt_ifp->if_index;
            rtm->rtm_refcnt = rt->rt_refcnt;
            if (rt->rt_parent)
                rtm->rtm_parentflags = rt->rt_parent->rt_flags;
            else
                rtm->rtm_parentflags = 0;
            rtm->rtm_reserved = 0;
            rtm->rtm_addrs = info.rti_addrs;
            error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
        }
    }

done:
    RT_UNLOCK(rt);
    kauth_cred_unref(&cred);
    return (error);
}
/*
 * This is used for dumping extended information from route entries.
 */
static int
sysctl_dumpentry_ext(struct radix_node *rn, void *vw)
{
    struct walkarg *w = vw;
    struct rtentry *rt = (struct rtentry *)rn;
    int error = 0, size;
    struct rt_addrinfo info;
    kauth_cred_t cred;

    cred = kauth_cred_proc_ref(current_proc());

    RT_LOCK(rt);
    if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg))
        goto done;
    bzero(&info, sizeof (info));
    info.rti_info[RTAX_DST] = rt_key(rt);
    info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
    info.rti_info[RTAX_NETMASK] = rt_mask(rt);
    info.rti_info[RTAX_GENMASK] = rt->rt_genmask;

    size = rt_msg2(RTM_GET_EXT, &info, NULL, w, &cred);
    if (w->w_req != NULL && w->w_tmem != NULL) {
        struct rt_msghdr_ext *ertm =
            (struct rt_msghdr_ext *)(void *)w->w_tmem;

        ertm->rtm_flags = rt->rt_flags;
        ertm->rtm_use = rt->rt_use;
        rt_getmetrics(rt, &ertm->rtm_rmx);
        ertm->rtm_index = rt->rt_ifp->if_index;
        ertm->rtm_errno = 0;
        ertm->rtm_addrs = info.rti_addrs;
        if (rt->rt_llinfo_get_ri == NULL) {
            bzero(&ertm->rtm_ri, sizeof (ertm->rtm_ri));
            ertm->rtm_ri.ri_rssi = IFNET_RSSI_UNKNOWN;
            ertm->rtm_ri.ri_lqm = IFNET_LQM_THRESH_OFF;
            ertm->rtm_ri.ri_npm = IFNET_NPM_THRESH_UNKNOWN;
        } else {
            rt->rt_llinfo_get_ri(rt, &ertm->rtm_ri);
        }
        error = SYSCTL_OUT(w->w_req, (caddr_t)ertm, size);
    }

done:
    RT_UNLOCK(rt);
    kauth_cred_unref(&cred);
    return (error);
}
/*
 * To avoid calling copyout() while holding locks (which can cause problems
 * in the paging path), sysctl_iflist() and sysctl_iflist2() construct
 * the list in two passes.  In the first pass we compute the total
 * length of the data we are going to copy out, then we release
 * all locks to allocate a temporary buffer that gets filled
 * in the second pass.
 *
 * Note that we are verifying the assumption that _MALLOC returns a buffer
 * that is at least 32 bits aligned and that the messages and addresses are
 * 32 bits aligned.
 */
static int
sysctl_iflist(int af, struct walkarg *w)
{
    struct ifnet *ifp;
    struct ifaddr *ifa;
    struct rt_addrinfo info;
    int len, error = 0;
    int pass = 0;
    int total_len = 0, current_len = 0;
    char *total_buffer = NULL, *cp = NULL;
    kauth_cred_t cred;

    cred = kauth_cred_proc_ref(current_proc());

    bzero((caddr_t)&info, sizeof (info));

    for (pass = 0; pass < 2; pass++) {
        ifnet_head_lock_shared();

        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            if (error)
                break;
            if (w->w_arg && w->w_arg != ifp->if_index)
                continue;
            ifnet_lock_shared(ifp);
            /*
             * Holding ifnet lock here prevents the link address
             * from changing contents, so no need to hold the ifa
             * lock.  The link address is always present; it's
             * never freed.
             */
            ifa = ifp->if_lladdr;
            info.rti_info[RTAX_IFP] = ifa->ifa_addr;
            len = rt_msg2(RTM_IFINFO, &info, NULL, NULL, &cred);
            if (pass == 0) {
                total_len += len;
            } else {
                struct if_msghdr *ifm;

                if (current_len + len > total_len) {
                    ifnet_lock_done(ifp);
                    error = ENOBUFS;
                    break;
                }
                info.rti_info[RTAX_IFP] = ifa->ifa_addr;
                len = rt_msg2(RTM_IFINFO, &info,
                    (caddr_t)cp, NULL, &cred);
                info.rti_info[RTAX_IFP] = NULL;

                ifm = (struct if_msghdr *)(void *)cp;
                ifm->ifm_index = ifp->if_index;
                ifm->ifm_flags = (u_short)ifp->if_flags;
                if_data_internal_to_if_data(ifp, &ifp->if_data,
                    &ifm->ifm_data);
                ifm->ifm_addrs = info.rti_addrs;

                cp += len;
                VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
                current_len += len;
            }
            while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
                IFA_LOCK(ifa);
                if (af && af != ifa->ifa_addr->sa_family) {
                    IFA_UNLOCK(ifa);
                    continue;
                }
                info.rti_info[RTAX_IFA] = ifa->ifa_addr;
                info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
                info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
                len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
                    &cred);
                if (pass == 0) {
                    total_len += len;
                } else {
                    struct ifa_msghdr *ifam;

                    if (current_len + len > total_len) {
                        IFA_UNLOCK(ifa);
                        error = ENOBUFS;
                        break;
                    }
                    len = rt_msg2(RTM_NEWADDR, &info,
                        (caddr_t)cp, NULL, &cred);

                    ifam = (struct ifa_msghdr *)(void *)cp;
                    ifam->ifam_index =
                        ifa->ifa_ifp->if_index;
                    ifam->ifam_flags = ifa->ifa_flags;
                    ifam->ifam_metric = ifa->ifa_metric;
                    ifam->ifam_addrs = info.rti_addrs;

                    cp += len;
                    VERIFY(IS_P2ALIGNED(cp,
                        sizeof (u_int32_t)));
                    current_len += len;
                }
                IFA_UNLOCK(ifa);
            }
            ifnet_lock_done(ifp);
            info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
                info.rti_info[RTAX_BRD] = NULL;
        }
        ifnet_head_done();

        if (error != 0) {
            if (error == ENOBUFS)
                printf("%s: current_len (%d) + len (%d) > "
                    "total_len (%d)\n", __func__, current_len,
                    len, total_len);
            break;
        }

        if (pass == 0) {
            /* Better to return zero length buffer than ENOBUFS */
            if (total_len == 0)
                total_len = 1;
            total_len += total_len >> 3;
            total_buffer = _MALLOC(total_len, M_RTABLE,
                M_ZERO | M_WAITOK);
            if (total_buffer == NULL) {
                printf("%s: _MALLOC(%d) failed\n", __func__,
                    total_len);
                error = ENOBUFS;
                break;
            }
            cp = total_buffer;
            VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
        } else {
            error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
            if (error)
                break;
        }
    }

    if (total_buffer != NULL)
        _FREE(total_buffer, M_RTABLE);

    kauth_cred_unref(&cred);
    return (error);
}
static int
sysctl_iflist2(int af, struct walkarg *w)
{
    struct ifnet *ifp;
    struct ifaddr *ifa;
    struct rt_addrinfo info;
    int len, error = 0;
    int pass = 0;
    int total_len = 0, current_len = 0;
    char *total_buffer = NULL, *cp = NULL;
    kauth_cred_t cred;

    cred = kauth_cred_proc_ref(current_proc());

    bzero((caddr_t)&info, sizeof (info));

    for (pass = 0; pass < 2; pass++) {
        struct ifmultiaddr *ifma;

        ifnet_head_lock_shared();

        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            if (error)
                break;
            if (w->w_arg && w->w_arg != ifp->if_index)
                continue;
            ifnet_lock_shared(ifp);
            /*
             * Holding ifnet lock here prevents the link address
             * from changing contents, so no need to hold the ifa
             * lock.  The link address is always present; it's
             * never freed.
             */
            ifa = ifp->if_lladdr;
            info.rti_info[RTAX_IFP] = ifa->ifa_addr;
            len = rt_msg2(RTM_IFINFO2, &info, NULL, NULL, &cred);
            if (pass == 0) {
                total_len += len;
            } else {
                struct if_msghdr2 *ifm;

                if (current_len + len > total_len) {
                    ifnet_lock_done(ifp);
                    error = ENOBUFS;
                    break;
                }
                info.rti_info[RTAX_IFP] = ifa->ifa_addr;
                len = rt_msg2(RTM_IFINFO2, &info,
                    (caddr_t)cp, NULL, &cred);
                info.rti_info[RTAX_IFP] = NULL;

                ifm = (struct if_msghdr2 *)(void *)cp;
                ifm->ifm_addrs = info.rti_addrs;
                ifm->ifm_flags = (u_short)ifp->if_flags;
                ifm->ifm_index = ifp->if_index;
                ifm->ifm_snd_len = IFCQ_LEN(&ifp->if_snd);
                ifm->ifm_snd_maxlen = IFCQ_MAXLEN(&ifp->if_snd);
                ifm->ifm_snd_drops =
                    ifp->if_snd.ifcq_dropcnt.packets;
                ifm->ifm_timer = ifp->if_timer;
                if_data_internal_to_if_data64(ifp,
                    &ifp->if_data, &ifm->ifm_data);

                cp += len;
                VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
                current_len += len;
            }
            while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
                IFA_LOCK(ifa);
                if (af && af != ifa->ifa_addr->sa_family) {
                    IFA_UNLOCK(ifa);
                    continue;
                }
                info.rti_info[RTAX_IFA] = ifa->ifa_addr;
                info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
                info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
                len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
                    &cred);
                if (pass == 0) {
                    total_len += len;
                } else {
                    struct ifa_msghdr *ifam;

                    if (current_len + len > total_len) {
                        IFA_UNLOCK(ifa);
                        error = ENOBUFS;
                        break;
                    }
                    len = rt_msg2(RTM_NEWADDR, &info,
                        (caddr_t)cp, NULL, &cred);

                    ifam = (struct ifa_msghdr *)(void *)cp;
                    ifam->ifam_index =
                        ifa->ifa_ifp->if_index;
                    ifam->ifam_flags = ifa->ifa_flags;
                    ifam->ifam_metric = ifa->ifa_metric;
                    ifam->ifam_addrs = info.rti_addrs;

                    cp += len;
                    VERIFY(IS_P2ALIGNED(cp,
                        sizeof (u_int32_t)));
                    current_len += len;
                }
                IFA_UNLOCK(ifa);
            }
            if (error) {
                ifnet_lock_done(ifp);
                break;
            }

            for (ifma = LIST_FIRST(&ifp->if_multiaddrs);
                ifma != NULL; ifma = LIST_NEXT(ifma, ifma_link)) {
                struct ifaddr *ifa0;

                if (af && af != ifma->ifma_addr->sa_family) {
                    continue;
                }
                bzero((caddr_t)&info, sizeof (info));
                info.rti_info[RTAX_IFA] = ifma->ifma_addr;
                /*
                 * Holding ifnet lock here prevents the link
                 * address from changing contents, so no need
                 * to hold the ifa0 lock.  The link address is
                 * always present; it's never freed.
                 */
                ifa0 = ifp->if_lladdr;
                info.rti_info[RTAX_IFP] = ifa0->ifa_addr;
                if (ifma->ifma_ll != NULL)
                    info.rti_info[RTAX_GATEWAY] =
                        ifma->ifma_ll->ifma_addr;
                len = rt_msg2(RTM_NEWMADDR2, &info, NULL, NULL,
                    &cred);
                if (pass == 0) {
                    total_len += len;
                } else {
                    struct ifma_msghdr2 *ifmam;

                    if (current_len + len > total_len) {
                        error = ENOBUFS;
                        break;
                    }
                    len = rt_msg2(RTM_NEWMADDR2, &info,
                        (caddr_t)cp, NULL, &cred);

                    ifmam =
                        (struct ifma_msghdr2 *)(void *)cp;
                    ifmam->ifmam_addrs = info.rti_addrs;
                    ifmam->ifmam_flags = 0;
                    ifmam->ifmam_index =
                        ifma->ifma_ifp->if_index;
                    ifmam->ifmam_refcount =
                        ifma->ifma_reqcnt;

                    cp += len;
                    VERIFY(IS_P2ALIGNED(cp,
                        sizeof (u_int32_t)));
                    current_len += len;
                }
            }
            ifnet_lock_done(ifp);
            info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
                info.rti_info[RTAX_BRD] = NULL;
        }
        ifnet_head_done();

        if (error != 0) {
            if (error == ENOBUFS)
                printf("%s: current_len (%d) + len (%d) > "
                    "total_len (%d)\n", __func__, current_len,
                    len, total_len);
            break;
        }

        if (pass == 0) {
            /* Better to return zero length buffer than ENOBUFS */
            if (total_len == 0)
                total_len = 1;
            total_len += total_len >> 3;
            total_buffer = _MALLOC(total_len, M_RTABLE,
                M_ZERO | M_WAITOK);
            if (total_buffer == NULL) {
                printf("%s: _MALLOC(%d) failed\n", __func__,
                    total_len);
                error = ENOBUFS;
                break;
            }
            cp = total_buffer;
            VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
        } else {
            error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
            if (error)
                break;
        }
    }

    if (total_buffer != NULL)
        _FREE(total_buffer, M_RTABLE);

    kauth_cred_unref(&cred);
    return (error);
}
static int
sysctl_rtstat(struct sysctl_req *req)
{
    return (SYSCTL_OUT(req, &rtstat, sizeof (struct rtstat)));
}

static int
sysctl_rttrash(struct sysctl_req *req)
{
    return (SYSCTL_OUT(req, &rttrash, sizeof (rttrash)));
}
static int
sysctl_rtsock SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
    int     *name = (int *)arg1;
    u_int   namelen = arg2;
    struct radix_node_head *rnh;
    int     i, error = EINVAL;
    u_char  af;
    struct  walkarg w;

    name++;
    namelen--;
    if (req->newptr)
        return (EPERM);
    if (namelen != 3)
        return (EINVAL);
    af = name[0];
    Bzero(&w, sizeof (w));
    w.w_op = name[1];
    w.w_arg = name[2];
    w.w_req = req;

    switch (w.w_op) {

    case NET_RT_DUMP:
    case NET_RT_DUMP2:
    case NET_RT_FLAGS:
        lck_mtx_lock(rnh_lock);
        for (i = 1; i <= AF_MAX; i++)
            if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
                (error = rnh->rnh_walktree(rnh,
                sysctl_dumpentry, &w)))
                break;
        lck_mtx_unlock(rnh_lock);
        break;
    case NET_RT_DUMPX:
    case NET_RT_DUMPX_FLAGS:
        lck_mtx_lock(rnh_lock);
        for (i = 1; i <= AF_MAX; i++)
            if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
                (error = rnh->rnh_walktree(rnh,
                sysctl_dumpentry_ext, &w)))
                break;
        lck_mtx_unlock(rnh_lock);
        break;
    case NET_RT_IFLIST:
        error = sysctl_iflist(af, &w);
        break;
    case NET_RT_IFLIST2:
        error = sysctl_iflist2(af, &w);
        break;
    case NET_RT_STAT:
        error = sysctl_rtstat(req);
        break;
    case NET_RT_TRASH:
        error = sysctl_rttrash(req);
        break;
    }
    if (w.w_tmem != NULL)
        FREE(w.w_tmem, M_RTABLE);
    return (error);
}
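/*
 * Illustrative userland sketch of driving this handler: fetch the
 * NET_RT_IFLIST2 dump through sysctl(3) using the size-then-copy pattern
 * the two-pass kernel code above supports.  Hedged example; the function
 * name is hypothetical and the block is not compiled.
 */
#if 0
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <net/route.h>
#include <stdlib.h>

static char *
fetch_iflist2_example(size_t *lenp)
{
    int mib[6] = { CTL_NET, PF_ROUTE, 0, 0 /* any AF */,
        NET_RT_IFLIST2, 0 /* any ifindex */ };
    size_t len = 0;
    char *buf;

    /* First call: kernel reports the required buffer length. */
    if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)
        return (NULL);
    if ((buf = malloc(len)) == NULL)
        return (NULL);
    /* Second call: kernel fills the buffer with rt_msg2()-built records. */
    if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
        free(buf);
        return (NULL);
    }
    *lenp = len;
    return (buf);
}
#endif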
/*
 * Definitions of protocols supported in the ROUTE domain.
 */
static struct protosw routesw[] = {
{
    .pr_type =      SOCK_RAW,
    .pr_flags =     PR_ATOMIC|PR_ADDR,
    .pr_output =    route_output,
    .pr_ctlinput =  raw_ctlinput,
    .pr_init =      raw_init,
    .pr_usrreqs =   &route_usrreqs,
}
};

static int route_proto_count = (sizeof (routesw) / sizeof (struct protosw));

struct domain routedomain_s = {
    .dom_family =   PF_ROUTE,
    .dom_name =     "route",
    .dom_init =     route_dinit,
};

static void
route_dinit(struct domain *dp)
{
    struct protosw *pr;
    int i;

    VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
    VERIFY(routedomain == NULL);

    routedomain = dp;

    for (i = 0, pr = &routesw[0]; i < route_proto_count; i++, pr++)
        net_add_proto(pr, dp, 1);
}