]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/rtsock.c
xnu-4570.20.62.tar.gz
[apple/xnu.git] / bsd / net / rtsock.c
1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1988, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)rtsock.c 8.5 (Berkeley) 11/2/94
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kauth.h>
66 #include <sys/kernel.h>
67 #include <sys/sysctl.h>
68 #include <sys/proc.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/domain.h>
74 #include <sys/protosw.h>
75 #include <sys/syslog.h>
76 #include <sys/mcache.h>
77 #include <kern/locks.h>
78 #include <sys/codesign.h>
79
80 #include <net/if.h>
81 #include <net/route.h>
82 #include <net/dlil.h>
83 #include <net/raw_cb.h>
84 #include <netinet/in.h>
85 #include <netinet/in_var.h>
86 #include <netinet/in_arp.h>
87 #include <netinet6/nd6.h>
88
89 extern struct rtstat rtstat;
90 extern struct domain routedomain_s;
91 static struct domain *routedomain = NULL;
92
93 MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
94
/*
 * Canned PF_ROUTE source/destination sockaddrs handed to raw_input()
 * when echoing routing messages; sa_len is 2 (length + family only).
 */
static struct sockaddr route_dst = { 2, PF_ROUTE, { 0, } };
static struct sockaddr route_src = { 2, PF_ROUTE, { 0, } };
/* Safe placeholder substituted for a zero-length sockaddr in rt_xaddrs() */
static struct sockaddr sa_zero = { sizeof (sa_zero), AF_INET, { 0, } };

/*
 * Counts of currently attached routing sockets, maintained atomically
 * by rts_attach()/rts_detach(); any_count gates whether route_output()
 * bothers constructing a loopback copy for other listeners.
 */
struct route_cb {
	u_int32_t	ip_count;	/* attached w/ AF_INET */
	u_int32_t	ip6_count;	/* attached w/ AF_INET6 */
	u_int32_t	any_count;	/* total attached */
};

static struct route_cb route_cb;

/*
 * Per-request state threaded through the sysctl table-walk routines
 * (sysctl_dumpentry() et al.).  NOTE(review): field semantics below are
 * inferred from names/prototypes; the consumers are outside this chunk.
 */
struct walkarg {
	int	w_tmemsize;		/* size of w_tmem scratch buffer */
	int	w_op, w_arg;		/* sysctl operation and its argument */
	caddr_t	w_tmem;			/* scratch buffer for message build */
	struct sysctl_req *w_req;	/* the sysctl request being served */
};
113
114 static void route_dinit(struct domain *);
115 static int rts_abort(struct socket *);
116 static int rts_attach(struct socket *, int, struct proc *);
117 static int rts_bind(struct socket *, struct sockaddr *, struct proc *);
118 static int rts_connect(struct socket *, struct sockaddr *, struct proc *);
119 static int rts_detach(struct socket *);
120 static int rts_disconnect(struct socket *);
121 static int rts_peeraddr(struct socket *, struct sockaddr **);
122 static int rts_send(struct socket *, int, struct mbuf *, struct sockaddr *,
123 struct mbuf *, struct proc *);
124 static int rts_shutdown(struct socket *);
125 static int rts_sockaddr(struct socket *, struct sockaddr **);
126
127 static int route_output(struct mbuf *, struct socket *);
128 static int rt_setmetrics(u_int32_t, struct rt_metrics *, struct rtentry *);
129 static void rt_getmetrics(struct rtentry *, struct rt_metrics *);
130 static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
131 struct sockaddr *, unsigned int);
132 static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *);
133 static struct mbuf *rt_msg1(int, struct rt_addrinfo *);
134 static int rt_msg2(int, struct rt_addrinfo *, caddr_t, struct walkarg *,
135 kauth_cred_t *);
136 static int sysctl_dumpentry(struct radix_node *rn, void *vw);
137 static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw);
138 static int sysctl_iflist(int af, struct walkarg *w);
139 static int sysctl_iflist2(int af, struct walkarg *w);
140 static int sysctl_rtstat(struct sysctl_req *);
141 static int sysctl_rttrash(struct sysctl_req *);
142 static int sysctl_rtsock SYSCTL_HANDLER_ARGS;
143
144 SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED,
145 sysctl_rtsock, "");
146
147 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "routing");
148
/*
 * Round x DOWN to a multiple of 1024 (P2ALIGN truncates to the
 * power-of-2 boundary); x is assumed positive.
 */
#define	ALIGN_BYTES(x) do { \
	x = P2ALIGN(x, 1024); \
} while(0)

/*
 * Round a up to a multiple of sizeof (uint32_t); a non-positive value
 * yields sizeof (uint32_t) so the walk below always makes progress.
 */
#define	ROUNDUP32(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof (uint32_t) - 1))) : \
	sizeof (uint32_t))

/* Advance cursor x past sockaddr n, keeping 32-bit alignment */
#define	ADVANCE32(x, n) \
	(x += ROUNDUP32((n)->sa_len))
161 /*
162 * It really doesn't make any sense at all for this code to share much
163 * with raw_usrreq.c, since its functionality is so restricted. XXX
164 */
165 static int
166 rts_abort(struct socket *so)
167 {
168 return (raw_usrreqs.pru_abort(so));
169 }
170
171 /* pru_accept is EOPNOTSUPP */
172
/*
 * pru_attach handler: allocate and wire up the rawcb for a new
 * PF_ROUTE socket, bump the per-family listener counters, and mark
 * the socket connected (routing sockets are "connected" at attach).
 *
 * Returns 0 on success, ENOBUFS if the pcb cannot be allocated, or
 * whatever raw_attach() fails with.  Called with the socket locked.
 */
static int
rts_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(p)
	struct rawcb *rp;
	int error;

	VERIFY(so->so_pcb == NULL);

	MALLOC(rp, struct rawcb *, sizeof (*rp), M_PCB, M_WAITOK | M_ZERO);
	if (rp == NULL)
		return (ENOBUFS);

	so->so_pcb = (caddr_t)rp;
	/* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */
	error = raw_attach(so, proto);
	rp = sotorawcb(so);
	if (error) {
		/* undo the allocation; flag the pcb as already cleared */
		FREE(rp, M_PCB);
		so->so_pcb = NULL;
		so->so_flags |= SOF_PCBCLEARING;
		return (error);
	}

	/* count this listener per protocol family (see route_cb) */
	switch (rp->rcb_proto.sp_protocol) {
	case AF_INET:
		atomic_add_32(&route_cb.ip_count, 1);
		break;
	case AF_INET6:
		atomic_add_32(&route_cb.ip6_count, 1);
		break;
	}
	rp->rcb_faddr = &route_src;
	atomic_add_32(&route_cb.any_count, 1);
	/* the socket is already locked when we enter rts_attach */
	soisconnected(so);
	so->so_options |= SO_USELOOPBACK;
	return (0);
}
212
213 static int
214 rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
215 {
216 return (raw_usrreqs.pru_bind(so, nam, p)); /* xxx just EINVAL */
217 }
218
219 static int
220 rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
221 {
222 return (raw_usrreqs.pru_connect(so, nam, p)); /* XXX just EINVAL */
223 }
224
225 /* pru_connect2 is EOPNOTSUPP */
226 /* pru_control is EOPNOTSUPP */
227
228 static int
229 rts_detach(struct socket *so)
230 {
231 struct rawcb *rp = sotorawcb(so);
232
233 VERIFY(rp != NULL);
234
235 switch (rp->rcb_proto.sp_protocol) {
236 case AF_INET:
237 atomic_add_32(&route_cb.ip_count, -1);
238 break;
239 case AF_INET6:
240 atomic_add_32(&route_cb.ip6_count, -1);
241 break;
242 }
243 atomic_add_32(&route_cb.any_count, -1);
244 return (raw_usrreqs.pru_detach(so));
245 }
246
247 static int
248 rts_disconnect(struct socket *so)
249 {
250 return (raw_usrreqs.pru_disconnect(so));
251 }
252
253 /* pru_listen is EOPNOTSUPP */
254
255 static int
256 rts_peeraddr(struct socket *so, struct sockaddr **nam)
257 {
258 return (raw_usrreqs.pru_peeraddr(so, nam));
259 }
260
261 /* pru_rcvd is EOPNOTSUPP */
262 /* pru_rcvoob is EOPNOTSUPP */
263
264 static int
265 rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
266 struct mbuf *control, struct proc *p)
267 {
268 return (raw_usrreqs.pru_send(so, flags, m, nam, control, p));
269 }
270
271 /* pru_sense is null */
272
273 static int
274 rts_shutdown(struct socket *so)
275 {
276 return (raw_usrreqs.pru_shutdown(so));
277 }
278
279 static int
280 rts_sockaddr(struct socket *so, struct sockaddr **nam)
281 {
282 return (raw_usrreqs.pru_sockaddr(so, nam));
283 }
284
/*
 * User-request dispatch table for PF_ROUTE sockets.  Requests without
 * an rts_* wrapper here (accept, connect2, control, listen, rcvd,
 * rcvoob, sense — see the comments above) are left to the pr_usrreqs
 * defaults.
 */
static struct pr_usrreqs route_usrreqs = {
	.pru_abort =		rts_abort,
	.pru_attach =		rts_attach,
	.pru_bind =		rts_bind,
	.pru_connect =		rts_connect,
	.pru_detach =		rts_detach,
	.pru_disconnect =	rts_disconnect,
	.pru_peeraddr =		rts_peeraddr,
	.pru_send =		rts_send,
	.pru_shutdown =		rts_shutdown,
	.pru_sockaddr =		rts_sockaddr,
	.pru_sosend =		sosend,
	.pru_soreceive =	soreceive,
};
299
/*ARGSUSED*/
/*
 * Process one routing-socket message from userland.
 *
 * The message is copied out of the mbuf chain, validated, and acted
 * upon (RTM_ADD/DELETE/GET/CHANGE/LOCK); the possibly-grown reply is
 * then copied back into the mbuf and either queued only to the sender
 * (RTM_GET_SILENT) or broadcast to all routing-socket listeners via
 * raw_input().
 *
 * Locking: entered with the socket locked; the socket lock is dropped
 * while rnh_lock (and per-route RT_LOCK) are held for the table work,
 * then retaken for delivery.  The senderr() macro records the error
 * and jumps to the common "flush" path so the reply is still echoed
 * with rtm_errno set.
 *
 * Returns 0 or an errno (also reflected in rtm_errno of the reply).
 */
static int
route_output(struct mbuf *m, struct socket *so)
{
	struct rt_msghdr *rtm = NULL;
	struct rtentry *rt = NULL;
	struct rtentry *saved_nrt = NULL;
	struct radix_node_head *rnh;
	struct rt_addrinfo info;
	int len, error = 0;
	sa_family_t dst_sa_family = 0;
	struct ifnet *ifp = NULL;
	struct sockaddr_in dst_in, gate_in;
	int sendonlytoself = 0;
	unsigned int ifscope = IFSCOPE_NONE;
	struct rawcb *rp = NULL;
	boolean_t is_router = FALSE;
#define	senderr(e) { error = (e); goto flush; }
	if (m == NULL || ((m->m_len < sizeof (intptr_t)) &&
	    (m = m_pullup(m, sizeof (intptr_t))) == NULL))
		return (ENOBUFS);
	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * Unlock the socket (but keep a reference) it won't be
	 * accessed until raw_input appends to it.
	 */
	socket_unlock(so, 0);
	lck_mtx_lock(rnh_lock);

	/* Copy the message into a private buffer and validate the header */
	len = m->m_pkthdr.len;
	if (len < sizeof (*rtm) ||
	    len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EINVAL);
	}
	R_Malloc(rtm, struct rt_msghdr *, len);
	if (rtm == NULL) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(ENOBUFS);
	}
	m_copydata(m, 0, len, (caddr_t)rtm);
	if (rtm->rtm_version != RTM_VERSION) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EPROTONOSUPPORT);
	}

	/*
	 * Silent version of RTM_GET for Reachabiltiy APIs. We may change
	 * all RTM_GETs to be silent in the future, so this is private for now.
	 */
	if (rtm->rtm_type == RTM_GET_SILENT) {
		if (!(so->so_options & SO_USELOOPBACK))
			senderr(EINVAL);
		sendonlytoself = 1;
		rtm->rtm_type = RTM_GET;
	}

	/*
	 * Perform permission checking, only privileged sockets
	 * may perform operations other than RTM_GET
	 */
	if (rtm->rtm_type != RTM_GET && !(so->so_state & SS_PRIV)) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EPERM);
	}

	rtm->rtm_pid = proc_selfpid();
	info.rti_addrs = rtm->rtm_addrs;
	/* Parse the variable-length sockaddrs that follow the header */
	if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EINVAL);
	}
	if (info.rti_info[RTAX_DST] == NULL ||
	    info.rti_info[RTAX_DST]->sa_family >= AF_MAX ||
	    (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX))
		senderr(EINVAL);

	/*
	 * Normalize short/long AF_INET destination sockaddrs into a
	 * properly sized local copy before touching the tables.
	 */
	if (info.rti_info[RTAX_DST]->sa_family == AF_INET &&
	    info.rti_info[RTAX_DST]->sa_len != sizeof (dst_in)) {
		/* At minimum, we need up to sin_addr */
		if (info.rti_info[RTAX_DST]->sa_len <
		    offsetof(struct sockaddr_in, sin_zero))
			senderr(EINVAL);
		bzero(&dst_in, sizeof (dst_in));
		dst_in.sin_len = sizeof (dst_in);
		dst_in.sin_family = AF_INET;
		dst_in.sin_port = SIN(info.rti_info[RTAX_DST])->sin_port;
		dst_in.sin_addr = SIN(info.rti_info[RTAX_DST])->sin_addr;
		info.rti_info[RTAX_DST] = (struct sockaddr *)&dst_in;
		dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
	}

	/* Same normalization for an AF_INET gateway, if one was supplied */
	if (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET &&
	    info.rti_info[RTAX_GATEWAY]->sa_len != sizeof (gate_in)) {
		/* At minimum, we need up to sin_addr */
		if (info.rti_info[RTAX_GATEWAY]->sa_len <
		    offsetof(struct sockaddr_in, sin_zero))
			senderr(EINVAL);
		bzero(&gate_in, sizeof (gate_in));
		gate_in.sin_len = sizeof (gate_in);
		gate_in.sin_family = AF_INET;
		gate_in.sin_port = SIN(info.rti_info[RTAX_GATEWAY])->sin_port;
		gate_in.sin_addr = SIN(info.rti_info[RTAX_GATEWAY])->sin_addr;
		info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gate_in;
	}

	/* Intern the genmask in the mask tree so we can point at its key */
	if (info.rti_info[RTAX_GENMASK]) {
		struct radix_node *t;
		t = rn_addmask((caddr_t)info.rti_info[RTAX_GENMASK], 0, 1);
		if (t != NULL && Bcmp(info.rti_info[RTAX_GENMASK],
		    t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0)
			info.rti_info[RTAX_GENMASK] =
			    (struct sockaddr *)(t->rn_key);
		else
			senderr(ENOBUFS);
	}

	/*
	 * If RTF_IFSCOPE flag is set, then rtm_index specifies the scope.
	 */
	if (rtm->rtm_flags & RTF_IFSCOPE) {
		if (info.rti_info[RTAX_DST]->sa_family != AF_INET &&
		    info.rti_info[RTAX_DST]->sa_family != AF_INET6)
			senderr(EINVAL);
		ifscope = rtm->rtm_index;
	}
	/*
	 * Block changes on INTCOPROC interfaces.
	 */
	if (ifscope) {
		unsigned int intcoproc_scope = 0;
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (IFNET_IS_INTCOPROC(ifp)) {
				intcoproc_scope = ifp->if_index;
				break;
			}
		}
		ifnet_head_done();
		/* only the kernel (pid 0) may touch INTCOPROC routes */
		if (intcoproc_scope == ifscope && current_proc()->p_pid != 0)
			senderr(EINVAL);
	}

	/*
	 * RTF_PROXY can only be set internally from within the kernel.
	 */
	if (rtm->rtm_flags & RTF_PROXY)
		senderr(EINVAL);

	/*
	 * For AF_INET, always zero out the embedded scope ID.  If this is
	 * a scoped request, it must be done explicitly by setting RTF_IFSCOPE
	 * flag and the corresponding rtm_index value.  This is to prevent
	 * false interpretation of the scope ID because it's using the sin_zero
	 * field, which might not be properly cleared by the requestor.
	 */
	if (info.rti_info[RTAX_DST]->sa_family == AF_INET)
		sin_set_ifscope(info.rti_info[RTAX_DST], IFSCOPE_NONE);
	if (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET)
		sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE);

	switch (rtm->rtm_type) {
	case RTM_ADD:
		if (info.rti_info[RTAX_GATEWAY] == NULL)
			senderr(EINVAL);

		error = rtrequest_scoped_locked(RTM_ADD,
		    info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
		    info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
		    ifscope);
		if (error == 0 && saved_nrt != NULL) {
			RT_LOCK(saved_nrt);
			/*
			 * If the route request specified an interface with
			 * IFA and/or IFP, we set the requested interface on
			 * the route with rt_setif.  It would be much better
			 * to do this inside rtrequest, but that would
			 * require passing the desired interface, in some
			 * form, to rtrequest.  Since rtrequest is called in
			 * so many places (roughly 40 in our source), adding
			 * a parameter is to much for us to swallow; this is
			 * something for the FreeBSD developers to tackle.
			 * Instead, we let rtrequest compute whatever
			 * interface it wants, then come in behind it and
			 * stick in the interface that we really want.  This
			 * works reasonably well except when rtrequest can't
			 * figure out what interface to use (with
			 * ifa_withroute) and returns ENETUNREACH.  Ideally
			 * it shouldn't matter if rtrequest can't figure out
			 * the interface if we're going to explicitly set it
			 * ourselves anyway.  But practically we can't
			 * recover here because rtrequest will not do any of
			 * the work necessary to add the route if it can't
			 * find an interface.  As long as there is a default
			 * route that leads to some interface, rtrequest will
			 * find an interface, so this problem should be
			 * rarely encountered.
			 * dwiggins@bbn.com
			 */
			rt_setif(saved_nrt,
			    info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
			    info.rti_info[RTAX_GATEWAY], ifscope);
			(void)rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, saved_nrt);
			saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
			saved_nrt->rt_rmx.rmx_locks |=
			    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
			saved_nrt->rt_genmask = info.rti_info[RTAX_GENMASK];
			RT_REMREF_LOCKED(saved_nrt);
			RT_UNLOCK(saved_nrt);
		}
		break;

	case RTM_DELETE:
		error = rtrequest_scoped_locked(RTM_DELETE,
		    info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
		    info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
		    ifscope);
		if (error == 0) {
			/* report the deleted entry back to the caller */
			rt = saved_nrt;
			RT_LOCK(rt);
			goto report;
		}
		break;

	case RTM_GET:
	case RTM_CHANGE:
	case RTM_LOCK:
		rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family];
		if (rnh == NULL)
			senderr(EAFNOSUPPORT);
		/*
		 * Lookup the best match based on the key-mask pair;
		 * callee adds a reference and checks for root node.
		 */
		rt = rt_lookup(TRUE, info.rti_info[RTAX_DST],
		    info.rti_info[RTAX_NETMASK], rnh, ifscope);
		if (rt == NULL)
			senderr(ESRCH);
		RT_LOCK(rt);

		/*
		 * Holding rnh_lock here prevents the possibility of
		 * ifa from changing (e.g. in_ifinit), so it is safe
		 * to access its ifa_addr (down below) without locking.
		 */
		switch (rtm->rtm_type) {
		case RTM_GET: {
			kauth_cred_t cred;
			struct ifaddr *ifa2;
report:
			/* entered via goto from RTM_DELETE with rt locked */
			cred = kauth_cred_proc_ref(current_proc());
			ifa2 = NULL;
			RT_LOCK_ASSERT_HELD(rt);
			info.rti_info[RTAX_DST] = rt_key(rt);
			dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
			if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
				ifp = rt->rt_ifp;
				if (ifp != NULL) {
					ifnet_lock_shared(ifp);
					ifa2 = ifp->if_lladdr;
					info.rti_info[RTAX_IFP] =
					    ifa2->ifa_addr;
					IFA_ADDREF(ifa2);
					ifnet_lock_done(ifp);
					info.rti_info[RTAX_IFA] =
					    rt->rt_ifa->ifa_addr;
					rtm->rtm_index = ifp->if_index;
				} else {
					info.rti_info[RTAX_IFP] = NULL;
					info.rti_info[RTAX_IFA] = NULL;
				}
			} else if ((ifp = rt->rt_ifp) != NULL) {
				rtm->rtm_index = ifp->if_index;
			}
			/* first rt_msg2 pass only computes the length */
			if (ifa2 != NULL)
				IFA_LOCK(ifa2);
			len = rt_msg2(rtm->rtm_type, &info, NULL, NULL, &cred);
			if (ifa2 != NULL)
				IFA_UNLOCK(ifa2);
			if (len > rtm->rtm_msglen) {
				/* grow the reply buffer to fit */
				struct rt_msghdr *new_rtm;
				R_Malloc(new_rtm, struct rt_msghdr *, len);
				if (new_rtm == NULL) {
					RT_UNLOCK(rt);
					if (ifa2 != NULL)
						IFA_REMREF(ifa2);
					senderr(ENOBUFS);
				}
				Bcopy(rtm, new_rtm, rtm->rtm_msglen);
				R_Free(rtm); rtm = new_rtm;
			}
			/* second pass actually fills in the message */
			if (ifa2 != NULL)
				IFA_LOCK(ifa2);
			(void) rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
			    NULL, &cred);
			if (ifa2 != NULL)
				IFA_UNLOCK(ifa2);
			rtm->rtm_flags = rt->rt_flags;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_addrs = info.rti_addrs;
			if (ifa2 != NULL)
				IFA_REMREF(ifa2);

			kauth_cred_unref(&cred);
			break;
		}

		case RTM_CHANGE:
			is_router = (rt->rt_flags & RTF_ROUTER) ? TRUE : FALSE;

			if (info.rti_info[RTAX_GATEWAY] != NULL &&
			    (error = rt_setgate(rt, rt_key(rt),
			    info.rti_info[RTAX_GATEWAY]))) {
				int tmp = error;
				RT_UNLOCK(rt);
				senderr(tmp);
			}
			/*
			 * If they tried to change things but didn't specify
			 * the required gateway, then just use the old one.
			 * This can happen if the user tries to change the
			 * flags on the default route without changing the
			 * default gateway.  Changing flags still doesn't work.
			 */
			if ((rt->rt_flags & RTF_GATEWAY) &&
			    info.rti_info[RTAX_GATEWAY] == NULL)
				info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;

			/*
			 * On Darwin, we call rt_setif which contains the
			 * equivalent to the code found at this very spot
			 * in BSD.
			 */
			rt_setif(rt,
			    info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
			    info.rti_info[RTAX_GATEWAY], ifscope);

			if ((error = rt_setmetrics(rtm->rtm_inits,
			    &rtm->rtm_rmx, rt))) {
				int tmp = error;
				RT_UNLOCK(rt);
				senderr(tmp);
			}
			if (info.rti_info[RTAX_GENMASK])
				rt->rt_genmask = info.rti_info[RTAX_GENMASK];

			/*
			 * Enqueue work item to invoke callback for this route entry
			 * This may not be needed always, but for now issue it anytime
			 * RTM_CHANGE gets called.
			 */
			route_event_enqueue_nwk_wq_entry(rt, NULL, ROUTE_ENTRY_REFRESH, NULL, TRUE);
			/*
			 * If the route is for a router, walk the tree to send refresh
			 * event to protocol cloned entries
			 */
			if (is_router) {
				struct route_event rt_ev;
				route_event_init(&rt_ev, rt, NULL, ROUTE_ENTRY_REFRESH);
				RT_UNLOCK(rt);
				(void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev);
				RT_LOCK(rt);
			}
			/* FALLTHRU */
		case RTM_LOCK:
			rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
			rt->rt_rmx.rmx_locks |=
			    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
			break;
		}
		RT_UNLOCK(rt);
		break;

	default:
		senderr(EOPNOTSUPP);
	}
flush:
	/* common exit: stamp the reply, release table locks, deliver */
	if (rtm != NULL) {
		if (error)
			rtm->rtm_errno = error;
		else
			rtm->rtm_flags |= RTF_DONE;
	}
	if (rt != NULL) {
		RT_LOCK_ASSERT_NOTHELD(rt);
		rtfree_locked(rt);
	}
	lck_mtx_unlock(rnh_lock);

	/* relock the socket now */
	socket_lock(so, 0);
	/*
	 * Check to see if we don't want our own messages.
	 */
	if (!(so->so_options & SO_USELOOPBACK)) {
		if (route_cb.any_count <= 1) {
			/* no other listeners; nothing to echo */
			if (rtm != NULL)
				R_Free(rtm);
			m_freem(m);
			return (error);
		}
		/* There is another listener, so construct message */
		rp = sotorawcb(so);
	}
	/* copy the (possibly updated) reply back into the mbuf chain */
	if (rtm != NULL) {
		m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
		if (m->m_pkthdr.len < rtm->rtm_msglen) {
			m_freem(m);
			m = NULL;
		} else if (m->m_pkthdr.len > rtm->rtm_msglen) {
			m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
		}
		R_Free(rtm);
	}
	if (sendonlytoself && m != NULL) {
		error = 0;
		if (sbappendaddr(&so->so_rcv, &route_src, m,
		    NULL, &error) != 0) {
			sorwakeup(so);
		}
		if (error)
			return (error);
	} else {
		struct sockproto route_proto = { PF_ROUTE, 0 };
		if (rp != NULL)
			rp->rcb_proto.sp_family = 0; /* Avoid us */
		if (dst_sa_family != 0)
			route_proto.sp_protocol = dst_sa_family;
		if (m != NULL) {
			socket_unlock(so, 0);
			raw_input(m, &route_proto, &route_src, &route_dst);
			socket_lock(so, 0);
		}
		if (rp != NULL)
			rp->rcb_proto.sp_family = PF_ROUTE;
	}
	return (error);
}
745
746 void
747 rt_setexpire(struct rtentry *rt, uint64_t expiry)
748 {
749 /* set both rt_expire and rmx_expire */
750 rt->rt_expire = expiry;
751 if (expiry) {
752 rt->rt_rmx.rmx_expire = expiry + rt->base_calendartime -
753 rt->base_uptime;
754 } else {
755 rt->rt_rmx.rmx_expire = 0;
756 }
757 }
758
759 static int
760 rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out)
761 {
762 if (!(which & RTV_REFRESH_HOST)) {
763 struct timeval caltime;
764 getmicrotime(&caltime);
765 #define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
766 metric(RTV_RPIPE, rmx_recvpipe);
767 metric(RTV_SPIPE, rmx_sendpipe);
768 metric(RTV_SSTHRESH, rmx_ssthresh);
769 metric(RTV_RTT, rmx_rtt);
770 metric(RTV_RTTVAR, rmx_rttvar);
771 metric(RTV_HOPCOUNT, rmx_hopcount);
772 metric(RTV_MTU, rmx_mtu);
773 metric(RTV_EXPIRE, rmx_expire);
774 #undef metric
775 if (out->rt_rmx.rmx_expire > 0) {
776 /* account for system time change */
777 getmicrotime(&caltime);
778 out->base_calendartime +=
779 NET_CALCULATE_CLOCKSKEW(caltime,
780 out->base_calendartime,
781 net_uptime(), out->base_uptime);
782 rt_setexpire(out,
783 out->rt_rmx.rmx_expire -
784 out->base_calendartime +
785 out->base_uptime);
786 } else {
787 rt_setexpire(out, 0);
788 }
789
790 VERIFY(out->rt_expire == 0 || out->rt_rmx.rmx_expire != 0);
791 VERIFY(out->rt_expire != 0 || out->rt_rmx.rmx_expire == 0);
792 } else {
793 /* Only RTV_REFRESH_HOST must be set */
794 if ((which & ~RTV_REFRESH_HOST) ||
795 (out->rt_flags & RTF_STATIC) ||
796 !(out->rt_flags & RTF_LLINFO)) {
797 return (EINVAL);
798 }
799
800 if (out->rt_llinfo_refresh == NULL) {
801 return (ENOTSUP);
802 }
803
804 out->rt_llinfo_refresh(out);
805 }
806 return (0);
807 }
808
/*
 * Report a route's metrics into "out", converting the uptime-based
 * rt_expire into a calendar-time rmx_expire for userland.  Note the
 * side effect: in->base_calendartime is adjusted here to absorb any
 * wall-clock change since the expiration was armed.
 */
static void
rt_getmetrics(struct rtentry *in, struct rt_metrics *out)
{
	struct timeval caltime;

	/* rt_expire and rmx_expire must be both zero or both nonzero */
	VERIFY(in->rt_expire == 0 || in->rt_rmx.rmx_expire != 0);
	VERIFY(in->rt_expire != 0 || in->rt_rmx.rmx_expire == 0);

	*out = in->rt_rmx;

	if (in->rt_expire != 0) {
		/* account for system time change */
		getmicrotime(&caltime);

		in->base_calendartime +=
		    NET_CALCULATE_CLOCKSKEW(caltime,
		    in->base_calendartime, net_uptime(), in->base_uptime);

		/* calendar expiry = calendar base + (uptime expiry - uptime base) */
		out->rmx_expire = in->base_calendartime +
		    in->rt_expire - in->base_uptime;
	} else {
		out->rmx_expire = 0;
	}
}
833
/*
 * Set route's interface given info.rti_info[RTAX_IFP],
 * info.rti_info[RTAX_IFA], and gateway.
 *
 * Resolution order: an RTAX_IFP sockaddr (by network, then by name)
 * wins over an RTAX_IFA address match, which wins over a lookup via
 * the gateway.  On success the route's ifa/ifp are replaced with the
 * resolved pair and the per-ifa rtrequest hooks are re-run.
 *
 * Locking: called with rnh_lock and the route lock held; the route
 * lock is temporarily dropped around ifa_ifwithroute (see comment
 * below), so RTF_CONDEMNED is re-checked afterwards.
 */
static void
rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr,
    struct sockaddr *Gate, unsigned int ifscope)
{
	struct ifaddr *ifa = NULL;
	struct ifnet *ifp = NULL;
	void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *);

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	RT_LOCK_ASSERT_HELD(rt);

	/* Don't update a defunct route */
	if (rt->rt_flags & RTF_CONDEMNED)
		return;

	/* Add an extra ref for ourselves */
	RT_ADDREF_LOCKED(rt);

	/* Become a regular mutex, just in case */
	RT_CONVERT_LOCK(rt);

	/*
	 * New gateway could require new ifaddr, ifp; flags may also
	 * be different; ifp may be specified by ll sockaddr when
	 * protocol address is ambiguous.
	 */
	if (Ifpaddr && (ifa = ifa_ifwithnet_scoped(Ifpaddr, ifscope)) &&
	    (ifp = ifa->ifa_ifp) && (Ifaaddr || Gate)) {
		IFA_REMREF(ifa);
		ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate, ifp);
	} else {
		if (ifa != NULL) {
			IFA_REMREF(ifa);
			ifa = NULL;
		}
		if (Ifpaddr && (ifp = if_withname(Ifpaddr))) {
			if (Gate) {
				ifa = ifaof_ifpforaddr(Gate, ifp);
			} else {
				/* no gateway: take the first ifaddr */
				ifnet_lock_shared(ifp);
				ifa = TAILQ_FIRST(&ifp->if_addrhead);
				if (ifa != NULL)
					IFA_ADDREF(ifa);
				ifnet_lock_done(ifp);
			}
		} else if (Ifaaddr &&
		    (ifa = ifa_ifwithaddr_scoped(Ifaaddr, ifscope))) {
			ifp = ifa->ifa_ifp;
		} else if (Gate != NULL) {
			/*
			 * Safe to drop rt_lock and use rt_key, since holding
			 * rnh_lock here prevents another thread from calling
			 * rt_setgate() on this route.  We cannot hold the
			 * lock across ifa_ifwithroute since the lookup done
			 * by that routine may point to the same route.
			 */
			RT_UNLOCK(rt);
			if ((ifa = ifa_ifwithroute_scoped_locked(rt->rt_flags,
			    rt_key(rt), Gate, ifscope)) != NULL)
				ifp = ifa->ifa_ifp;
			RT_LOCK(rt);
			/* Don't update a defunct route */
			if (rt->rt_flags & RTF_CONDEMNED) {
				if (ifa != NULL)
					IFA_REMREF(ifa);
				/* Release extra ref */
				RT_REMREF_LOCKED(rt);
				return;
			}
		}
	}

	/* trigger route cache reevaluation */
	if (rt_key(rt)->sa_family == AF_INET)
		routegenid_inet_update();
#if INET6
	else if (rt_key(rt)->sa_family == AF_INET6)
		routegenid_inet6_update();
#endif /* INET6 */

	if (ifa != NULL) {
		struct ifaddr *oifa = rt->rt_ifa;
		if (oifa != ifa) {
			/* tell the old ifa the route is leaving it */
			if (oifa != NULL) {
				IFA_LOCK_SPIN(oifa);
				ifa_rtrequest = oifa->ifa_rtrequest;
				IFA_UNLOCK(oifa);
				if (ifa_rtrequest != NULL)
					ifa_rtrequest(RTM_DELETE, rt, Gate);
			}
			rtsetifa(rt, ifa);

			if (rt->rt_ifp != ifp) {
				/*
				 * Purge any link-layer info caching.
				 */
				if (rt->rt_llinfo_purge != NULL)
					rt->rt_llinfo_purge(rt);

				/*
				 * Adjust route ref count for the interfaces.
				 */
				if (rt->rt_if_ref_fn != NULL) {
					rt->rt_if_ref_fn(ifp, 1);
					rt->rt_if_ref_fn(rt->rt_ifp, -1);
				}
			}
			rt->rt_ifp = ifp;
			/*
			 * If this is the (non-scoped) default route, record
			 * the interface index used for the primary ifscope.
			 */
			if (rt_primary_default(rt, rt_key(rt))) {
				set_primary_ifscope(rt_key(rt)->sa_family,
				    rt->rt_ifp->if_index);
			}
			/*
			 * If rmx_mtu is not locked, update it
			 * to the MTU used by the new interface.
			 */
			if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
				rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

			/* tell the new ifa the route has arrived */
			if (rt->rt_ifa != NULL) {
				IFA_LOCK_SPIN(rt->rt_ifa);
				ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
				IFA_UNLOCK(rt->rt_ifa);
				if (ifa_rtrequest != NULL)
					ifa_rtrequest(RTM_ADD, rt, Gate);
			}
			IFA_REMREF(ifa);
			/* Release extra ref */
			RT_REMREF_LOCKED(rt);
			return;
		}
		IFA_REMREF(ifa);
		ifa = NULL;
	}

	/* XXX: to reset gateway to correct value, at RTM_CHANGE */
	if (rt->rt_ifa != NULL) {
		IFA_LOCK_SPIN(rt->rt_ifa);
		ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
		IFA_UNLOCK(rt->rt_ifa);
		if (ifa_rtrequest != NULL)
			ifa_rtrequest(RTM_ADD, rt, Gate);
	}

	/*
	 * Workaround for local address routes pointing to the loopback
	 * interface added by configd, until <rdar://problem/12970142>.
	 */
	if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
	    (rt->rt_flags & RTF_HOST) && rt->rt_ifa->ifa_ifp == rt->rt_ifp) {
		ifa = ifa_ifwithaddr(rt_key(rt));
		if (ifa != NULL) {
			if (ifa != rt->rt_ifa)
				rtsetifa(rt, ifa);
			IFA_REMREF(ifa);
		}
	}

	/* Release extra ref */
	RT_REMREF_LOCKED(rt);
}
1004
/*
 * Extract the addresses of the passed sockaddrs.
 * Do a little sanity checking so as to avoid bad memory references.
 * This data is derived straight from userland.
 *
 * cp/cplim bound the sockaddr area that follows the rt_msghdr; the
 * RTA_* bits in rtinfo->rti_addrs say which RTAX_* slots are present,
 * in index order.  Returns 0 on success (rti_info[] filled with
 * pointers INTO the caller's buffer) or EINVAL if a sockaddr's
 * declared sa_len would run past cplim.
 */
static int
rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo)
{
	struct sockaddr *sa;
	int i;

	bzero(rtinfo->rti_info, sizeof (rtinfo->rti_info));
	for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
		/* skip slots whose presence bit is not set */
		if ((rtinfo->rti_addrs & (1 << i)) == 0)
			continue;
		sa = (struct sockaddr *)cp;
		/*
		 * It won't fit.
		 */
		if ((cp + sa->sa_len) > cplim)
			return (EINVAL);
		/*
		 * there are no more.. quit now
		 * If there are more bits, they are in error.
		 * I've seen this. route(1) can evidently generate these.
		 * This causes kernel to core dump.
		 * for compatibility, If we see this, point to a safe address.
		 */
		if (sa->sa_len == 0) {
			rtinfo->rti_info[i] = &sa_zero;
			return (0); /* should be EINVAL but for compat */
		}
		/* accept it */
		rtinfo->rti_info[i] = sa;
		ADVANCE32(cp, sa);	/* 32-bit-aligned step past this sockaddr */
	}
	return (0);
}
1043
/*
 * Build a routing message of the given type in a freshly allocated mbuf,
 * appending the (scrubbed) addresses from rtinfo after the fixed header.
 * Returns NULL on allocation failure.  Used for event messages that are
 * broadcast to routing sockets via raw_input().
 */
static struct mbuf *
rt_msg1(int type, struct rt_addrinfo *rtinfo)
{
	struct rt_msghdr *rtm;
	struct mbuf *m;
	int i;
	int len, dlen, off;

	/* Fixed header size depends on the message type */
	switch (type) {

	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof (struct ifa_msghdr);
		break;

	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		len = sizeof (struct ifma_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof (struct if_msghdr);
		break;

	default:
		len = sizeof (struct rt_msghdr);
	}
	/* Grab an mbuf; attach a cluster if the header won't fit in MHLEN */
	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m && len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			m = NULL;
		}
	}
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	rtm = mtod(m, struct rt_msghdr *);
	bzero((caddr_t)rtm, len);
	off = len;
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa, *hint;
		uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

		/*
		 * Make sure to accomodate the largest possible size of sa_len.
		 */
		_CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));

		if ((sa = rtinfo->rti_info[i]) == NULL)
			continue;

		switch (i) {
		case RTAX_DST:
		case RTAX_NETMASK:
			if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
				hint = rtinfo->rti_info[RTAX_IFA];

			/* Scrub away any trace of embedded interface scope */
			sa = rtm_scrub(type, i, hint, sa, &ssbuf,
			    sizeof (ssbuf), NULL);
			break;

		default:
			break;
		}

		rtinfo->rti_addrs |= (1 << i);
		dlen = sa->sa_len;
		m_copyback(m, off, dlen, (caddr_t)sa);
		len = off + dlen;
		/* Addresses are laid out on 32-bit boundaries */
		off += ROUNDUP32(dlen);
	}
	/*
	 * If m_copyback() could not extend the chain to hold all the
	 * addresses, the recorded packet length won't match; bail out.
	 */
	if (m->m_pkthdr.len != len) {
		m_freem(m);
		return (NULL);
	}
	rtm->rtm_msglen = len;
	rtm->rtm_version = RTM_VERSION;
	rtm->rtm_type = type;
	return (m);
}
1128
/*
 * Linearize the addresses in rtinfo into a routing message of the given
 * type at cp (when non-NULL) and return the total message length.  When
 * cp is NULL this is a sizing pass; if a walkarg is supplied, the walker's
 * scratch buffer w_tmem is grown to fit and then filled by re-entering at
 * "again".  credp is handed to rtm_scrub() so information can be hidden
 * from unentitled callers.
 */
static int
rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w,
	kauth_cred_t* credp)
{
	int i;
	int len, dlen, rlen, second_time = 0;
	caddr_t cp0;

	rtinfo->rti_addrs = 0;
again:
	/* Fixed header size depends on the message type */
	switch (type) {

	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof (struct ifa_msghdr);
		break;

	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		len = sizeof (struct ifma_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof (struct if_msghdr);
		break;

	case RTM_IFINFO2:
		len = sizeof (struct if_msghdr2);
		break;

	case RTM_NEWMADDR2:
		len = sizeof (struct ifma_msghdr2);
		break;

	case RTM_GET_EXT:
		len = sizeof (struct rt_msghdr_ext);
		break;

	case RTM_GET2:
		len = sizeof (struct rt_msghdr2);
		break;

	default:
		len = sizeof (struct rt_msghdr);
	}
	cp0 = cp;
	if (cp0)
		cp += len;	/* addresses follow the fixed header */
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa, *hint;
		uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

		/*
		 * Make sure to accomodate the largest possible size of sa_len.
		 */
		_CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));

		if ((sa = rtinfo->rti_info[i]) == NULL)
			continue;

		switch (i) {
		case RTAX_DST:
		case RTAX_NETMASK:
			if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
				hint = rtinfo->rti_info[RTAX_IFA];

			/* Scrub away any trace of embedded interface scope */
			sa = rtm_scrub(type, i, hint, sa, &ssbuf,
			    sizeof (ssbuf), NULL);
			break;
		case RTAX_GATEWAY:
		case RTAX_IFP:
			/* May redact based on the caller's credential */
			sa = rtm_scrub(type, i, NULL, sa, &ssbuf,
			    sizeof (ssbuf), credp);
			break;

		default:
			break;
		}

		rtinfo->rti_addrs |= (1 << i);
		dlen = sa->sa_len;
		rlen = ROUNDUP32(dlen);
		if (cp) {
			bcopy((caddr_t)sa, cp, (size_t)dlen);
			/* Zero the padding up to the 32-bit boundary */
			if (dlen != rlen)
				bzero(cp + dlen, rlen - dlen);
			cp += rlen;
		}
		len += rlen;
	}
	/*
	 * Sizing pass on behalf of a walker: (re)allocate its scratch
	 * buffer and loop back once to fill it in.
	 */
	if (cp == NULL && w != NULL && !second_time) {
		struct walkarg *rw = w;

		if (rw->w_req != NULL) {
			if (rw->w_tmemsize < len) {
				if (rw->w_tmem != NULL)
					FREE(rw->w_tmem, M_RTABLE);
				rw->w_tmem = _MALLOC(len, M_RTABLE, M_WAITOK);
				if (rw->w_tmem != NULL)
					rw->w_tmemsize = len;
			}
			if (rw->w_tmem != NULL) {
				cp = rw->w_tmem;
				second_time = 1;
				goto again;
			}
		}
	}
	if (cp) {
		struct rt_msghdr *rtm = (struct rt_msghdr *)(void *)cp0;

		rtm->rtm_version = RTM_VERSION;
		rtm->rtm_type = type;
		rtm->rtm_msglen = len;
	}
	return (len);
}
1247
1248 /*
1249 * This routine is called to generate a message from the routing
1250 * socket indicating that a redirect has occurred, a routing lookup
1251 * has failed, or that a protocol has detected timeouts to a particular
1252 * destination.
1253 */
1254 void
1255 rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error)
1256 {
1257 struct rt_msghdr *rtm;
1258 struct mbuf *m;
1259 struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
1260 struct sockproto route_proto = { PF_ROUTE, 0 };
1261
1262 if (route_cb.any_count == 0)
1263 return;
1264 m = rt_msg1(type, rtinfo);
1265 if (m == NULL)
1266 return;
1267 rtm = mtod(m, struct rt_msghdr *);
1268 rtm->rtm_flags = RTF_DONE | flags;
1269 rtm->rtm_errno = error;
1270 rtm->rtm_addrs = rtinfo->rti_addrs;
1271 route_proto.sp_family = sa ? sa->sa_family : 0;
1272 raw_input(m, &route_proto, &route_src, &route_dst);
1273 }
1274
1275 /*
1276 * This routine is called to generate a message from the routing
1277 * socket indicating that the status of a network interface has changed.
1278 */
1279 void
1280 rt_ifmsg(struct ifnet *ifp)
1281 {
1282 struct if_msghdr *ifm;
1283 struct mbuf *m;
1284 struct rt_addrinfo info;
1285 struct sockproto route_proto = { PF_ROUTE, 0 };
1286
1287 if (route_cb.any_count == 0)
1288 return;
1289 bzero((caddr_t)&info, sizeof (info));
1290 m = rt_msg1(RTM_IFINFO, &info);
1291 if (m == NULL)
1292 return;
1293 ifm = mtod(m, struct if_msghdr *);
1294 ifm->ifm_index = ifp->if_index;
1295 ifm->ifm_flags = (u_short)ifp->if_flags;
1296 if_data_internal_to_if_data(ifp, &ifp->if_data, &ifm->ifm_data);
1297 ifm->ifm_addrs = 0;
1298 raw_input(m, &route_proto, &route_src, &route_dst);
1299 }
1300
1301 /*
1302 * This is called to generate messages from the routing socket
1303 * indicating a network interface has had addresses associated with it.
1304 * if we ever reverse the logic and replace messages TO the routing
1305 * socket indicate a request to configure interfaces, then it will
1306 * be unnecessary as the routing socket will automatically generate
1307 * copies of it.
1308 *
1309 * Since this is coming from the interface, it is expected that the
1310 * interface will be locked. Caller must hold rnh_lock and rt_lock.
1311 */
void
rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
{
	struct rt_addrinfo info;
	struct sockaddr *sa = 0;
	int pass;
	struct mbuf *m = 0;
	struct ifnet *ifp = ifa->ifa_ifp;
	struct sockproto route_proto = { PF_ROUTE, 0 };

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (route_cb.any_count == 0)
		return;

	/* Become a regular mutex, just in case */
	RT_CONVERT_LOCK(rt);
	/*
	 * Two messages are emitted: an address message (RTM_NEWADDR or
	 * RTM_DELADDR) and a route message (cmd itself).  The ordering
	 * depends on direction: for RTM_ADD the address message comes
	 * first (pass 1), for RTM_DELETE the route message comes first.
	 * NOTE(review): pass loop assumes cmd is RTM_ADD or RTM_DELETE;
	 * any other cmd would reach raw_input() with a stale m — verify
	 * against callers.
	 */
	for (pass = 1; pass < 3; pass++) {
		bzero((caddr_t)&info, sizeof (info));
		if ((cmd == RTM_ADD && pass == 1) ||
		    (cmd == RTM_DELETE && pass == 2)) {
			struct ifa_msghdr *ifam;
			int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;

			/* Lock ifp for if_lladdr */
			ifnet_lock_shared(ifp);
			IFA_LOCK(ifa);
			info.rti_info[RTAX_IFA] = sa = ifa->ifa_addr;
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold its
			 * lock. The link address is always present; it's
			 * never freed.
			 */
			info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;
			info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
			info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
			if ((m = rt_msg1(ncmd, &info)) == NULL) {
				IFA_UNLOCK(ifa);
				ifnet_lock_done(ifp);
				continue;
			}
			IFA_UNLOCK(ifa);
			ifnet_lock_done(ifp);
			ifam = mtod(m, struct ifa_msghdr *);
			ifam->ifam_index = ifp->if_index;
			IFA_LOCK_SPIN(ifa);
			ifam->ifam_metric = ifa->ifa_metric;
			ifam->ifam_flags = ifa->ifa_flags;
			IFA_UNLOCK(ifa);
			ifam->ifam_addrs = info.rti_addrs;
		}
		if ((cmd == RTM_ADD && pass == 2) ||
		    (cmd == RTM_DELETE && pass == 1)) {
			struct rt_msghdr *rtm;

			if (rt == NULL)
				continue;
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_DST] = sa = rt_key(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if ((m = rt_msg1(cmd, &info)) == NULL)
				continue;
			rtm = mtod(m, struct rt_msghdr *);
			rtm->rtm_index = ifp->if_index;
			rtm->rtm_flags |= rt->rt_flags;
			rtm->rtm_errno = error;
			rtm->rtm_addrs = info.rti_addrs;
		}
		/* raw_input() consumes m */
		route_proto.sp_protocol = sa ? sa->sa_family : 0;
		raw_input(m, &route_proto, &route_src, &route_dst);
	}
}
1386
1387 /*
1388 * This is the analogue to the rt_newaddrmsg which performs the same
1389 * function but for multicast group memberhips. This is easier since
1390 * there is no route state to worry about.
1391 */
void
rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma)
{
	struct rt_addrinfo info;
	struct mbuf *m = 0;
	struct ifnet *ifp = ifma->ifma_ifp;
	struct ifma_msghdr *ifmam;
	struct sockproto route_proto = { PF_ROUTE, 0 };

	/* No listeners on routing sockets: nothing to do */
	if (route_cb.any_count == 0)
		return;

	/* Lock ifp for if_lladdr */
	ifnet_lock_shared(ifp);
	bzero((caddr_t)&info, sizeof (info));
	IFMA_LOCK(ifma);
	info.rti_info[RTAX_IFA] = ifma->ifma_addr;
	/* lladdr doesn't need lock */
	info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;

	/*
	 * If a link-layer address is present, present it as a ``gateway''
	 * (similarly to how ARP entries, e.g., are presented).
	 */
	info.rti_info[RTAX_GATEWAY] = (ifma->ifma_ll != NULL) ?
	    ifma->ifma_ll->ifma_addr : NULL;
	if ((m = rt_msg1(cmd, &info)) == NULL) {
		IFMA_UNLOCK(ifma);
		ifnet_lock_done(ifp);
		return;
	}
	ifmam = mtod(m, struct ifma_msghdr *);
	ifmam->ifmam_index = ifp->if_index;
	ifmam->ifmam_addrs = info.rti_addrs;
	/* Tag with the multicast address family; drop locks before input */
	route_proto.sp_protocol = ifma->ifma_addr->sa_family;
	IFMA_UNLOCK(ifma);
	ifnet_lock_done(ifp);
	raw_input(m, &route_proto, &route_src, &route_dst);
}
1431
1432 const char *
1433 rtm2str(int cmd)
1434 {
1435 const char *c = "RTM_?";
1436
1437 switch (cmd) {
1438 case RTM_ADD:
1439 c = "RTM_ADD";
1440 break;
1441 case RTM_DELETE:
1442 c = "RTM_DELETE";
1443 break;
1444 case RTM_CHANGE:
1445 c = "RTM_CHANGE";
1446 break;
1447 case RTM_GET:
1448 c = "RTM_GET";
1449 break;
1450 case RTM_LOSING:
1451 c = "RTM_LOSING";
1452 break;
1453 case RTM_REDIRECT:
1454 c = "RTM_REDIRECT";
1455 break;
1456 case RTM_MISS:
1457 c = "RTM_MISS";
1458 break;
1459 case RTM_LOCK:
1460 c = "RTM_LOCK";
1461 break;
1462 case RTM_OLDADD:
1463 c = "RTM_OLDADD";
1464 break;
1465 case RTM_OLDDEL:
1466 c = "RTM_OLDDEL";
1467 break;
1468 case RTM_RESOLVE:
1469 c = "RTM_RESOLVE";
1470 break;
1471 case RTM_NEWADDR:
1472 c = "RTM_NEWADDR";
1473 break;
1474 case RTM_DELADDR:
1475 c = "RTM_DELADDR";
1476 break;
1477 case RTM_IFINFO:
1478 c = "RTM_IFINFO";
1479 break;
1480 case RTM_NEWMADDR:
1481 c = "RTM_NEWMADDR";
1482 break;
1483 case RTM_DELMADDR:
1484 c = "RTM_DELMADDR";
1485 break;
1486 case RTM_GET_SILENT:
1487 c = "RTM_GET_SILENT";
1488 break;
1489 case RTM_IFINFO2:
1490 c = "RTM_IFINFO2";
1491 break;
1492 case RTM_NEWMADDR2:
1493 c = "RTM_NEWMADDR2";
1494 break;
1495 case RTM_GET2:
1496 c = "RTM_GET2";
1497 break;
1498 case RTM_GET_EXT:
1499 c = "RTM_GET_EXT";
1500 break;
1501 }
1502
1503 return (c);
1504 }
1505
1506 /*
1507 * This is used in dumping the kernel table via sysctl().
1508 */
/*
 * Per-route callback for rnh_walktree(): emits one RTM_GET (or RTM_GET2
 * for NET_RT_DUMP2) message into the sysctl buffer.  Called with
 * rnh_lock held by sysctl_rtsock().
 */
static int
sysctl_dumpentry(struct radix_node *rn, void *vw)
{
	struct walkarg *w = vw;
	struct rtentry *rt = (struct rtentry *)rn;
	int error = 0, size;
	struct rt_addrinfo info;
	kauth_cred_t cred;

	/* Credential used by rt_msg2() to scrub privileged information */
	cred = kauth_cred_proc_ref(current_proc());

	RT_LOCK(rt);
	/* For NET_RT_FLAGS, only report routes matching the given flags */
	if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
		goto done;
	bzero((caddr_t)&info, sizeof (info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GENMASK] = rt->rt_genmask;

	if (w->w_op != NET_RT_DUMP2) {
		/* rt_msg2() sizes the message and fills w->w_tmem */
		size = rt_msg2(RTM_GET, &info, NULL, w, &cred);
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct rt_msghdr *rtm =
			    (struct rt_msghdr *)(void *)w->w_tmem;

			rtm->rtm_flags = rt->rt_flags;
			rtm->rtm_use = rt->rt_use;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			/* NOTE(review): assumes rt->rt_ifp != NULL — verify */
			rtm->rtm_index = rt->rt_ifp->if_index;
			rtm->rtm_pid = 0;
			rtm->rtm_seq = 0;
			rtm->rtm_errno = 0;
			rtm->rtm_addrs = info.rti_addrs;
			error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
		}
	} else {
		size = rt_msg2(RTM_GET2, &info, NULL, w, &cred);
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct rt_msghdr2 *rtm =
			    (struct rt_msghdr2 *)(void *)w->w_tmem;

			rtm->rtm_flags = rt->rt_flags;
			rtm->rtm_use = rt->rt_use;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_index = rt->rt_ifp->if_index;
			rtm->rtm_refcnt = rt->rt_refcnt;
			/* RTM_GET2 additionally reports the parent's flags */
			if (rt->rt_parent)
				rtm->rtm_parentflags = rt->rt_parent->rt_flags;
			else
				rtm->rtm_parentflags = 0;
			rtm->rtm_reserved = 0;
			rtm->rtm_addrs = info.rti_addrs;
			error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
		}
	}

done:
	RT_UNLOCK(rt);
	kauth_cred_unref(&cred);
	return (error);
}
1571
1572 /*
1573 * This is used for dumping extended information from route entries.
1574 */
/*
 * Like sysctl_dumpentry() but emits extended RTM_GET_EXT records that
 * include link-quality info (rtm_ri) for NET_RT_DUMPX[_FLAGS].
 */
static int
sysctl_dumpentry_ext(struct radix_node *rn, void *vw)
{
	struct walkarg *w = vw;
	struct rtentry *rt = (struct rtentry *)rn;
	int error = 0, size;
	struct rt_addrinfo info;
	kauth_cred_t cred;

	/* Credential used by rt_msg2() to scrub privileged information */
	cred = kauth_cred_proc_ref(current_proc());

	RT_LOCK(rt);
	/* For NET_RT_DUMPX_FLAGS, only routes matching the given flags */
	if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg))
		goto done;
	bzero(&info, sizeof (info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GENMASK] = rt->rt_genmask;

	size = rt_msg2(RTM_GET_EXT, &info, NULL, w, &cred);
	if (w->w_req != NULL && w->w_tmem != NULL) {
		struct rt_msghdr_ext *ertm =
		    (struct rt_msghdr_ext *)(void *)w->w_tmem;

		ertm->rtm_flags = rt->rt_flags;
		ertm->rtm_use = rt->rt_use;
		rt_getmetrics(rt, &ertm->rtm_rmx);
		ertm->rtm_index = rt->rt_ifp->if_index;
		ertm->rtm_pid = 0;
		ertm->rtm_seq = 0;
		ertm->rtm_errno = 0;
		ertm->rtm_addrs = info.rti_addrs;
		/* Fill route info from llinfo when available, else defaults */
		if (rt->rt_llinfo_get_ri == NULL) {
			bzero(&ertm->rtm_ri, sizeof (ertm->rtm_ri));
			ertm->rtm_ri.ri_rssi = IFNET_RSSI_UNKNOWN;
			ertm->rtm_ri.ri_lqm = IFNET_LQM_THRESH_OFF;
			ertm->rtm_ri.ri_npm = IFNET_NPM_THRESH_UNKNOWN;
		} else {
			rt->rt_llinfo_get_ri(rt, &ertm->rtm_ri);
		}
		error = SYSCTL_OUT(w->w_req, (caddr_t)ertm, size);
	}

done:
	RT_UNLOCK(rt);
	kauth_cred_unref(&cred);
	return (error);
}
1624
1625 /*
1626 * rdar://9307819
1627 * To avoid to call copyout() while holding locks and to cause problems
1628 * in the paging path, sysctl_iflist() and sysctl_iflist2() contstruct
1629 * the list in two passes. In the first pass we compute the total
1630 * length of the data we are going to copyout, then we release
1631 * all locks to allocate a temporary buffer that gets filled
1632 * in the second pass.
1633 *
1634 * Note that we are verifying the assumption that _MALLOC returns a buffer
1635 * that is at least 32 bits aligned and that the messages and addresses are
1636 * 32 bits aligned.
1637 */
/*
 * NET_RT_IFLIST: dump interfaces and their addresses as RTM_IFINFO and
 * RTM_NEWADDR messages.  Uses the two-pass size-then-fill scheme
 * described in the block comment above so copyout happens with no
 * locks held.
 */
static int
sysctl_iflist(int af, struct walkarg *w)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct rt_addrinfo info;
	int len = 0, error = 0;
	int pass = 0;
	int total_len = 0, current_len = 0;
	char *total_buffer = NULL, *cp = NULL;
	kauth_cred_t cred;

	/* Credential used by rt_msg2() to scrub privileged information */
	cred = kauth_cred_proc_ref(current_proc());

	bzero((caddr_t)&info, sizeof (info));

	/* Pass 0 computes sizes; pass 1 fills the temporary buffer */
	for (pass = 0; pass < 2; pass++) {
		ifnet_head_lock_shared();

		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (error)
				break;
			/* Non-zero w_arg restricts the dump to one ifp */
			if (w->w_arg && w->w_arg != ifp->if_index)
				continue;
			ifnet_lock_shared(ifp);
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold the ifa
			 * lock. The link address is always present; it's
			 * never freed.
			 */
			ifa = ifp->if_lladdr;
			info.rti_info[RTAX_IFP] = ifa->ifa_addr;
			len = rt_msg2(RTM_IFINFO, &info, NULL, NULL, &cred);
			if (pass == 0) {
				total_len += len;
			} else {
				struct if_msghdr *ifm;

				/* Guard against growth since pass 0 */
				if (current_len + len > total_len) {
					ifnet_lock_done(ifp);
					error = ENOBUFS;
					break;
				}
				info.rti_info[RTAX_IFP] = ifa->ifa_addr;
				len = rt_msg2(RTM_IFINFO, &info,
				    (caddr_t)cp, NULL, &cred);
				info.rti_info[RTAX_IFP] = NULL;

				ifm = (struct if_msghdr *)(void *)cp;
				ifm->ifm_index = ifp->if_index;
				ifm->ifm_flags = (u_short)ifp->if_flags;
				if_data_internal_to_if_data(ifp, &ifp->if_data,
				    &ifm->ifm_data);
				ifm->ifm_addrs = info.rti_addrs;
				/*
				 * <rdar://problem/32940901>
				 * Round bytes only for non-platform
				 */
				if (!csproc_get_platform_binary(w->w_req->p)) {
					ALIGN_BYTES(ifm->ifm_data.ifi_ibytes);
					ALIGN_BYTES(ifm->ifm_data.ifi_obytes);
				}

				cp += len;
				VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
				current_len += len;
			}
			/* Remaining (non-link) addresses on this ifp */
			while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
				IFA_LOCK(ifa);
				/* Skip families other than the one requested */
				if (af && af != ifa->ifa_addr->sa_family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				info.rti_info[RTAX_IFA] = ifa->ifa_addr;
				info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
				info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
				len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
				    &cred);
				if (pass == 0) {
					total_len += len;
				} else {
					struct ifa_msghdr *ifam;

					if (current_len + len > total_len) {
						IFA_UNLOCK(ifa);
						error = ENOBUFS;
						break;
					}
					len = rt_msg2(RTM_NEWADDR, &info,
					    (caddr_t)cp, NULL, &cred);

					ifam = (struct ifa_msghdr *)(void *)cp;
					ifam->ifam_index =
					    ifa->ifa_ifp->if_index;
					ifam->ifam_flags = ifa->ifa_flags;
					ifam->ifam_metric = ifa->ifa_metric;
					ifam->ifam_addrs = info.rti_addrs;

					cp += len;
					VERIFY(IS_P2ALIGNED(cp,
					    sizeof (u_int32_t)));
					current_len += len;
				}
				IFA_UNLOCK(ifa);
			}
			ifnet_lock_done(ifp);
			info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
			    info.rti_info[RTAX_BRD] = NULL;
		}

		ifnet_head_done();

		if (error != 0) {
			if (error == ENOBUFS)
				printf("%s: current_len (%d) + len (%d) > "
				    "total_len (%d)\n", __func__, current_len,
				    len, total_len);
			break;
		}

		if (pass == 0) {
			/* Better to return zero length buffer than ENOBUFS */
			if (total_len == 0)
				total_len = 1;
			/* Add ~12.5% slack for entries added in between */
			total_len += total_len >> 3;
			total_buffer = _MALLOC(total_len, M_RTABLE,
			    M_ZERO | M_WAITOK);
			if (total_buffer == NULL) {
				printf("%s: _MALLOC(%d) failed\n", __func__,
				    total_len);
				error = ENOBUFS;
				break;
			}
			cp = total_buffer;
			VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
		} else {
			/* Copy out with no locks held (rdar://9307819) */
			error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
			if (error)
				break;
		}
	}

	if (total_buffer != NULL)
		_FREE(total_buffer, M_RTABLE);

	kauth_cred_unref(&cred);
	return (error);
}
1787
/*
 * NET_RT_IFLIST2: like sysctl_iflist() but emits RTM_IFINFO2 per
 * interface, RTM_NEWADDR per address, and additionally RTM_NEWMADDR2
 * per multicast membership.  Same two-pass size-then-fill scheme.
 */
static int
sysctl_iflist2(int af, struct walkarg *w)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct rt_addrinfo info;
	int len = 0, error = 0;
	int pass = 0;
	int total_len = 0, current_len = 0;
	char *total_buffer = NULL, *cp = NULL;
	kauth_cred_t cred;

	/* Credential used by rt_msg2() to scrub privileged information */
	cred = kauth_cred_proc_ref(current_proc());

	bzero((caddr_t)&info, sizeof (info));

	/* Pass 0 computes sizes; pass 1 fills the temporary buffer */
	for (pass = 0; pass < 2; pass++) {
		struct ifmultiaddr *ifma;

		ifnet_head_lock_shared();

		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (error)
				break;
			/* Non-zero w_arg restricts the dump to one ifp */
			if (w->w_arg && w->w_arg != ifp->if_index)
				continue;
			ifnet_lock_shared(ifp);
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold the ifa
			 * lock. The link address is always present; it's
			 * never freed.
			 */
			ifa = ifp->if_lladdr;
			info.rti_info[RTAX_IFP] = ifa->ifa_addr;
			len = rt_msg2(RTM_IFINFO2, &info, NULL, NULL, &cred);
			if (pass == 0) {
				total_len += len;
			} else {
				struct if_msghdr2 *ifm;

				/* Guard against growth since pass 0 */
				if (current_len + len > total_len) {
					ifnet_lock_done(ifp);
					error = ENOBUFS;
					break;
				}
				info.rti_info[RTAX_IFP] = ifa->ifa_addr;
				len = rt_msg2(RTM_IFINFO2, &info,
				    (caddr_t)cp, NULL, &cred);
				info.rti_info[RTAX_IFP] = NULL;

				ifm = (struct if_msghdr2 *)(void *)cp;
				ifm->ifm_addrs = info.rti_addrs;
				ifm->ifm_flags = (u_short)ifp->if_flags;
				ifm->ifm_index = ifp->if_index;
				/* Send-queue stats only exist in IFLIST2 */
				ifm->ifm_snd_len = IFCQ_LEN(&ifp->if_snd);
				ifm->ifm_snd_maxlen = IFCQ_MAXLEN(&ifp->if_snd);
				ifm->ifm_snd_drops =
				    ifp->if_snd.ifcq_dropcnt.packets;
				ifm->ifm_timer = ifp->if_timer;
				if_data_internal_to_if_data64(ifp,
				    &ifp->if_data, &ifm->ifm_data);
				/*
				 * <rdar://problem/32940901>
				 * Round bytes only for non-platform
				 */
				if (!csproc_get_platform_binary(w->w_req->p)) {
					ALIGN_BYTES(ifm->ifm_data.ifi_ibytes);
					ALIGN_BYTES(ifm->ifm_data.ifi_obytes);
				}

				cp += len;
				VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
				current_len += len;
			}
			/* Unicast addresses (skip the link address itself) */
			while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
				IFA_LOCK(ifa);
				if (af && af != ifa->ifa_addr->sa_family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				info.rti_info[RTAX_IFA] = ifa->ifa_addr;
				info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
				info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
				len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
				    &cred);
				if (pass == 0) {
					total_len += len;
				} else {
					struct ifa_msghdr *ifam;

					if (current_len + len > total_len) {
						IFA_UNLOCK(ifa);
						error = ENOBUFS;
						break;
					}
					len = rt_msg2(RTM_NEWADDR, &info,
					    (caddr_t)cp, NULL, &cred);

					ifam = (struct ifa_msghdr *)(void *)cp;
					ifam->ifam_index =
					    ifa->ifa_ifp->if_index;
					ifam->ifam_flags = ifa->ifa_flags;
					ifam->ifam_metric = ifa->ifa_metric;
					ifam->ifam_addrs = info.rti_addrs;

					cp += len;
					VERIFY(IS_P2ALIGNED(cp,
					    sizeof (u_int32_t)));
					current_len += len;
				}
				IFA_UNLOCK(ifa);
			}
			if (error) {
				ifnet_lock_done(ifp);
				break;
			}

			/* Multicast memberships on this interface */
			for (ifma = LIST_FIRST(&ifp->if_multiaddrs);
			    ifma != NULL; ifma = LIST_NEXT(ifma, ifma_link)) {
				struct ifaddr *ifa0;

				IFMA_LOCK(ifma);
				if (af && af != ifma->ifma_addr->sa_family) {
					IFMA_UNLOCK(ifma);
					continue;
				}
				bzero((caddr_t)&info, sizeof (info));
				info.rti_info[RTAX_IFA] = ifma->ifma_addr;
				/*
				 * Holding ifnet lock here prevents the link
				 * address from changing contents, so no need
				 * to hold the ifa0 lock. The link address is
				 * always present; it's never freed.
				 */
				ifa0 = ifp->if_lladdr;
				info.rti_info[RTAX_IFP] = ifa0->ifa_addr;
				/* Link-layer address is reported as gateway */
				if (ifma->ifma_ll != NULL)
					info.rti_info[RTAX_GATEWAY] =
					    ifma->ifma_ll->ifma_addr;
				len = rt_msg2(RTM_NEWMADDR2, &info, NULL, NULL,
				    &cred);
				if (pass == 0) {
					total_len += len;
				} else {
					struct ifma_msghdr2 *ifmam;

					if (current_len + len > total_len) {
						IFMA_UNLOCK(ifma);
						error = ENOBUFS;
						break;
					}
					len = rt_msg2(RTM_NEWMADDR2, &info,
					    (caddr_t)cp, NULL, &cred);

					ifmam =
					    (struct ifma_msghdr2 *)(void *)cp;
					ifmam->ifmam_addrs = info.rti_addrs;
					ifmam->ifmam_flags = 0;
					ifmam->ifmam_index =
					    ifma->ifma_ifp->if_index;
					ifmam->ifmam_refcount =
					    ifma->ifma_reqcnt;

					cp += len;
					VERIFY(IS_P2ALIGNED(cp,
					    sizeof (u_int32_t)));
					current_len += len;
				}
				IFMA_UNLOCK(ifma);
			}
			ifnet_lock_done(ifp);
			info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
			    info.rti_info[RTAX_BRD] = NULL;
		}
		ifnet_head_done();

		if (error) {
			if (error == ENOBUFS)
				printf("%s: current_len (%d) + len (%d) > "
				    "total_len (%d)\n", __func__, current_len,
				    len, total_len);
			break;
		}

		if (pass == 0) {
			/* Better to return zero length buffer than ENOBUFS */
			if (total_len == 0)
				total_len = 1;
			/* Add ~12.5% slack for entries added in between */
			total_len += total_len >> 3;
			total_buffer = _MALLOC(total_len, M_RTABLE,
			    M_ZERO | M_WAITOK);
			if (total_buffer == NULL) {
				printf("%s: _MALLOC(%d) failed\n", __func__,
				    total_len);
				error = ENOBUFS;
				break;
			}
			cp = total_buffer;
			VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
		} else {
			/* Copy out with no locks held (rdar://9307819) */
			error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
			if (error)
				break;
		}
	}

	if (total_buffer != NULL)
		_FREE(total_buffer, M_RTABLE);

	kauth_cred_unref(&cred);
	return (error);
}
2001
2002
/* Export the global routing statistics (struct rtstat) to userland. */
static int
sysctl_rtstat(struct sysctl_req *req)
{
	return (SYSCTL_OUT(req, &rtstat, sizeof (struct rtstat)));
}
2008
/* Export the count of unfreed-but-deleted routes (rttrash) to userland. */
static int
sysctl_rttrash(struct sysctl_req *req)
{
	return (SYSCTL_OUT(req, &rttrash, sizeof (rttrash)));
}
2014
/*
 * sysctl handler for the PF_ROUTE subtree: dispatches routing-table
 * and interface-list dumps.  After skipping the leading name element,
 * name[] = { af, op, arg }.
 */
static int
sysctl_rtsock SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct radix_node_head *rnh;
	int i, error = EINVAL;
	u_char af;
	struct walkarg w;

	name ++;
	namelen--;
	/* Read-only node: writes are rejected */
	if (req->newptr)
		return (EPERM);
	if (namelen != 3)
		return (EINVAL);
	af = name[0];
	Bzero(&w, sizeof (w));
	w.w_op = name[1];
	w.w_arg = name[2];
	w.w_req = req;

	switch (w.w_op) {

	case NET_RT_DUMP:
	case NET_RT_DUMP2:
	case NET_RT_FLAGS:
		/* Walk each (or only the requested) AF's routing table */
		lck_mtx_lock(rnh_lock);
		for (i = 1; i <= AF_MAX; i++)
			if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
			    (error = rnh->rnh_walktree(rnh,
			    sysctl_dumpentry, &w)))
				break;
		lck_mtx_unlock(rnh_lock);
		break;
	case NET_RT_DUMPX:
	case NET_RT_DUMPX_FLAGS:
		lck_mtx_lock(rnh_lock);
		for (i = 1; i <= AF_MAX; i++)
			if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
			    (error = rnh->rnh_walktree(rnh,
			    sysctl_dumpentry_ext, &w)))
				break;
		lck_mtx_unlock(rnh_lock);
		break;
	case NET_RT_IFLIST:
		error = sysctl_iflist(af, &w);
		break;
	case NET_RT_IFLIST2:
		error = sysctl_iflist2(af, &w);
		break;
	case NET_RT_STAT:
		error = sysctl_rtstat(req);
		break;
	case NET_RT_TRASH:
		error = sysctl_rttrash(req);
		break;
	}
	/* Free the per-walk scratch buffer allocated by rt_msg2() */
	if (w.w_tmem != NULL)
		FREE(w.w_tmem, M_RTABLE);
	return (error);
}
2078
2079 /*
2080 * Definitions of protocols supported in the ROUTE domain.
2081 */
static struct protosw routesw[] = {
	{
		.pr_type = SOCK_RAW,
		.pr_protocol = 0,		/* wildcard: any protocol */
		/* each record delivered whole, with an address attached */
		.pr_flags = PR_ATOMIC|PR_ADDR,
		.pr_output = route_output,	/* handles RTM_* requests */
		.pr_ctlinput = raw_ctlinput,
		.pr_init = raw_init,
		.pr_usrreqs = &route_usrreqs,
	}
};
2093
/* Number of entries in routesw[] */
static int route_proto_count = (sizeof (routesw) / sizeof (struct protosw));
2095
/* PF_ROUTE domain descriptor; initialized via route_dinit() */
struct domain routedomain_s = {
	.dom_family = PF_ROUTE,
	.dom_name = "route",
	.dom_init = route_dinit,
};
2101
2102 static void
2103 route_dinit(struct domain *dp)
2104 {
2105 struct protosw *pr;
2106 int i;
2107
2108 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
2109 VERIFY(routedomain == NULL);
2110
2111 routedomain = dp;
2112
2113 for (i = 0, pr = &routesw[0]; i < route_proto_count; i++, pr++)
2114 net_add_proto(pr, dp, 1);
2115
2116 route_init();
2117 }