]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/net/rtsock.c
xnu-3248.20.55.tar.gz
[apple/xnu.git] / bsd / net / rtsock.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1988, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)rtsock.c 8.5 (Berkeley) 11/2/94
61 */
62
63#include <sys/param.h>
64#include <sys/systm.h>
65#include <sys/kauth.h>
66#include <sys/kernel.h>
67#include <sys/sysctl.h>
68#include <sys/proc.h>
69#include <sys/malloc.h>
70#include <sys/mbuf.h>
71#include <sys/socket.h>
72#include <sys/socketvar.h>
73#include <sys/domain.h>
74#include <sys/protosw.h>
75#include <sys/syslog.h>
76#include <sys/mcache.h>
77#include <kern/locks.h>
78
79#include <net/if.h>
80#include <net/route.h>
81#include <net/dlil.h>
82#include <net/raw_cb.h>
83#include <netinet/in.h>
84#include <netinet/in_var.h>
85#include <netinet/in_arp.h>
86#include <netinet6/nd6.h>
87
/* Global routing statistics, maintained by the routing code. */
extern struct rtstat rtstat;
extern struct domain routedomain_s;
static struct domain *routedomain = NULL;

MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");

/*
 * Canonical peer addresses for routing-socket messages; the initializers
 * are { sa_len, sa_family, sa_data }, so sa_len == 2 covers exactly the
 * length and family bytes.
 */
static struct sockaddr route_dst = { 2, PF_ROUTE, { 0, } };
static struct sockaddr route_src = { 2, PF_ROUTE, { 0, } };
/* Safe placeholder substituted by rt_xaddrs() for zero-length sockaddrs. */
static struct sockaddr sa_zero = { sizeof (sa_zero), AF_INET, { 0, } };
97
/*
 * Counts of routing sockets currently attached, per address family.
 * Updated atomically in rts_attach()/rts_detach(); any_count is consulted
 * in route_output() to decide whether any other listener exists.
 */
struct route_cb {
	u_int32_t	ip_count;	/* attached w/ AF_INET */
	u_int32_t	ip6_count;	/* attached w/ AF_INET6 */
	u_int32_t	any_count;	/* total attached */
};

static struct route_cb route_cb;
105
/*
 * State threaded through the sysctl tree-walk callbacks
 * (sysctl_dumpentry() and friends).
 */
struct walkarg {
	int	w_tmemsize;		/* size of w_tmem buffer */
	int	w_op, w_arg;		/* presumably the sysctl OID op/arg — confirm against sysctl_rtsock */
	caddr_t	w_tmem;			/* scratch buffer for building messages */
	struct sysctl_req *w_req;	/* the sysctl request being serviced */
};
112
/*
 * Forward declarations: PF_ROUTE pr_usrreqs handlers, routing-message
 * construction/parsing helpers, and sysctl walkers.
 */
static void route_dinit(struct domain *);
static int rts_abort(struct socket *);
static int rts_attach(struct socket *, int, struct proc *);
static int rts_bind(struct socket *, struct sockaddr *, struct proc *);
static int rts_connect(struct socket *, struct sockaddr *, struct proc *);
static int rts_detach(struct socket *);
static int rts_disconnect(struct socket *);
static int rts_peeraddr(struct socket *, struct sockaddr **);
static int rts_send(struct socket *, int, struct mbuf *, struct sockaddr *,
    struct mbuf *, struct proc *);
static int rts_shutdown(struct socket *);
static int rts_sockaddr(struct socket *, struct sockaddr **);

static int route_output(struct mbuf *, struct socket *);
static int rt_setmetrics(u_int32_t, struct rt_metrics *, struct rtentry *);
static void rt_getmetrics(struct rtentry *, struct rt_metrics *);
static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
    struct sockaddr *, unsigned int);
static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *);
static struct mbuf *rt_msg1(int, struct rt_addrinfo *);
static int rt_msg2(int, struct rt_addrinfo *, caddr_t, struct walkarg *,
    kauth_cred_t *);
static int sysctl_dumpentry(struct radix_node *rn, void *vw);
static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw);
static int sysctl_iflist(int af, struct walkarg *w);
static int sysctl_iflist2(int af, struct walkarg *w);
static int sysctl_rtstat(struct sysctl_req *);
static int sysctl_rttrash(struct sysctl_req *);
static int sysctl_rtsock SYSCTL_HANDLER_ARGS;
142
SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED,
	sysctl_rtsock, "");

SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "routing");

/*
 * Round a positive length up to the next multiple of sizeof (uint32_t);
 * a zero (or negative) length yields sizeof (uint32_t) so empty
 * sockaddrs still occupy one aligned slot.
 */
#define ROUNDUP32(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof (uint32_t) - 1))) : \
	sizeof (uint32_t))

/* Advance pointer x past sockaddr n, preserving 32-bit alignment. */
#define ADVANCE32(x, n) \
	(x += ROUNDUP32((n)->sa_len))
154
155/*
156 * It really doesn't make any sense at all for this code to share much
157 * with raw_usrreq.c, since its functionality is so restricted. XXX
158 */
159static int
160rts_abort(struct socket *so)
161{
162 return (raw_usrreqs.pru_abort(so));
163}
164
165/* pru_accept is EOPNOTSUPP */
166
/*
 * Attach a new PF_ROUTE socket: allocate a raw control block, hook it
 * up via raw_attach(), bump the per-family listener counts, and mark
 * the socket connected (to route_src) with SO_USELOOPBACK enabled so
 * the sender sees echoes of its own messages by default.
 *
 * Called with the socket locked.  Returns 0 or an errno (ENOBUFS on
 * allocation failure, or whatever raw_attach() reports).
 */
static int
rts_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(p)
	struct rawcb *rp;
	int error;

	VERIFY(so->so_pcb == NULL);

	MALLOC(rp, struct rawcb *, sizeof (*rp), M_PCB, M_WAITOK | M_ZERO);
	if (rp == NULL)
		return (ENOBUFS);

	so->so_pcb = (caddr_t)rp;
	/* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */
	error = raw_attach(so, proto);
	rp = sotorawcb(so);
	if (error) {
		FREE(rp, M_PCB);
		so->so_pcb = NULL;
		so->so_flags |= SOF_PCBCLEARING;
		return (error);
	}

	/* Track listeners per protocol family for route_cb accounting */
	switch (rp->rcb_proto.sp_protocol) {
	case AF_INET:
		atomic_add_32(&route_cb.ip_count, 1);
		break;
	case AF_INET6:
		atomic_add_32(&route_cb.ip6_count, 1);
		break;
	}
	rp->rcb_faddr = &route_src;
	atomic_add_32(&route_cb.any_count, 1);
	/* the socket is already locked when we enter rts_attach */
	soisconnected(so);
	so->so_options |= SO_USELOOPBACK;
	return (0);
}
206
207static int
208rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
209{
210 return (raw_usrreqs.pru_bind(so, nam, p)); /* xxx just EINVAL */
211}
212
213static int
214rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
215{
216 return (raw_usrreqs.pru_connect(so, nam, p)); /* XXX just EINVAL */
217}
218
219/* pru_connect2 is EOPNOTSUPP */
220/* pru_control is EOPNOTSUPP */
221
/*
 * Detach a routing socket: drop the per-family and total listener
 * counts taken in rts_attach(), then let the generic raw-socket
 * detach release the control block.
 */
static int
rts_detach(struct socket *so)
{
	struct rawcb *rp = sotorawcb(so);

	VERIFY(rp != NULL);

	switch (rp->rcb_proto.sp_protocol) {
	case AF_INET:
		atomic_add_32(&route_cb.ip_count, -1);
		break;
	case AF_INET6:
		atomic_add_32(&route_cb.ip6_count, -1);
		break;
	}
	atomic_add_32(&route_cb.any_count, -1);
	return (raw_usrreqs.pru_detach(so));
}
240
241static int
242rts_disconnect(struct socket *so)
243{
244 return (raw_usrreqs.pru_disconnect(so));
245}
246
247/* pru_listen is EOPNOTSUPP */
248
249static int
250rts_peeraddr(struct socket *so, struct sockaddr **nam)
251{
252 return (raw_usrreqs.pru_peeraddr(so, nam));
253}
254
255/* pru_rcvd is EOPNOTSUPP */
256/* pru_rcvoob is EOPNOTSUPP */
257
258static int
259rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
260 struct mbuf *control, struct proc *p)
261{
262 return (raw_usrreqs.pru_send(so, flags, m, nam, control, p));
263}
264
265/* pru_sense is null */
266
267static int
268rts_shutdown(struct socket *so)
269{
270 return (raw_usrreqs.pru_shutdown(so));
271}
272
273static int
274rts_sockaddr(struct socket *so, struct sockaddr **nam)
275{
276 return (raw_usrreqs.pru_sockaddr(so, nam));
277}
278
/*
 * User-request dispatch table for PF_ROUTE sockets.  Operations not
 * listed here (accept, connect2, control, listen, rcvd, rcvoob) are
 * unsupported, as noted in the comments interleaved above.
 */
static struct pr_usrreqs route_usrreqs = {
	.pru_abort =		rts_abort,
	.pru_attach =		rts_attach,
	.pru_bind =		rts_bind,
	.pru_connect =		rts_connect,
	.pru_detach =		rts_detach,
	.pru_disconnect =	rts_disconnect,
	.pru_peeraddr =		rts_peeraddr,
	.pru_send =		rts_send,
	.pru_shutdown =		rts_shutdown,
	.pru_sockaddr =		rts_sockaddr,
	.pru_sosend =		sosend,
	.pru_soreceive =	soreceive,
};
293
/*
 * Process a routing message submitted on a PF_ROUTE socket.
 *
 * The message in mbuf chain 'm' is validated, copied into a contiguous
 * rt_msghdr, and dispatched on rtm_type (RTM_ADD, RTM_DELETE, RTM_GET,
 * RTM_CHANGE, RTM_LOCK).  The resulting message -- with rtm_errno or
 * RTF_DONE set -- is then echoed back: only to this socket for
 * RTM_GET_SILENT, otherwise to all routing-socket listeners via
 * raw_input().
 *
 * Entered with the socket locked; the socket lock is dropped while
 * rnh_lock is held for the routing-table work and retaken before the
 * reply is delivered.  Returns 0 or an errno.
 */
/*ARGSUSED*/
static int
route_output(struct mbuf *m, struct socket *so)
{
	struct rt_msghdr *rtm = NULL;
	struct rtentry *rt = NULL;
	struct rtentry *saved_nrt = NULL;
	struct radix_node_head *rnh;
	struct rt_addrinfo info;
	int len, error = 0;
	sa_family_t dst_sa_family = 0;
	struct ifnet *ifp = NULL;
	struct sockaddr_in dst_in, gate_in;
	int sendonlytoself = 0;
	unsigned int ifscope = IFSCOPE_NONE;
	struct rawcb *rp = NULL;

#define senderr(e) { error = (e); goto flush; }
	/* Need at least a pointer-sized contiguous chunk to inspect */
	if (m == NULL || ((m->m_len < sizeof (intptr_t)) &&
	    (m = m_pullup(m, sizeof (intptr_t))) == NULL))
		return (ENOBUFS);
	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * Unlock the socket (but keep a reference) it won't be
	 * accessed until raw_input appends to it.
	 */
	socket_unlock(so, 0);
	lck_mtx_lock(rnh_lock);

	len = m->m_pkthdr.len;
	if (len < sizeof (*rtm) ||
	    len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EINVAL);
	}
	/* Copy the variable-length message into contiguous storage */
	R_Malloc(rtm, struct rt_msghdr *, len);
	if (rtm == NULL) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(ENOBUFS);
	}
	m_copydata(m, 0, len, (caddr_t)rtm);
	if (rtm->rtm_version != RTM_VERSION) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EPROTONOSUPPORT);
	}

	/*
	 * Silent version of RTM_GET for Reachabiltiy APIs. We may change
	 * all RTM_GETs to be silent in the future, so this is private for now.
	 */
	if (rtm->rtm_type == RTM_GET_SILENT) {
		if (!(so->so_options & SO_USELOOPBACK))
			senderr(EINVAL);
		sendonlytoself = 1;
		rtm->rtm_type = RTM_GET;
	}

	/*
	 * Perform permission checking, only privileged sockets
	 * may perform operations other than RTM_GET
	 */
	if (rtm->rtm_type != RTM_GET && !(so->so_state & SS_PRIV)) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EPERM);
	}

	rtm->rtm_pid = proc_selfpid();
	info.rti_addrs = rtm->rtm_addrs;
	/* Parse the sockaddrs that follow the fixed header */
	if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) {
		info.rti_info[RTAX_DST] = NULL;
		senderr(EINVAL);
	}
	if (info.rti_info[RTAX_DST] == NULL ||
	    info.rti_info[RTAX_DST]->sa_family >= AF_MAX ||
	    (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX))
		senderr(EINVAL);

	/*
	 * Normalize an AF_INET destination of unusual length into a
	 * full-sized sockaddr_in on the stack.
	 */
	if (info.rti_info[RTAX_DST]->sa_family == AF_INET &&
	    info.rti_info[RTAX_DST]->sa_len != sizeof (dst_in)) {
		/* At minimum, we need up to sin_addr */
		if (info.rti_info[RTAX_DST]->sa_len <
		    offsetof(struct sockaddr_in, sin_zero))
			senderr(EINVAL);
		bzero(&dst_in, sizeof (dst_in));
		dst_in.sin_len = sizeof (dst_in);
		dst_in.sin_family = AF_INET;
		dst_in.sin_port = SIN(info.rti_info[RTAX_DST])->sin_port;
		dst_in.sin_addr = SIN(info.rti_info[RTAX_DST])->sin_addr;
		info.rti_info[RTAX_DST] = (struct sockaddr *)&dst_in;
		dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
	}

	/* Same normalization for an AF_INET gateway */
	if (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET &&
	    info.rti_info[RTAX_GATEWAY]->sa_len != sizeof (gate_in)) {
		/* At minimum, we need up to sin_addr */
		if (info.rti_info[RTAX_GATEWAY]->sa_len <
		    offsetof(struct sockaddr_in, sin_zero))
			senderr(EINVAL);
		bzero(&gate_in, sizeof (gate_in));
		gate_in.sin_len = sizeof (gate_in);
		gate_in.sin_family = AF_INET;
		gate_in.sin_port = SIN(info.rti_info[RTAX_GATEWAY])->sin_port;
		gate_in.sin_addr = SIN(info.rti_info[RTAX_GATEWAY])->sin_addr;
		info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gate_in;
	}

	/* Canonicalize the genmask through the shared mask tree */
	if (info.rti_info[RTAX_GENMASK]) {
		struct radix_node *t;
		t = rn_addmask((caddr_t)info.rti_info[RTAX_GENMASK], 0, 1);
		if (t != NULL && Bcmp(info.rti_info[RTAX_GENMASK],
		    t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0)
			info.rti_info[RTAX_GENMASK] =
			    (struct sockaddr *)(t->rn_key);
		else
			senderr(ENOBUFS);
	}

	/*
	 * If RTF_IFSCOPE flag is set, then rtm_index specifies the scope.
	 */
	if (rtm->rtm_flags & RTF_IFSCOPE) {
		if (info.rti_info[RTAX_DST]->sa_family != AF_INET &&
		    info.rti_info[RTAX_DST]->sa_family != AF_INET6)
			senderr(EINVAL);
		ifscope = rtm->rtm_index;
	}

	/*
	 * RTF_PROXY can only be set internally from within the kernel.
	 */
	if (rtm->rtm_flags & RTF_PROXY)
		senderr(EINVAL);

	/*
	 * For AF_INET, always zero out the embedded scope ID. If this is
	 * a scoped request, it must be done explicitly by setting RTF_IFSCOPE
	 * flag and the corresponding rtm_index value. This is to prevent
	 * false interpretation of the scope ID because it's using the sin_zero
	 * field, which might not be properly cleared by the requestor.
	 */
	if (info.rti_info[RTAX_DST]->sa_family == AF_INET)
		sin_set_ifscope(info.rti_info[RTAX_DST], IFSCOPE_NONE);
	if (info.rti_info[RTAX_GATEWAY] != NULL &&
	    info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET)
		sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE);

	switch (rtm->rtm_type) {
	case RTM_ADD:
		if (info.rti_info[RTAX_GATEWAY] == NULL)
			senderr(EINVAL);

		error = rtrequest_scoped_locked(RTM_ADD,
		    info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
		    info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
		    ifscope);
		if (error == 0 && saved_nrt != NULL) {
			RT_LOCK(saved_nrt);
			/*
			 * If the route request specified an interface with
			 * IFA and/or IFP, we set the requested interface on
			 * the route with rt_setif.  It would be much better
			 * to do this inside rtrequest, but that would
			 * require passing the desired interface, in some
			 * form, to rtrequest.  Since rtrequest is called in
			 * so many places (roughly 40 in our source), adding
			 * a parameter is to much for us to swallow; this is
			 * something for the FreeBSD developers to tackle.
			 * Instead, we let rtrequest compute whatever
			 * interface it wants, then come in behind it and
			 * stick in the interface that we really want.  This
			 * works reasonably well except when rtrequest can't
			 * figure out what interface to use (with
			 * ifa_withroute) and returns ENETUNREACH.  Ideally
			 * it shouldn't matter if rtrequest can't figure out
			 * the interface if we're going to explicitly set it
			 * ourselves anyway.  But practically we can't
			 * recover here because rtrequest will not do any of
			 * the work necessary to add the route if it can't
			 * find an interface.  As long as there is a default
			 * route that leads to some interface, rtrequest will
			 * find an interface, so this problem should be
			 * rarely encountered.
			 * dwiggins@bbn.com
			 */
			rt_setif(saved_nrt,
			    info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
			    info.rti_info[RTAX_GATEWAY], ifscope);
			(void)rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, saved_nrt);
			saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
			saved_nrt->rt_rmx.rmx_locks |=
			    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
			saved_nrt->rt_genmask = info.rti_info[RTAX_GENMASK];
			RT_REMREF_LOCKED(saved_nrt);
			RT_UNLOCK(saved_nrt);
		}
		break;

	case RTM_DELETE:
		error = rtrequest_scoped_locked(RTM_DELETE,
		    info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
		    info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
		    ifscope);
		if (error == 0) {
			/* Report the deleted entry back to the caller */
			rt = saved_nrt;
			RT_LOCK(rt);
			goto report;
		}
		break;

	case RTM_GET:
	case RTM_CHANGE:
	case RTM_LOCK:
		rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family];
		if (rnh == NULL)
			senderr(EAFNOSUPPORT);
		/*
		 * Lookup the best match based on the key-mask pair;
		 * callee adds a reference and checks for root node.
		 */
		rt = rt_lookup(TRUE, info.rti_info[RTAX_DST],
		    info.rti_info[RTAX_NETMASK], rnh, ifscope);
		if (rt == NULL)
			senderr(ESRCH);
		RT_LOCK(rt);

		/*
		 * Holding rnh_lock here prevents the possibility of
		 * ifa from changing (e.g. in_ifinit), so it is safe
		 * to access its ifa_addr (down below) without locking.
		 */
		switch (rtm->rtm_type) {
		case RTM_GET: {
			struct ifaddr *ifa2;
report:
			ifa2 = NULL;
			RT_LOCK_ASSERT_HELD(rt);
			info.rti_info[RTAX_DST] = rt_key(rt);
			dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
			if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
				ifp = rt->rt_ifp;
				if (ifp != NULL) {
					ifnet_lock_shared(ifp);
					ifa2 = ifp->if_lladdr;
					info.rti_info[RTAX_IFP] =
					    ifa2->ifa_addr;
					IFA_ADDREF(ifa2);
					ifnet_lock_done(ifp);
					info.rti_info[RTAX_IFA] =
					    rt->rt_ifa->ifa_addr;
					rtm->rtm_index = ifp->if_index;
				} else {
					info.rti_info[RTAX_IFP] = NULL;
					info.rti_info[RTAX_IFA] = NULL;
				}
			} else if ((ifp = rt->rt_ifp) != NULL) {
				rtm->rtm_index = ifp->if_index;
			}
			/* First pass: compute the required message length */
			if (ifa2 != NULL)
				IFA_LOCK(ifa2);
			len = rt_msg2(rtm->rtm_type, &info, NULL, NULL, NULL);
			if (ifa2 != NULL)
				IFA_UNLOCK(ifa2);
			if (len > rtm->rtm_msglen) {
				/* Reply doesn't fit; grow the buffer */
				struct rt_msghdr *new_rtm;
				R_Malloc(new_rtm, struct rt_msghdr *, len);
				if (new_rtm == NULL) {
					RT_UNLOCK(rt);
					if (ifa2 != NULL)
						IFA_REMREF(ifa2);
					senderr(ENOBUFS);
				}
				Bcopy(rtm, new_rtm, rtm->rtm_msglen);
				R_Free(rtm); rtm = new_rtm;
			}
			/* Second pass: fill in the reply message */
			if (ifa2 != NULL)
				IFA_LOCK(ifa2);
			(void) rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
			    NULL, NULL);
			if (ifa2 != NULL)
				IFA_UNLOCK(ifa2);
			rtm->rtm_flags = rt->rt_flags;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_addrs = info.rti_addrs;
			if (ifa2 != NULL)
				IFA_REMREF(ifa2);
			break;
		}

		case RTM_CHANGE:
			if (info.rti_info[RTAX_GATEWAY] != NULL &&
			    (error = rt_setgate(rt, rt_key(rt),
			    info.rti_info[RTAX_GATEWAY]))) {
				int tmp = error;
				RT_UNLOCK(rt);
				senderr(tmp);
			}
			/*
			 * If they tried to change things but didn't specify
			 * the required gateway, then just use the old one.
			 * This can happen if the user tries to change the
			 * flags on the default route without changing the
			 * default gateway. Changing flags still doesn't work.
			 */
			if ((rt->rt_flags & RTF_GATEWAY) &&
			    info.rti_info[RTAX_GATEWAY] == NULL)
				info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;

			/*
			 * On Darwin, we call rt_setif which contains the
			 * equivalent to the code found at this very spot
			 * in BSD.
			 */
			rt_setif(rt,
			    info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
			    info.rti_info[RTAX_GATEWAY], ifscope);

			if ((error = rt_setmetrics(rtm->rtm_inits,
			    &rtm->rtm_rmx, rt))) {
				int tmp = error;
				RT_UNLOCK(rt);
				senderr(tmp);
			}
			if (info.rti_info[RTAX_GENMASK])
				rt->rt_genmask = info.rti_info[RTAX_GENMASK];
			/* FALLTHRU */
		case RTM_LOCK:
			rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
			rt->rt_rmx.rmx_locks |=
			    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
			break;
		}
		RT_UNLOCK(rt);
		break;

	default:
		senderr(EOPNOTSUPP);
	}
flush:
	/* Reflect the outcome into the message before echoing it */
	if (rtm != NULL) {
		if (error)
			rtm->rtm_errno = error;
		else
			rtm->rtm_flags |= RTF_DONE;
	}
	if (rt != NULL) {
		RT_LOCK_ASSERT_NOTHELD(rt);
		rtfree_locked(rt);
	}
	lck_mtx_unlock(rnh_lock);

	/* relock the socket now */
	socket_lock(so, 0);
	/*
	 * Check to see if we don't want our own messages.
	 */
	if (!(so->so_options & SO_USELOOPBACK)) {
		if (route_cb.any_count <= 1) {
			/* No other listeners; nothing to deliver */
			if (rtm != NULL)
				R_Free(rtm);
			m_freem(m);
			return (error);
		}
		/* There is another listener, so construct message */
		rp = sotorawcb(so);
	}
	if (rtm != NULL) {
		/* Replace the request in 'm' with the reply built in 'rtm' */
		m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
		if (m->m_pkthdr.len < rtm->rtm_msglen) {
			m_freem(m);
			m = NULL;
		} else if (m->m_pkthdr.len > rtm->rtm_msglen) {
			m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
		}
		R_Free(rtm);
	}
	if (sendonlytoself && m != NULL) {
		/* RTM_GET_SILENT: deliver only to the requesting socket */
		error = 0;
		if (sbappendaddr(&so->so_rcv, &route_src, m,
		    NULL, &error) != 0) {
			sorwakeup(so);
		}
		if (error)
			return (error);
	} else {
		/* Broadcast to all routing-socket listeners */
		struct sockproto route_proto = { PF_ROUTE, 0 };
		if (rp != NULL)
			rp->rcb_proto.sp_family = 0; /* Avoid us */
		if (dst_sa_family != 0)
			route_proto.sp_protocol = dst_sa_family;
		if (m != NULL) {
			socket_unlock(so, 0);
			raw_input(m, &route_proto, &route_src, &route_dst);
			socket_lock(so, 0);
		}
		if (rp != NULL)
			rp->rcb_proto.sp_family = PF_ROUTE;
	}
	return (error);
}
699
700void
701rt_setexpire(struct rtentry *rt, uint64_t expiry)
702{
703 /* set both rt_expire and rmx_expire */
704 rt->rt_expire = expiry;
705 if (expiry) {
706 rt->rt_rmx.rmx_expire = expiry + rt->base_calendartime -
707 rt->base_uptime;
708 } else {
709 rt->rt_rmx.rmx_expire = 0;
710 }
711}
712
713static int
714rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out)
715{
716 if (!(which & RTV_REFRESH_HOST)) {
717 struct timeval caltime;
718 getmicrotime(&caltime);
719#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
720 metric(RTV_RPIPE, rmx_recvpipe);
721 metric(RTV_SPIPE, rmx_sendpipe);
722 metric(RTV_SSTHRESH, rmx_ssthresh);
723 metric(RTV_RTT, rmx_rtt);
724 metric(RTV_RTTVAR, rmx_rttvar);
725 metric(RTV_HOPCOUNT, rmx_hopcount);
726 metric(RTV_MTU, rmx_mtu);
727 metric(RTV_EXPIRE, rmx_expire);
728#undef metric
729 if (out->rt_rmx.rmx_expire > 0) {
730 /* account for system time change */
731 getmicrotime(&caltime);
732 out->base_calendartime +=
733 NET_CALCULATE_CLOCKSKEW(caltime,
734 out->base_calendartime,
735 net_uptime(), out->base_uptime);
736 rt_setexpire(out,
737 out->rt_rmx.rmx_expire -
738 out->base_calendartime +
739 out->base_uptime);
740 } else {
741 rt_setexpire(out, 0);
742 }
743
744 VERIFY(out->rt_expire == 0 || out->rt_rmx.rmx_expire != 0);
745 VERIFY(out->rt_expire != 0 || out->rt_rmx.rmx_expire == 0);
746 } else {
747 /* Only RTV_REFRESH_HOST must be set */
748 if ((which & ~RTV_REFRESH_HOST) ||
749 (out->rt_flags & RTF_STATIC) ||
750 !(out->rt_flags & RTF_LLINFO)) {
751 return (EINVAL);
752 }
753
754 if (out->rt_llinfo_refresh == NULL) {
755 return (ENOTSUP);
756 }
757
758 out->rt_llinfo_refresh(out);
759 }
760 return (0);
761}
762
/*
 * Export a route's metrics into 'out', converting the uptime-based
 * rt_expire into a calendar-time rmx_expire for userland.  Any clock
 * skew accumulated since the route was stamped is folded into the
 * route's base_calendartime first (note: mutates 'in').
 */
static void
rt_getmetrics(struct rtentry *in, struct rt_metrics *out)
{
	struct timeval caltime;

	/* rt_expire and rmx_expire must be both zero or both nonzero */
	VERIFY(in->rt_expire == 0 || in->rt_rmx.rmx_expire != 0);
	VERIFY(in->rt_expire != 0 || in->rt_rmx.rmx_expire == 0);

	*out = in->rt_rmx;

	if (in->rt_expire != 0) {
		/* account for system time change */
		getmicrotime(&caltime);

		in->base_calendartime +=
		    NET_CALCULATE_CLOCKSKEW(caltime,
		    in->base_calendartime, net_uptime(), in->base_uptime);

		out->rmx_expire = in->base_calendartime +
		    in->rt_expire - in->base_uptime;
	} else {
		out->rmx_expire = 0;
	}
}
787
/*
 * Set route's interface given info.rti_info[RTAX_IFP],
 * info.rti_info[RTAX_IFA], and gateway.
 *
 * Called with rnh_lock held and the route locked.  The route lock may
 * be dropped and retaken around ifa_ifwithroute (see inline comment).
 * When a different ifaddr is selected, the old ifa's rtrequest hook is
 * notified with RTM_DELETE and the new one with RTM_ADD, and interface
 * route reference counts / primary ifscope / MTU are updated.
 */
static void
rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr,
    struct sockaddr *Gate, unsigned int ifscope)
{
	struct ifaddr *ifa = NULL;
	struct ifnet *ifp = NULL;
	void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *);

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	RT_LOCK_ASSERT_HELD(rt);

	/* Don't update a defunct route */
	if (rt->rt_flags & RTF_CONDEMNED)
		return;

	/* Add an extra ref for ourselves */
	RT_ADDREF_LOCKED(rt);

	/* Become a regular mutex, just in case */
	RT_CONVERT_LOCK(rt);

	/*
	 * New gateway could require new ifaddr, ifp; flags may also
	 * be different; ifp may be specified by ll sockaddr when
	 * protocol address is ambiguous.
	 */
	if (Ifpaddr && (ifa = ifa_ifwithnet_scoped(Ifpaddr, ifscope)) &&
	    (ifp = ifa->ifa_ifp) && (Ifaaddr || Gate)) {
		IFA_REMREF(ifa);
		ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate, ifp);
	} else {
		if (ifa != NULL) {
			IFA_REMREF(ifa);
			ifa = NULL;
		}
		if (Ifpaddr && (ifp = if_withname(Ifpaddr))) {
			if (Gate) {
				ifa = ifaof_ifpforaddr(Gate, ifp);
			} else {
				/* No gateway: take the ifp's first address */
				ifnet_lock_shared(ifp);
				ifa = TAILQ_FIRST(&ifp->if_addrhead);
				if (ifa != NULL)
					IFA_ADDREF(ifa);
				ifnet_lock_done(ifp);
			}
		} else if (Ifaaddr &&
		    (ifa = ifa_ifwithaddr_scoped(Ifaaddr, ifscope))) {
			ifp = ifa->ifa_ifp;
		} else if (Gate != NULL) {
			/*
			 * Safe to drop rt_lock and use rt_key, since holding
			 * rnh_lock here prevents another thread from calling
			 * rt_setgate() on this route. We cannot hold the
			 * lock across ifa_ifwithroute since the lookup done
			 * by that routine may point to the same route.
			 */
			RT_UNLOCK(rt);
			if ((ifa = ifa_ifwithroute_scoped_locked(rt->rt_flags,
			    rt_key(rt), Gate, ifscope)) != NULL)
				ifp = ifa->ifa_ifp;
			RT_LOCK(rt);
			/* Don't update a defunct route */
			if (rt->rt_flags & RTF_CONDEMNED) {
				if (ifa != NULL)
					IFA_REMREF(ifa);
				/* Release extra ref */
				RT_REMREF_LOCKED(rt);
				return;
			}
		}
	}

	/* trigger route cache reevaluation */
	if (rt_key(rt)->sa_family == AF_INET)
		routegenid_inet_update();
#if INET6
	else if (rt_key(rt)->sa_family == AF_INET6)
		routegenid_inet6_update();
#endif /* INET6 */

	if (ifa != NULL) {
		struct ifaddr *oifa = rt->rt_ifa;
		if (oifa != ifa) {
			/* Tell the old ifa it is losing this route */
			if (oifa != NULL) {
				IFA_LOCK_SPIN(oifa);
				ifa_rtrequest = oifa->ifa_rtrequest;
				IFA_UNLOCK(oifa);
				if (ifa_rtrequest != NULL)
					ifa_rtrequest(RTM_DELETE, rt, Gate);
			}
			rtsetifa(rt, ifa);

			if (rt->rt_ifp != ifp) {
				/*
				 * Purge any link-layer info caching.
				 */
				if (rt->rt_llinfo_purge != NULL)
					rt->rt_llinfo_purge(rt);

				/*
				 * Adjust route ref count for the interfaces.
				 */
				if (rt->rt_if_ref_fn != NULL) {
					rt->rt_if_ref_fn(ifp, 1);
					rt->rt_if_ref_fn(rt->rt_ifp, -1);
				}
			}
			rt->rt_ifp = ifp;
			/*
			 * If this is the (non-scoped) default route, record
			 * the interface index used for the primary ifscope.
			 */
			if (rt_primary_default(rt, rt_key(rt))) {
				set_primary_ifscope(rt_key(rt)->sa_family,
				    rt->rt_ifp->if_index);
			}
			/*
			 * If rmx_mtu is not locked, update it
			 * to the MTU used by the new interface.
			 */
			if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
				rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

			/* Tell the new ifa it gained this route */
			if (rt->rt_ifa != NULL) {
				IFA_LOCK_SPIN(rt->rt_ifa);
				ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
				IFA_UNLOCK(rt->rt_ifa);
				if (ifa_rtrequest != NULL)
					ifa_rtrequest(RTM_ADD, rt, Gate);
			}
			IFA_REMREF(ifa);
			/* Release extra ref */
			RT_REMREF_LOCKED(rt);
			return;
		}
		IFA_REMREF(ifa);
		ifa = NULL;
	}

	/* XXX: to reset gateway to correct value, at RTM_CHANGE */
	if (rt->rt_ifa != NULL) {
		IFA_LOCK_SPIN(rt->rt_ifa);
		ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
		IFA_UNLOCK(rt->rt_ifa);
		if (ifa_rtrequest != NULL)
			ifa_rtrequest(RTM_ADD, rt, Gate);
	}

	/*
	 * Workaround for local address routes pointing to the loopback
	 * interface added by configd, until <rdar://problem/12970142>.
	 */
	if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
	    (rt->rt_flags & RTF_HOST) && rt->rt_ifa->ifa_ifp == rt->rt_ifp) {
		ifa = ifa_ifwithaddr(rt_key(rt));
		if (ifa != NULL) {
			if (ifa != rt->rt_ifa)
				rtsetifa(rt, ifa);
			IFA_REMREF(ifa);
		}
	}

	/* Release extra ref */
	RT_REMREF_LOCKED(rt);
}
958
959/*
960 * Extract the addresses of the passed sockaddrs.
961 * Do a little sanity checking so as to avoid bad memory references.
962 * This data is derived straight from userland.
963 */
964static int
965rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo)
966{
967 struct sockaddr *sa;
968 int i;
969
970 bzero(rtinfo->rti_info, sizeof (rtinfo->rti_info));
971 for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
972 if ((rtinfo->rti_addrs & (1 << i)) == 0)
973 continue;
974 sa = (struct sockaddr *)cp;
975 /*
976 * It won't fit.
977 */
978 if ((cp + sa->sa_len) > cplim)
979 return (EINVAL);
980 /*
981 * there are no more.. quit now
982 * If there are more bits, they are in error.
983 * I've seen this. route(1) can evidently generate these.
984 * This causes kernel to core dump.
985 * for compatibility, If we see this, point to a safe address.
986 */
987 if (sa->sa_len == 0) {
988 rtinfo->rti_info[i] = &sa_zero;
989 return (0); /* should be EINVAL but for compat */
990 }
991 /* accept it */
992 rtinfo->rti_info[i] = sa;
993 ADVANCE32(cp, sa);
994 }
995 return (0);
996}
997
/*
 * Build a routing message of the given type into a freshly allocated
 * mbuf: a zeroed fixed header (whose size depends on 'type') followed
 * by the sockaddrs present in 'rtinfo', each 32-bit aligned.  Embedded
 * interface scope is scrubbed from destination/netmask sockaddrs before
 * they are copied out.  Returns NULL on allocation failure or if the
 * mbuf could not be extended to the computed length.
 */
static struct mbuf *
rt_msg1(int type, struct rt_addrinfo *rtinfo)
{
	struct rt_msghdr *rtm;
	struct mbuf *m;
	int i;
	int len, dlen, off;

	/* Fixed header size depends on the message type */
	switch (type) {

	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof (struct ifa_msghdr);
		break;

	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		len = sizeof (struct ifma_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof (struct if_msghdr);
		break;

	default:
		len = sizeof (struct rt_msghdr);
	}
	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m && len > MHLEN) {
		/* Header doesn't fit in a plain mbuf; attach a cluster */
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			m = NULL;
		}
	}
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	rtm = mtod(m, struct rt_msghdr *);
	bzero((caddr_t)rtm, len);
	off = len;
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa, *hint;
		uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

		/*
		 * Make sure to accomodate the largest possible size of sa_len.
		 */
		_CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));

		if ((sa = rtinfo->rti_info[i]) == NULL)
			continue;

		switch (i) {
		case RTAX_DST:
		case RTAX_NETMASK:
			if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
				hint = rtinfo->rti_info[RTAX_IFA];

			/* Scrub away any trace of embedded interface scope */
			sa = rtm_scrub(type, i, hint, sa, &ssbuf,
			    sizeof (ssbuf), NULL);
			break;

		default:
			break;
		}

		rtinfo->rti_addrs |= (1 << i);
		dlen = sa->sa_len;
		m_copyback(m, off, dlen, (caddr_t)sa);
		len = off + dlen;
		off += ROUNDUP32(dlen);
	}
	/* m_copyback() should have grown the mbuf to exactly 'len' */
	if (m->m_pkthdr.len != len) {
		m_freem(m);
		return (NULL);
	}
	rtm->rtm_msglen = len;
	rtm->rtm_version = RTM_VERSION;
	rtm->rtm_type = type;
	return (m);
}
1082
/*
 * Compute (and, when `cp' is non-NULL, build at `cp') a routing-socket
 * message of `type' from the addresses in `rtinfo'.  When invoked from
 * the sysctl path with cp == NULL and a walkarg `w', the first pass
 * computes the size, grows w->w_tmem as needed, and a second pass
 * (via `goto again') fills that staging buffer.  `credp', when given,
 * lets rtm_scrub() redact the RTAX_IFP name based on credentials.
 * Returns the total message length in bytes.
 */
static int
rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w,
	kauth_cred_t* credp)
{
	int i;
	int len, dlen, rlen, second_time = 0;
	caddr_t cp0;

	rtinfo->rti_addrs = 0;
again:
	/* Fixed header size depends on the message type */
	switch (type) {

	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof (struct ifa_msghdr);
		break;

	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		len = sizeof (struct ifma_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof (struct if_msghdr);
		break;

	case RTM_IFINFO2:
		len = sizeof (struct if_msghdr2);
		break;

	case RTM_NEWMADDR2:
		len = sizeof (struct ifma_msghdr2);
		break;

	case RTM_GET_EXT:
		len = sizeof (struct rt_msghdr_ext);
		break;

	case RTM_GET2:
		len = sizeof (struct rt_msghdr2);
		break;

	default:
		len = sizeof (struct rt_msghdr);
	}
	/* Remember the start of the buffer; addresses go after the header */
	cp0 = cp;
	if (cp0)
		cp += len;
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa, *hint;
		uint8_t ssbuf[SOCK_MAXADDRLEN + 1];

		/*
		 * Make sure to accomodate the largest possible size of sa_len.
		 */
		_CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));

		if ((sa = rtinfo->rti_info[i]) == NULL)
			continue;

		switch (i) {
		case RTAX_DST:
		case RTAX_NETMASK:
			if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
				hint = rtinfo->rti_info[RTAX_IFA];

			/* Scrub away any trace of embedded interface scope */
			sa = rtm_scrub(type, i, hint, sa, &ssbuf,
			    sizeof (ssbuf), NULL);
			break;

		case RTAX_IFP:
			/* May redact the interface name based on credp */
			sa = rtm_scrub(type, i, NULL, sa, &ssbuf,
			    sizeof (ssbuf), credp);
			break;

		default:
			break;
		}

		rtinfo->rti_addrs |= (1 << i);
		dlen = sa->sa_len;
		rlen = ROUNDUP32(dlen);
		if (cp) {
			bcopy((caddr_t)sa, cp, (size_t)dlen);
			/* Zero the alignment padding */
			if (dlen != rlen)
				bzero(cp + dlen, rlen - dlen);
			cp += rlen;
		}
		len += rlen;
	}
	/*
	 * Size-only pass on behalf of a walker: allocate (or grow) the
	 * staging buffer, then loop back once to actually fill it in.
	 */
	if (cp == NULL && w != NULL && !second_time) {
		struct walkarg *rw = w;

		if (rw->w_req != NULL) {
			if (rw->w_tmemsize < len) {
				if (rw->w_tmem != NULL)
					FREE(rw->w_tmem, M_RTABLE);
				rw->w_tmem = _MALLOC(len, M_RTABLE, M_WAITOK);
				if (rw->w_tmem != NULL)
					rw->w_tmemsize = len;
			}
			if (rw->w_tmem != NULL) {
				cp = rw->w_tmem;
				second_time = 1;
				goto again;
			}
		}
	}
	/* If a message was built, finish its header */
	if (cp) {
		struct rt_msghdr *rtm = (struct rt_msghdr *)(void *)cp0;

		rtm->rtm_version = RTM_VERSION;
		rtm->rtm_type = type;
		rtm->rtm_msglen = len;
	}
	return (len);
}
1201
1202/*
1203 * This routine is called to generate a message from the routing
1204 * socket indicating that a redirect has occurred, a routing lookup
1205 * has failed, or that a protocol has detected timeouts to a particular
1206 * destination.
1207 */
1208void
1209rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error)
1210{
1211 struct rt_msghdr *rtm;
1212 struct mbuf *m;
1213 struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
1214 struct sockproto route_proto = { PF_ROUTE, 0 };
1215
1216 if (route_cb.any_count == 0)
1217 return;
1218 m = rt_msg1(type, rtinfo);
1219 if (m == NULL)
1220 return;
1221 rtm = mtod(m, struct rt_msghdr *);
1222 rtm->rtm_flags = RTF_DONE | flags;
1223 rtm->rtm_errno = error;
1224 rtm->rtm_addrs = rtinfo->rti_addrs;
1225 route_proto.sp_family = sa ? sa->sa_family : 0;
1226 raw_input(m, &route_proto, &route_src, &route_dst);
1227}
1228
1229/*
1230 * This routine is called to generate a message from the routing
1231 * socket indicating that the status of a network interface has changed.
1232 */
1233void
1234rt_ifmsg(struct ifnet *ifp)
1235{
1236 struct if_msghdr *ifm;
1237 struct mbuf *m;
1238 struct rt_addrinfo info;
1239 struct sockproto route_proto = { PF_ROUTE, 0 };
1240
1241 if (route_cb.any_count == 0)
1242 return;
1243 bzero((caddr_t)&info, sizeof (info));
1244 m = rt_msg1(RTM_IFINFO, &info);
1245 if (m == NULL)
1246 return;
1247 ifm = mtod(m, struct if_msghdr *);
1248 ifm->ifm_index = ifp->if_index;
1249 ifm->ifm_flags = (u_short)ifp->if_flags;
1250 if_data_internal_to_if_data(ifp, &ifp->if_data, &ifm->ifm_data);
1251 ifm->ifm_addrs = 0;
1252 raw_input(m, &route_proto, &route_src, &route_dst);
1253}
1254
1255/*
1256 * This is called to generate messages from the routing socket
1257 * indicating a network interface has had addresses associated with it.
1258 * if we ever reverse the logic and replace messages TO the routing
1259 * socket indicate a request to configure interfaces, then it will
1260 * be unnecessary as the routing socket will automatically generate
1261 * copies of it.
1262 *
1263 * Since this is coming from the interface, it is expected that the
1264 * interface will be locked. Caller must hold rnh_lock and rt_lock.
1265 */
void
rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
{
	struct rt_addrinfo info;
	struct sockaddr *sa = 0;
	int pass;
	struct mbuf *m = 0;
	struct ifnet *ifp = ifa->ifa_ifp;
	struct sockproto route_proto = { PF_ROUTE, 0 };

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (route_cb.any_count == 0)
		return;

	/* Become a regular mutex, just in case */
	RT_CONVERT_LOCK(rt);
	/*
	 * Two messages per event: an address message (RTM_NEWADDR or
	 * RTM_DELADDR) and a route message (`cmd').  The pass ordering
	 * announces the address before the route on RTM_ADD, and the
	 * route before the address on RTM_DELETE.
	 */
	for (pass = 1; pass < 3; pass++) {
		bzero((caddr_t)&info, sizeof (info));
		if ((cmd == RTM_ADD && pass == 1) ||
		    (cmd == RTM_DELETE && pass == 2)) {
			struct ifa_msghdr *ifam;
			int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;

			/* Lock ifp for if_lladdr */
			ifnet_lock_shared(ifp);
			IFA_LOCK(ifa);
			info.rti_info[RTAX_IFA] = sa = ifa->ifa_addr;
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold its
			 * lock. The link address is always present; it's
			 * never freed.
			 */
			info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;
			info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
			info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
			if ((m = rt_msg1(ncmd, &info)) == NULL) {
				IFA_UNLOCK(ifa);
				ifnet_lock_done(ifp);
				continue;
			}
			/* Drop the locks before touching the mbuf contents */
			IFA_UNLOCK(ifa);
			ifnet_lock_done(ifp);
			ifam = mtod(m, struct ifa_msghdr *);
			ifam->ifam_index = ifp->if_index;
			IFA_LOCK_SPIN(ifa);
			ifam->ifam_metric = ifa->ifa_metric;
			ifam->ifam_flags = ifa->ifa_flags;
			IFA_UNLOCK(ifa);
			ifam->ifam_addrs = info.rti_addrs;
		}
		if ((cmd == RTM_ADD && pass == 2) ||
		    (cmd == RTM_DELETE && pass == 1)) {
			struct rt_msghdr *rtm;

			if (rt == NULL)
				continue;
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_DST] = sa = rt_key(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if ((m = rt_msg1(cmd, &info)) == NULL)
				continue;
			rtm = mtod(m, struct rt_msghdr *);
			rtm->rtm_index = ifp->if_index;
			rtm->rtm_flags |= rt->rt_flags;
			rtm->rtm_errno = error;
			rtm->rtm_addrs = info.rti_addrs;
		}
		/* Tag the message with the family of the affected address */
		route_proto.sp_protocol = sa ? sa->sa_family : 0;
		raw_input(m, &route_proto, &route_src, &route_dst);
	}
}
1340
1341/*
1342 * This is the analogue to the rt_newaddrmsg which performs the same
1343 * function but for multicast group memberhips. This is easier since
1344 * there is no route state to worry about.
1345 */
void
rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma)
{
	struct rt_addrinfo info;
	struct mbuf *m = 0;
	struct ifnet *ifp = ifma->ifma_ifp;
	struct ifma_msghdr *ifmam;
	struct sockproto route_proto = { PF_ROUTE, 0 };

	/* Nothing to do unless someone is listening on a routing socket */
	if (route_cb.any_count == 0)
		return;

	/* Lock ifp for if_lladdr */
	ifnet_lock_shared(ifp);
	bzero((caddr_t)&info, sizeof (info));
	IFMA_LOCK(ifma);
	info.rti_info[RTAX_IFA] = ifma->ifma_addr;
	/* lladdr doesn't need lock */
	info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;

	/*
	 * If a link-layer address is present, present it as a ``gateway''
	 * (similarly to how ARP entries, e.g., are presented).
	 */
	info.rti_info[RTAX_GATEWAY] = (ifma->ifma_ll != NULL) ?
	    ifma->ifma_ll->ifma_addr : NULL;
	if ((m = rt_msg1(cmd, &info)) == NULL) {
		IFMA_UNLOCK(ifma);
		ifnet_lock_done(ifp);
		return;
	}
	ifmam = mtod(m, struct ifma_msghdr *);
	ifmam->ifmam_index = ifp->if_index;
	ifmam->ifmam_addrs = info.rti_addrs;
	/* Tag the message with the multicast address family */
	route_proto.sp_protocol = ifma->ifma_addr->sa_family;
	IFMA_UNLOCK(ifma);
	ifnet_lock_done(ifp);
	raw_input(m, &route_proto, &route_src, &route_dst);
}
1385
1386const char *
1387rtm2str(int cmd)
1388{
1389 const char *c = "RTM_?";
1390
1391 switch (cmd) {
1392 case RTM_ADD:
1393 c = "RTM_ADD";
1394 break;
1395 case RTM_DELETE:
1396 c = "RTM_DELETE";
1397 break;
1398 case RTM_CHANGE:
1399 c = "RTM_CHANGE";
1400 break;
1401 case RTM_GET:
1402 c = "RTM_GET";
1403 break;
1404 case RTM_LOSING:
1405 c = "RTM_LOSING";
1406 break;
1407 case RTM_REDIRECT:
1408 c = "RTM_REDIRECT";
1409 break;
1410 case RTM_MISS:
1411 c = "RTM_MISS";
1412 break;
1413 case RTM_LOCK:
1414 c = "RTM_LOCK";
1415 break;
1416 case RTM_OLDADD:
1417 c = "RTM_OLDADD";
1418 break;
1419 case RTM_OLDDEL:
1420 c = "RTM_OLDDEL";
1421 break;
1422 case RTM_RESOLVE:
1423 c = "RTM_RESOLVE";
1424 break;
1425 case RTM_NEWADDR:
1426 c = "RTM_NEWADDR";
1427 break;
1428 case RTM_DELADDR:
1429 c = "RTM_DELADDR";
1430 break;
1431 case RTM_IFINFO:
1432 c = "RTM_IFINFO";
1433 break;
1434 case RTM_NEWMADDR:
1435 c = "RTM_NEWMADDR";
1436 break;
1437 case RTM_DELMADDR:
1438 c = "RTM_DELMADDR";
1439 break;
1440 case RTM_GET_SILENT:
1441 c = "RTM_GET_SILENT";
1442 break;
1443 case RTM_IFINFO2:
1444 c = "RTM_IFINFO2";
1445 break;
1446 case RTM_NEWMADDR2:
1447 c = "RTM_NEWMADDR2";
1448 break;
1449 case RTM_GET2:
1450 c = "RTM_GET2";
1451 break;
1452 case RTM_GET_EXT:
1453 c = "RTM_GET_EXT";
1454 break;
1455 }
1456
1457 return (c);
1458}
1459
1460/*
1461 * This is used in dumping the kernel table via sysctl().
1462 */
static int
sysctl_dumpentry(struct radix_node *rn, void *vw)
{
	struct walkarg *w = vw;
	struct rtentry *rt = (struct rtentry *)rn;
	int error = 0, size;
	struct rt_addrinfo info;
	kauth_cred_t cred;

	/* Credentials let rt_msg2() redact data from unprivileged callers */
	cred = kauth_cred_proc_ref(current_proc());

	RT_LOCK(rt);
	/* For NET_RT_FLAGS, report only entries matching the flag mask */
	if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
		goto done;
	bzero((caddr_t)&info, sizeof (info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GENMASK] = rt->rt_genmask;

	if (w->w_op != NET_RT_DUMP2) {
		/* Classic rt_msghdr reply, staged in w->w_tmem by rt_msg2() */
		size = rt_msg2(RTM_GET, &info, NULL, w, &cred);
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct rt_msghdr *rtm =
			    (struct rt_msghdr *)(void *)w->w_tmem;

			rtm->rtm_flags = rt->rt_flags;
			rtm->rtm_use = rt->rt_use;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_index = rt->rt_ifp->if_index;
			rtm->rtm_pid = 0;
			rtm->rtm_seq = 0;
			rtm->rtm_errno = 0;
			rtm->rtm_addrs = info.rti_addrs;
			error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
		}
	} else {
		/* Extended rt_msghdr2 reply with refcount and parent flags */
		size = rt_msg2(RTM_GET2, &info, NULL, w, &cred);
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct rt_msghdr2 *rtm =
			    (struct rt_msghdr2 *)(void *)w->w_tmem;

			rtm->rtm_flags = rt->rt_flags;
			rtm->rtm_use = rt->rt_use;
			rt_getmetrics(rt, &rtm->rtm_rmx);
			rtm->rtm_index = rt->rt_ifp->if_index;
			rtm->rtm_refcnt = rt->rt_refcnt;
			if (rt->rt_parent)
				rtm->rtm_parentflags = rt->rt_parent->rt_flags;
			else
				rtm->rtm_parentflags = 0;
			rtm->rtm_reserved = 0;
			rtm->rtm_addrs = info.rti_addrs;
			error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
		}
	}

done:
	RT_UNLOCK(rt);
	kauth_cred_unref(&cred);
	return (error);
}
1525
1526/*
1527 * This is used for dumping extended information from route entries.
1528 */
static int
sysctl_dumpentry_ext(struct radix_node *rn, void *vw)
{
	struct walkarg *w = vw;
	struct rtentry *rt = (struct rtentry *)rn;
	int error = 0, size;
	struct rt_addrinfo info;
	kauth_cred_t cred;

	/* Credentials let rt_msg2() redact data from unprivileged callers */
	cred = kauth_cred_proc_ref(current_proc());

	RT_LOCK(rt);
	/* For NET_RT_DUMPX_FLAGS, report only entries matching the mask */
	if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg))
		goto done;
	bzero(&info, sizeof (info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GENMASK] = rt->rt_genmask;

	size = rt_msg2(RTM_GET_EXT, &info, NULL, w, &cred);
	if (w->w_req != NULL && w->w_tmem != NULL) {
		struct rt_msghdr_ext *ertm =
		    (struct rt_msghdr_ext *)(void *)w->w_tmem;

		ertm->rtm_flags = rt->rt_flags;
		ertm->rtm_use = rt->rt_use;
		rt_getmetrics(rt, &ertm->rtm_rmx);
		ertm->rtm_index = rt->rt_ifp->if_index;
		ertm->rtm_pid = 0;
		ertm->rtm_seq = 0;
		ertm->rtm_errno = 0;
		ertm->rtm_addrs = info.rti_addrs;
		/* Link-layer reachability info, when the llinfo provides it */
		if (rt->rt_llinfo_get_ri == NULL) {
			bzero(&ertm->rtm_ri, sizeof (ertm->rtm_ri));
			ertm->rtm_ri.ri_rssi = IFNET_RSSI_UNKNOWN;
			ertm->rtm_ri.ri_lqm = IFNET_LQM_THRESH_OFF;
			ertm->rtm_ri.ri_npm = IFNET_NPM_THRESH_UNKNOWN;
		} else {
			rt->rt_llinfo_get_ri(rt, &ertm->rtm_ri);
		}
		error = SYSCTL_OUT(w->w_req, (caddr_t)ertm, size);
	}

done:
	RT_UNLOCK(rt);
	kauth_cred_unref(&cred);
	return (error);
}
1578
1579/*
1580 * rdar://9307819
1581 * To avoid to call copyout() while holding locks and to cause problems
1582 * in the paging path, sysctl_iflist() and sysctl_iflist2() contstruct
1583 * the list in two passes. In the first pass we compute the total
1584 * length of the data we are going to copyout, then we release
1585 * all locks to allocate a temporary buffer that gets filled
1586 * in the second pass.
1587 *
1588 * Note that we are verifying the assumption that _MALLOC returns a buffer
1589 * that is at least 32 bits aligned and that the messages and addresses are
1590 * 32 bits aligned.
1591 */
static int
sysctl_iflist(int af, struct walkarg *w)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct rt_addrinfo info;
	int len, error = 0;
	int pass = 0;
	int total_len = 0, current_len = 0;
	char *total_buffer = NULL, *cp = NULL;
	kauth_cred_t cred;

	cred = kauth_cred_proc_ref(current_proc());

	bzero((caddr_t)&info, sizeof (info));

	/*
	 * Pass 0 sizes the output; pass 1 fills the temporary buffer
	 * allocated between passes (see the block comment above), so that
	 * SYSCTL_OUT is never called while holding locks.
	 */
	for (pass = 0; pass < 2; pass++) {
		ifnet_head_lock_shared();

		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (error)
				break;
			/* Non-zero w_arg restricts output to one interface */
			if (w->w_arg && w->w_arg != ifp->if_index)
				continue;
			ifnet_lock_shared(ifp);
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold the ifa
			 * lock. The link address is always present; it's
			 * never freed.
			 */
			ifa = ifp->if_lladdr;
			info.rti_info[RTAX_IFP] = ifa->ifa_addr;
			len = rt_msg2(RTM_IFINFO, &info, NULL, NULL, &cred);
			if (pass == 0) {
				total_len += len;
			} else {
				struct if_msghdr *ifm;

				/* Interface set grew between the two passes */
				if (current_len + len > total_len) {
					ifnet_lock_done(ifp);
					error = ENOBUFS;
					break;
				}
				info.rti_info[RTAX_IFP] = ifa->ifa_addr;
				len = rt_msg2(RTM_IFINFO, &info,
				    (caddr_t)cp, NULL, &cred);
				info.rti_info[RTAX_IFP] = NULL;

				ifm = (struct if_msghdr *)(void *)cp;
				ifm->ifm_index = ifp->if_index;
				ifm->ifm_flags = (u_short)ifp->if_flags;
				if_data_internal_to_if_data(ifp, &ifp->if_data,
				    &ifm->ifm_data);
				ifm->ifm_addrs = info.rti_addrs;

				cp += len;
				VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
				current_len += len;
			}
			/* One RTM_NEWADDR per remaining address on this ifp */
			while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
				IFA_LOCK(ifa);
				/* Non-zero af restricts output to one family */
				if (af && af != ifa->ifa_addr->sa_family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				info.rti_info[RTAX_IFA] = ifa->ifa_addr;
				info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
				info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
				len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
				    &cred);
				if (pass == 0) {
					total_len += len;
				} else {
					struct ifa_msghdr *ifam;

					if (current_len + len > total_len) {
						IFA_UNLOCK(ifa);
						error = ENOBUFS;
						break;
					}
					len = rt_msg2(RTM_NEWADDR, &info,
					    (caddr_t)cp, NULL, &cred);

					ifam = (struct ifa_msghdr *)(void *)cp;
					ifam->ifam_index =
					    ifa->ifa_ifp->if_index;
					ifam->ifam_flags = ifa->ifa_flags;
					ifam->ifam_metric = ifa->ifa_metric;
					ifam->ifam_addrs = info.rti_addrs;

					cp += len;
					VERIFY(IS_P2ALIGNED(cp,
					    sizeof (u_int32_t)));
					current_len += len;
				}
				IFA_UNLOCK(ifa);
			}
			ifnet_lock_done(ifp);
			info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
			    info.rti_info[RTAX_BRD] = NULL;
		}

		ifnet_head_done();

		if (error != 0) {
			if (error == ENOBUFS)
				printf("%s: current_len (%d) + len (%d) > "
				    "total_len (%d)\n", __func__, current_len,
				    len, total_len);
			break;
		}

		if (pass == 0) {
			/* Better to return zero length buffer than ENOBUFS */
			if (total_len == 0)
				total_len = 1;
			/* 12.5% slack for addresses added between passes */
			total_len += total_len >> 3;
			total_buffer = _MALLOC(total_len, M_RTABLE,
			    M_ZERO | M_WAITOK);
			if (total_buffer == NULL) {
				printf("%s: _MALLOC(%d) failed\n", __func__,
				    total_len);
				error = ENOBUFS;
				break;
			}
			cp = total_buffer;
			VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
		} else {
			/* Second pass done; copy out with no locks held */
			error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
			if (error)
				break;
		}
	}

	if (total_buffer != NULL)
		_FREE(total_buffer, M_RTABLE);

	kauth_cred_unref(&cred);
	return (error);
}
1733
/*
 * Like sysctl_iflist(), but emits the extended message formats
 * (RTM_IFINFO2 / RTM_NEWMADDR2) and also reports multicast group
 * memberships.  Same two-pass size-then-fill scheme.
 */
static int
sysctl_iflist2(int af, struct walkarg *w)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct rt_addrinfo info;
	int len, error = 0;
	int pass = 0;
	int total_len = 0, current_len = 0;
	char *total_buffer = NULL, *cp = NULL;
	kauth_cred_t cred;

	cred = kauth_cred_proc_ref(current_proc());

	bzero((caddr_t)&info, sizeof (info));

	for (pass = 0; pass < 2; pass++) {
		struct ifmultiaddr *ifma;

		ifnet_head_lock_shared();

		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			if (error)
				break;
			/* Non-zero w_arg restricts output to one interface */
			if (w->w_arg && w->w_arg != ifp->if_index)
				continue;
			ifnet_lock_shared(ifp);
			/*
			 * Holding ifnet lock here prevents the link address
			 * from changing contents, so no need to hold the ifa
			 * lock. The link address is always present; it's
			 * never freed.
			 */
			ifa = ifp->if_lladdr;
			info.rti_info[RTAX_IFP] = ifa->ifa_addr;
			len = rt_msg2(RTM_IFINFO2, &info, NULL, NULL, &cred);
			if (pass == 0) {
				total_len += len;
			} else {
				struct if_msghdr2 *ifm;

				/* Interface set grew between the two passes */
				if (current_len + len > total_len) {
					ifnet_lock_done(ifp);
					error = ENOBUFS;
					break;
				}
				info.rti_info[RTAX_IFP] = ifa->ifa_addr;
				len = rt_msg2(RTM_IFINFO2, &info,
				    (caddr_t)cp, NULL, &cred);
				info.rti_info[RTAX_IFP] = NULL;

				ifm = (struct if_msghdr2 *)(void *)cp;
				ifm->ifm_addrs = info.rti_addrs;
				ifm->ifm_flags = (u_short)ifp->if_flags;
				ifm->ifm_index = ifp->if_index;
				/* Send-queue depth and drop statistics */
				ifm->ifm_snd_len = IFCQ_LEN(&ifp->if_snd);
				ifm->ifm_snd_maxlen = IFCQ_MAXLEN(&ifp->if_snd);
				ifm->ifm_snd_drops =
				    ifp->if_snd.ifcq_dropcnt.packets;
				ifm->ifm_timer = ifp->if_timer;
				if_data_internal_to_if_data64(ifp,
				    &ifp->if_data, &ifm->ifm_data);

				cp += len;
				VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
				current_len += len;
			}
			/* One RTM_NEWADDR per remaining address on this ifp */
			while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
				IFA_LOCK(ifa);
				if (af && af != ifa->ifa_addr->sa_family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				info.rti_info[RTAX_IFA] = ifa->ifa_addr;
				info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
				info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
				len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
				    &cred);
				if (pass == 0) {
					total_len += len;
				} else {
					struct ifa_msghdr *ifam;

					if (current_len + len > total_len) {
						IFA_UNLOCK(ifa);
						error = ENOBUFS;
						break;
					}
					len = rt_msg2(RTM_NEWADDR, &info,
					    (caddr_t)cp, NULL, &cred);

					ifam = (struct ifa_msghdr *)(void *)cp;
					ifam->ifam_index =
					    ifa->ifa_ifp->if_index;
					ifam->ifam_flags = ifa->ifa_flags;
					ifam->ifam_metric = ifa->ifa_metric;
					ifam->ifam_addrs = info.rti_addrs;

					cp += len;
					VERIFY(IS_P2ALIGNED(cp,
					    sizeof (u_int32_t)));
					current_len += len;
				}
				IFA_UNLOCK(ifa);
			}
			if (error) {
				ifnet_lock_done(ifp);
				break;
			}

			/* One RTM_NEWMADDR2 per multicast membership */
			for (ifma = LIST_FIRST(&ifp->if_multiaddrs);
			    ifma != NULL; ifma = LIST_NEXT(ifma, ifma_link)) {
				struct ifaddr *ifa0;

				IFMA_LOCK(ifma);
				if (af && af != ifma->ifma_addr->sa_family) {
					IFMA_UNLOCK(ifma);
					continue;
				}
				bzero((caddr_t)&info, sizeof (info));
				info.rti_info[RTAX_IFA] = ifma->ifma_addr;
				/*
				 * Holding ifnet lock here prevents the link
				 * address from changing contents, so no need
				 * to hold the ifa0 lock. The link address is
				 * always present; it's never freed.
				 */
				ifa0 = ifp->if_lladdr;
				info.rti_info[RTAX_IFP] = ifa0->ifa_addr;
				/* Link-layer mapping reported as ``gateway'' */
				if (ifma->ifma_ll != NULL)
					info.rti_info[RTAX_GATEWAY] =
					    ifma->ifma_ll->ifma_addr;
				len = rt_msg2(RTM_NEWMADDR2, &info, NULL, NULL,
				    &cred);
				if (pass == 0) {
					total_len += len;
				} else {
					struct ifma_msghdr2 *ifmam;

					if (current_len + len > total_len) {
						IFMA_UNLOCK(ifma);
						error = ENOBUFS;
						break;
					}
					len = rt_msg2(RTM_NEWMADDR2, &info,
					    (caddr_t)cp, NULL, &cred);

					ifmam =
					    (struct ifma_msghdr2 *)(void *)cp;
					ifmam->ifmam_addrs = info.rti_addrs;
					ifmam->ifmam_flags = 0;
					ifmam->ifmam_index =
					    ifma->ifma_ifp->if_index;
					ifmam->ifmam_refcount =
					    ifma->ifma_reqcnt;

					cp += len;
					VERIFY(IS_P2ALIGNED(cp,
					    sizeof (u_int32_t)));
					current_len += len;
				}
				IFMA_UNLOCK(ifma);
			}
			ifnet_lock_done(ifp);
			info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
			    info.rti_info[RTAX_BRD] = NULL;
		}
		ifnet_head_done();

		if (error) {
			if (error == ENOBUFS)
				printf("%s: current_len (%d) + len (%d) > "
				    "total_len (%d)\n", __func__, current_len,
				    len, total_len);
			break;
		}

		if (pass == 0) {
			/* Better to return zero length buffer than ENOBUFS */
			if (total_len == 0)
				total_len = 1;
			/* 12.5% slack for entries added between passes */
			total_len += total_len >> 3;
			total_buffer = _MALLOC(total_len, M_RTABLE,
			    M_ZERO | M_WAITOK);
			if (total_buffer == NULL) {
				printf("%s: _MALLOC(%d) failed\n", __func__,
				    total_len);
				error = ENOBUFS;
				break;
			}
			cp = total_buffer;
			VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
		} else {
			/* Second pass done; copy out with no locks held */
			error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
			if (error)
				break;
		}
	}

	if (total_buffer != NULL)
		_FREE(total_buffer, M_RTABLE);

	kauth_cred_unref(&cred);
	return (error);
}
1939
1940
1941static int
1942sysctl_rtstat(struct sysctl_req *req)
1943{
1944 return (SYSCTL_OUT(req, &rtstat, sizeof (struct rtstat)));
1945}
1946
1947static int
1948sysctl_rttrash(struct sysctl_req *req)
1949{
1950 return (SYSCTL_OUT(req, &rttrash, sizeof (rttrash)));
1951}
1952
/*
 * Handler for the routing sysctl subtree.  The remaining name
 * components select the address family (0 = all), the dump operation,
 * and an op-specific argument (flag mask or interface index).
 * Read-only: any attempt to write returns EPERM.
 */
static int
sysctl_rtsock SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct radix_node_head *rnh;
	int i, error = EINVAL;
	u_char af;
	struct walkarg w;

	name ++;
	namelen--;
	if (req->newptr)
		return (EPERM);
	if (namelen != 3)
		return (EINVAL);
	af = name[0];
	Bzero(&w, sizeof (w));
	w.w_op = name[1];
	w.w_arg = name[2];
	w.w_req = req;

	switch (w.w_op) {

	case NET_RT_DUMP:
	case NET_RT_DUMP2:
	case NET_RT_FLAGS:
		/* Walk every per-AF routing table (or just `af'), locked */
		lck_mtx_lock(rnh_lock);
		for (i = 1; i <= AF_MAX; i++)
			if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
			    (error = rnh->rnh_walktree(rnh,
			    sysctl_dumpentry, &w)))
				break;
		lck_mtx_unlock(rnh_lock);
		break;
	case NET_RT_DUMPX:
	case NET_RT_DUMPX_FLAGS:
		/* Same walk, but with the extended per-entry dumper */
		lck_mtx_lock(rnh_lock);
		for (i = 1; i <= AF_MAX; i++)
			if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
			    (error = rnh->rnh_walktree(rnh,
			    sysctl_dumpentry_ext, &w)))
				break;
		lck_mtx_unlock(rnh_lock);
		break;
	case NET_RT_IFLIST:
		error = sysctl_iflist(af, &w);
		break;
	case NET_RT_IFLIST2:
		error = sysctl_iflist2(af, &w);
		break;
	case NET_RT_STAT:
		error = sysctl_rtstat(req);
		break;
	case NET_RT_TRASH:
		error = sysctl_rttrash(req);
		break;
	}
	/* Release the staging buffer rt_msg2() may have allocated */
	if (w.w_tmem != NULL)
		FREE(w.w_tmem, M_RTABLE);
	return (error);
}
2016
2017/*
2018 * Definitions of protocols supported in the ROUTE domain.
2019 */
static struct protosw routesw[] = {
{
	.pr_type = SOCK_RAW,
	.pr_protocol = 0,
	/* Records delivered atomically, each with an address attached */
	.pr_flags = PR_ATOMIC|PR_ADDR,
	.pr_output = route_output,
	.pr_ctlinput = raw_ctlinput,
	.pr_init = raw_init,
	.pr_usrreqs = &route_usrreqs,
}
};
2031
/* Number of entries in routesw[] */
static int route_proto_count = (sizeof (routesw) / sizeof (struct protosw));
2033
/* PF_ROUTE domain descriptor; initialized via route_dinit() */
struct domain routedomain_s = {
	.dom_family = PF_ROUTE,
	.dom_name = "route",
	.dom_init = route_dinit,
};
2039
2040static void
2041route_dinit(struct domain *dp)
2042{
2043 struct protosw *pr;
2044 int i;
2045
2046 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
2047 VERIFY(routedomain == NULL);
2048
2049 routedomain = dp;
2050
2051 for (i = 0, pr = &routesw[0]; i < route_proto_count; i++, pr++)
2052 net_add_proto(pr, dp, 1);
2053
2054 route_init();
2055}