/*
 * Copyright (c) 1999-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <sys/random.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>
#include <sys/mcache.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <net/kpi_protocol.h>

#include <net/if_types.h>
#include <net/if_llreach.h>
#include <net/kpi_interfacefilter.h>

#if INET
#include <netinet/in_var.h>
#include <netinet/igmp_var.h>
#endif /* INET */

#if INET6
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/mld6_var.h>
#endif /* INET6 */

#if NETAT
#include <netat/at_var.h>
#endif /* NETAT */

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#include <mach/thread_act.h>
#include <mach/sdt.h>

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

#if PF
#include <net/pfvar.h>
#endif /* PF */

#define	DBG_LAYER_BEG		DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define	DBG_LAYER_END		DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define	DBG_FNC_DLIL_INPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define	DBG_FNC_DLIL_OUTPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define	DBG_FNC_DLIL_IFOUT	DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))

#define	MAX_FRAME_TYPE_SIZE	4	/* LONGWORDS */
#define	MAX_LINKADDR		4	/* LONGWORDS */
#define	M_NKE			M_IFADDR

#if 1
#define	DLIL_PRINTF	printf
#else
#define	DLIL_PRINTF	kprintf
#endif

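/*
 * Compile-time assertion: the case labels must be constant expressions,
 * and if "x" evaluates to zero it collides with "case 0", so the build
 * fails with a duplicate-case error; a nonzero "x" compiles to nothing.
 */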
#define	_CASSERT(x)	\
	switch (0) { case 0: case (x): ; }

#define	IF_DATA_REQUIRE_ALIGNED_64(f)	\
	_CASSERT(!(offsetof(struct if_data_internal, f) % sizeof (u_int64_t)))

#define	IFNET_IF_DATA_REQUIRE_ALIGNED_64(f)	\
	_CASSERT(!(offsetof(struct ifnet, if_data.f) % sizeof (u_int64_t)))

#define	IFNET_IF_TC_REQUIRE_ALIGNED_64(f)	\
	_CASSERT(!(offsetof(struct ifnet, if_tc.f) % sizeof (u_int64_t)))

enum {
	kProtoKPI_v1	= 1,
	kProtoKPI_v2	= 2
};

/*
 * List of if_proto structures in if_proto_hash[] is protected by
 * the ifnet lock. The rest of the fields are initialized at protocol
 * attach time and never change, thus no lock required as long as
 * a reference to it is valid, via if_proto_ref().
 */
struct if_proto {
	SLIST_ENTRY(if_proto)	next_hash;
	u_int32_t		refcount;
	u_int32_t		detached;
	struct ifnet		*ifp;
	protocol_family_t	protocol_family;
	int			proto_kpi;
	union {
		struct {
			proto_media_input	input;
			proto_media_preout	pre_output;
			proto_media_event	event;
			proto_media_ioctl	ioctl;
			proto_media_detached	detached;
			proto_media_resolve_multi resolve_multi;
			proto_media_send_arp	send_arp;
		} v1;
		struct {
			proto_media_input_v2	input;
			proto_media_preout	pre_output;
			proto_media_event	event;
			proto_media_ioctl	ioctl;
			proto_media_detached	detached;
			proto_media_resolve_multi resolve_multi;
			proto_media_send_arp	send_arp;
		} v2;
	} kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);

#define	DLIL_SDLMAXLEN	64
#define	DLIL_SDLDATALEN	\
	(DLIL_SDLMAXLEN - offsetof(struct sockaddr_dl, sdl_data[0]))

struct dlil_ifnet {
	struct ifnet	dl_if;			/* public ifnet */
	/*
	 * dlil private fields, protected by dl_if_lock
	 */
	decl_lck_mtx_data(, dl_if_lock);
	TAILQ_ENTRY(dlil_ifnet) dl_if_link;	/* dlil_ifnet link */
	u_int32_t dl_if_flags;			/* flags (below) */
	u_int32_t dl_if_refcnt;			/* refcnt */
	void (*dl_if_trace)(struct dlil_ifnet *, int); /* ref trace callback */
	void	*dl_if_uniqueid;		/* unique interface id */
	size_t	dl_if_uniqueid_len;		/* length of the unique id */
	char	dl_if_namestorage[IFNAMSIZ];	/* interface name storage */
	struct {
		struct ifaddr	ifa;		/* lladdr ifa */
		u_int8_t	asdl[DLIL_SDLMAXLEN]; /* addr storage */
		u_int8_t	msdl[DLIL_SDLMAXLEN]; /* mask storage */
	} dl_if_lladdr;
	ctrace_t	dl_if_attach;		/* attach PC stacktrace */
	ctrace_t	dl_if_detach;		/* detach PC stacktrace */
};

/* Values for dl_if_flags (private to DLIL) */
#define	DLIF_INUSE	0x1	/* DLIL ifnet recycler, ifnet in use */
#define	DLIF_REUSE	0x2	/* DLIL ifnet recycles, ifnet is not new */
#define	DLIF_DEBUG	0x4	/* has debugging info */

#define	IF_REF_TRACE_HIST_SIZE	8	/* size of ref trace history */

/* For gdb */
__private_extern__ unsigned int if_ref_trace_hist_size = IF_REF_TRACE_HIST_SIZE;

struct dlil_ifnet_dbg {
	struct dlil_ifnet	dldbg_dlif;		/* dlil_ifnet */
	u_int16_t		dldbg_if_refhold_cnt;	/* # ifnet references */
	u_int16_t		dldbg_if_refrele_cnt;	/* # ifnet releases */
	/*
	 * Circular lists of ifnet_{reference,release} callers.
	 */
	ctrace_t		dldbg_if_refhold[IF_REF_TRACE_HIST_SIZE];
	ctrace_t		dldbg_if_refrele[IF_REF_TRACE_HIST_SIZE];
};

#define	DLIL_TO_IFP(s)	(&s->dl_if)
#define	IFP_TO_DLIL(s)	((struct dlil_ifnet *)s)

struct ifnet_filter {
	TAILQ_ENTRY(ifnet_filter)	filt_next;
	u_int32_t			filt_skip;
	ifnet_t				filt_ifp;
	const char			*filt_name;
	void				*filt_cookie;
	protocol_family_t		filt_protocol;
	iff_input_func			filt_input;
	iff_output_func			filt_output;
	iff_event_func			filt_event;
	iff_ioctl_func			filt_ioctl;
	iff_detached_func		filt_detached;
};

struct proto_input_entry;

static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
static lck_grp_t *dlil_lock_group;
lck_grp_t *ifnet_lock_group;
static lck_grp_t *ifnet_head_lock_group;
lck_attr_t *ifnet_lock_attr;
decl_lck_rw_data(, ifnet_head_lock);
decl_lck_mtx_data(, dlil_ifnet_lock);
u_int32_t dlil_filter_count = 0;
extern u_int32_t ipv4_ll_arp_aware;

#if DEBUG
static unsigned int ifnet_debug = 1;	/* debugging (enabled) */
#else
static unsigned int ifnet_debug;	/* debugging (disabled) */
#endif /* !DEBUG */
static unsigned int dlif_size;		/* size of dlil_ifnet to allocate */
static unsigned int dlif_bufsize;	/* size of dlif_size + headroom */
static struct zone *dlif_zone;		/* zone for dlil_ifnet */

#define	DLIF_ZONE_MAX		64		/* maximum elements in zone */
#define	DLIF_ZONE_NAME		"ifnet"		/* zone name */

static unsigned int dlif_filt_size;	/* size of ifnet_filter */
static struct zone *dlif_filt_zone;	/* zone for ifnet_filter */

#define	DLIF_FILT_ZONE_MAX	8		/* maximum elements in zone */
#define	DLIF_FILT_ZONE_NAME	"ifnet_filter"	/* zone name */

static unsigned int dlif_inp_size;	/* size of dlil_threading_info */
static struct zone *dlif_inp_zone;	/* zone for dlil_threading_info */

#define	DLIF_INP_ZONE_MAX	DLIF_ZONE_MAX	/* maximum elements in zone */
#define	DLIF_INP_ZONE_NAME	"ifnet_thread"	/* zone name */

static unsigned int dlif_phash_size;	/* size of ifnet proto hash table */
static struct zone *dlif_phash_zone;	/* zone for ifnet proto hash table */

#define	DLIF_PHASH_ZONE_MAX	DLIF_ZONE_MAX	/* maximum elements in zone */
#define	DLIF_PHASH_ZONE_NAME	"ifnet_proto_hash" /* zone name */

static unsigned int dlif_proto_size;	/* size of if_proto */
static struct zone *dlif_proto_zone;	/* zone for if_proto */

#define	DLIF_PROTO_ZONE_MAX	(DLIF_ZONE_MAX*2) /* maximum elements in zone */
#define	DLIF_PROTO_ZONE_NAME	"ifnet_proto"	/* zone name */

/*
 * Updating this variable should be done by first acquiring the global
 * radix node head (rnh_lock), in tandem with setting/clearing the
 * PR_AGGDRAIN for routedomain.
 */
u_int32_t ifnet_aggressive_drainers;
static u_int32_t net_rtref;

static struct dlil_threading_info dlil_lo_thread;
__private_extern__ struct dlil_threading_info *dlil_lo_thread_ptr = &dlil_lo_thread;

static struct mbuf *dlil_lo_input_mbuf_head = NULL;
static struct mbuf *dlil_lo_input_mbuf_tail = NULL;

static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_if_trace(struct dlil_ifnet *, int);
static void if_proto_ref(struct if_proto *);
static void if_proto_free(struct if_proto *);
static struct if_proto *find_attached_proto(struct ifnet *, u_int32_t);
static int dlil_ifp_proto_count(struct ifnet *);
static void if_flt_monitor_busy(struct ifnet *);
static void if_flt_monitor_unbusy(struct ifnet *);
static void if_flt_monitor_enter(struct ifnet *);
static void if_flt_monitor_leave(struct ifnet *);
static int dlil_interface_filters_input(struct ifnet *, struct mbuf **,
    char **, protocol_family_t);
static int dlil_interface_filters_output(struct ifnet *, struct mbuf **,
    protocol_family_t);
static struct ifaddr *dlil_alloc_lladdr(struct ifnet *,
    const struct sockaddr_dl *);
static int ifnet_lookup(struct ifnet *);
static void if_purgeaddrs(struct ifnet *);

static errno_t ifproto_media_input_v1(struct ifnet *, protocol_family_t,
    struct mbuf *, char *);
static errno_t ifproto_media_input_v2(struct ifnet *, protocol_family_t,
    struct mbuf *);
static errno_t ifproto_media_preout(struct ifnet *, protocol_family_t,
    mbuf_t *, const struct sockaddr *, void *, char *, char *);
static void ifproto_media_event(struct ifnet *, protocol_family_t,
    const struct kev_msg *);
static errno_t ifproto_media_ioctl(struct ifnet *, protocol_family_t,
    unsigned long, void *);
static errno_t ifproto_media_resolve_multi(ifnet_t, const struct sockaddr *,
    struct sockaddr_dl *, size_t);
static errno_t ifproto_media_send_arp(struct ifnet *, u_short,
    const struct sockaddr_dl *, const struct sockaddr *,
    const struct sockaddr_dl *, const struct sockaddr *);

static errno_t ifp_if_output(struct ifnet *, struct mbuf *);
static errno_t ifp_if_demux(struct ifnet *, struct mbuf *, char *,
    protocol_family_t *);
static errno_t ifp_if_add_proto(struct ifnet *, protocol_family_t,
    const struct ifnet_demux_desc *, u_int32_t);
static errno_t ifp_if_del_proto(struct ifnet *, protocol_family_t);
static errno_t ifp_if_check_multi(struct ifnet *, const struct sockaddr *);
static errno_t ifp_if_framer(struct ifnet *, struct mbuf **,
    const struct sockaddr *, const char *, const char *);
static errno_t ifp_if_ioctl(struct ifnet *, unsigned long, void *);
static errno_t ifp_if_set_bpf_tap(struct ifnet *, bpf_tap_mode, bpf_packet_func);
static void ifp_if_free(struct ifnet *);
static void ifp_if_event(struct ifnet *, const struct kev_msg *);

static void dlil_input_thread_func(struct dlil_threading_info *inpthread);
static int dlil_create_input_thread(ifnet_t, struct dlil_threading_info *);

static void ifnet_delayed_thread_func(void);
static void ifnet_detach_final(struct ifnet *);
static void ifnet_detaching_enqueue(struct ifnet *);
static struct ifnet *ifnet_detaching_dequeue(void);

static void ifp_src_route_copyout(struct ifnet *, struct route *);
static void ifp_src_route_copyin(struct ifnet *, struct route *);
#if INET6
static void ifp_src_route6_copyout(struct ifnet *, struct route_in6 *);
static void ifp_src_route6_copyin(struct ifnet *, struct route_in6 *);
#endif /* INET6 */

/* The following are protected by dlil_ifnet_lock */
static TAILQ_HEAD(, ifnet) ifnet_detaching_head;
static u_int32_t ifnet_detaching_cnt;
static void *ifnet_delayed_run;	/* wait channel for detaching thread */

extern void bpfdetach(struct ifnet *);
extern void proto_input_run(void);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

#if DEBUG
static int dlil_verbose = 1;
#else
static int dlil_verbose = 0;
#endif /* DEBUG */
static int dlil_multithreaded_input = 1;
static int cur_dlil_input_threads = 0;
#if IFNET_INPUT_SANITY_CHK
static int dlil_lo_input_mbuf_count = 0;
/* sanity checking of input packet lists received */
static int dlil_input_sanity_check = 0;
#endif

SYSCTL_DECL(_net_link_generic_system);

SYSCTL_INT(_net_link_generic_system, OID_AUTO, dlil_verbose, CTLFLAG_RW,
    &dlil_verbose, 0, "Log DLIL error messages");

SYSCTL_INT(_net_link_generic_system, OID_AUTO, multi_threaded_input, CTLFLAG_RW,
    &dlil_multithreaded_input, 0, "Use multiple input threads for DLIL input");

#if IFNET_INPUT_SANITY_CHK
SYSCTL_INT(_net_link_generic_system, OID_AUTO, dlil_input_sanity_check,
    CTLFLAG_RW, &dlil_input_sanity_check, 0,
    "Turn on sanity checking in DLIL input");
#endif

unsigned int net_affinity = 1;
static kern_return_t dlil_affinity_set(struct thread *, u_int32_t);

extern u_int32_t inject_buckets;

static lck_grp_attr_t *dlil_grp_attributes = NULL;
static lck_attr_t *dlil_lck_attributes = NULL;
static lck_grp_t *dlil_input_lock_grp = NULL;

#define	PROTO_HASH_SLOTS	0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_int32_t protocol_family)
{
	/*
	 * dlil_proto_unplumb_all() depends on the mapping between
	 * the hash bucket index and the protocol family defined
	 * here; future changes must be applied there as well.
	 */
	switch (protocol_family) {
	case PF_INET:
		return (0);
	case PF_INET6:
		return (1);
	case PF_APPLETALK:
		return (2);
	case PF_VLAN:
		return (3);
	case PF_UNSPEC:
	default:
		return (4);
	}
}

/*
 * Caller must already be holding ifnet lock.
 */
static struct if_proto *
find_attached_proto(struct ifnet *ifp, u_int32_t protocol_family)
{
	struct if_proto *proto = NULL;
	u_int32_t i = proto_hash_value(protocol_family);

	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);

	if (ifp->if_proto_hash != NULL)
		proto = SLIST_FIRST(&ifp->if_proto_hash[i]);

	while (proto != NULL && proto->protocol_family != protocol_family)
		proto = SLIST_NEXT(proto, next_hash);

	if (proto != NULL)
		if_proto_ref(proto);

	return (proto);
}

static void
if_proto_ref(struct if_proto *proto)
{
	atomic_add_32(&proto->refcount, 1);
}

extern void if_rtproto_del(struct ifnet *ifp, int protocol);

static void
if_proto_free(struct if_proto *proto)
{
	u_int32_t oldval;
	struct ifnet *ifp = proto->ifp;
	u_int32_t proto_family = proto->protocol_family;
	struct kev_dl_proto_data ev_pr_data;

	oldval = atomic_add_32_ov(&proto->refcount, -1);
	if (oldval > 1)
		return;

	/* No more reference on this, protocol must have been detached */
	VERIFY(proto->detached);

	if (proto->proto_kpi == kProtoKPI_v1) {
		if (proto->kpi.v1.detached)
			proto->kpi.v1.detached(ifp, proto->protocol_family);
	}
	if (proto->proto_kpi == kProtoKPI_v2) {
		if (proto->kpi.v2.detached)
			proto->kpi.v2.detached(ifp, proto->protocol_family);
	}

	/*
	 * Cleanup routes that may still be in the routing table for that
	 * interface/protocol pair.
	 */
	if_rtproto_del(ifp, proto_family);

	/*
	 * The reserved field carries the number of protocols still attached
	 * (subject to change).
	 */
	ifnet_lock_shared(ifp);
	ev_pr_data.proto_family = proto_family;
	ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
	ifnet_lock_done(ifp);

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
	    (struct net_event_data *)&ev_pr_data,
	    sizeof (struct kev_dl_proto_data));

	zfree(dlif_proto_zone, proto);
}

__private_extern__ void
ifnet_lock_assert(struct ifnet *ifp, ifnet_lock_assert_t what)
{
	unsigned int type = 0;
	int ass = 1;

	switch (what) {
	case IFNET_LCK_ASSERT_EXCLUSIVE:
		type = LCK_RW_ASSERT_EXCLUSIVE;
		break;

	case IFNET_LCK_ASSERT_SHARED:
		type = LCK_RW_ASSERT_SHARED;
		break;

	case IFNET_LCK_ASSERT_OWNED:
		type = LCK_RW_ASSERT_HELD;
		break;

	case IFNET_LCK_ASSERT_NOTOWNED:
		/* nothing to do here for RW lock; bypass assert */
		ass = 0;
		break;

	default:
		panic("bad ifnet assert type: %d", what);
		/* NOTREACHED */
	}
	if (ass)
		lck_rw_assert(&ifp->if_lock, type);
}

__private_extern__ void
ifnet_lock_shared(struct ifnet *ifp)
{
	lck_rw_lock_shared(&ifp->if_lock);
}

__private_extern__ void
ifnet_lock_exclusive(struct ifnet *ifp)
{
	lck_rw_lock_exclusive(&ifp->if_lock);
}

__private_extern__ void
ifnet_lock_done(struct ifnet *ifp)
{
	lck_rw_done(&ifp->if_lock);
}

__private_extern__ void
ifnet_head_lock_shared(void)
{
	lck_rw_lock_shared(&ifnet_head_lock);
}

__private_extern__ void
ifnet_head_lock_exclusive(void)
{
	lck_rw_lock_exclusive(&ifnet_head_lock);
}

__private_extern__ void
ifnet_head_done(void)
{
	lck_rw_done(&ifnet_head_lock);
}

/*
 * Caller must already be holding ifnet lock.
 */
static int
dlil_ifp_proto_count(struct ifnet *ifp)
{
	int i, count = 0;

	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);

	if (ifp->if_proto_hash == NULL)
		goto done;

	for (i = 0; i < PROTO_HASH_SLOTS; i++) {
		struct if_proto *proto;
		SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
			count++;
		}
	}
done:
	return (count);
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_int32_t event_subclass,
    u_int32_t event_code, struct net_event_data *event_data,
    u_int32_t event_data_len)
{
	struct net_event_data ev_data;
	struct kev_msg ev_msg;

	bzero(&ev_msg, sizeof (ev_msg));
	bzero(&ev_data, sizeof (ev_data));
	/*
	 * a net event always starts with a net_event_data structure
	 * but the caller can generate a simple net event or
	 * provide a longer event structure to post
	 */
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = event_subclass;
	ev_msg.event_code = event_code;

	if (event_data == NULL) {
		event_data = &ev_data;
		event_data_len = sizeof (struct net_event_data);
	}

	strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
	event_data->if_family = ifp->if_family;
	event_data->if_unit = (u_int32_t)ifp->if_unit;

	ev_msg.dv[0].data_length = event_data_len;
	ev_msg.dv[0].data_ptr = event_data;
	ev_msg.dv[1].data_length = 0;

	dlil_event_internal(ifp, &ev_msg);
}
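
/*
 * A minimal sketch of a typical call (the KEV_DL_PROTO_DETACHED post in
 * if_proto_free() above is a complete example): passing a NULL event_data
 * posts a bare event carrying only the interface name/family/unit filled
 * in here, for instance:
 *
 *	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, NULL, 0);
 */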

static int
dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inputthread)
{
	int error;

	bzero(inputthread, sizeof (*inputthread));
	/* loopback ifp may not be configured at dlil_init time. */
	if (ifp == lo_ifp) {
		(void) strlcat(inputthread->input_name,
		    "dlil_input_main_thread_mtx", DLIL_THREADNAME_LEN);
	} else {
		(void) snprintf(inputthread->input_name, DLIL_THREADNAME_LEN,
		    "dlil_input_%s%d_mtx", ifp->if_name, ifp->if_unit);
	}

	inputthread->lck_grp = lck_grp_alloc_init(inputthread->input_name,
	    dlil_grp_attributes);
	lck_mtx_init(&inputthread->input_lck, inputthread->lck_grp,
	    dlil_lck_attributes);

	error = kernel_thread_start((thread_continue_t)dlil_input_thread_func,
	    inputthread, &inputthread->input_thread);
	if (error == 0) {
		ml_thread_policy(inputthread->input_thread, MACHINE_GROUP,
		    (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
		/*
		 * Except for the loopback dlil input thread, we create
		 * an affinity set so that the matching workloop thread
		 * can be scheduled on the same processor set.
		 */
		if (net_affinity && inputthread != dlil_lo_thread_ptr) {
			struct thread *tp = inputthread->input_thread;
			u_int32_t tag;
			/*
			 * Randomize to reduce the probability
			 * of affinity tag namespace collision.
			 */
			read_random(&tag, sizeof (tag));
			if (dlil_affinity_set(tp, tag) == KERN_SUCCESS) {
				thread_reference(tp);
				inputthread->tag = tag;
				inputthread->net_affinity = TRUE;
			}
		}
	} else {
		panic("%s: couldn't create thread", __func__);
		/* NOTREACHED */
	}
	OSAddAtomic(1, &cur_dlil_input_threads);
#if DLIL_DEBUG
	printf("%s: threadinfo: %p input_thread=%p threads: cur=%d max=%d\n",
	    __func__, inputthread, inputthread->input_thread,
	    dlil_multithreaded_input, cur_dlil_input_threads);
#endif
	return (error);
}

static kern_return_t
dlil_affinity_set(struct thread *tp, u_int32_t tag)
{
	thread_affinity_policy_data_t policy;

	bzero(&policy, sizeof (policy));
	policy.affinity_tag = tag;
	return (thread_policy_set(tp, THREAD_AFFINITY_POLICY,
	    (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT));
}

void
dlil_init(void)
{
	thread_t thread = THREAD_NULL;

	/*
	 * The following fields must be 64-bit aligned for atomic operations.
	 */
	IF_DATA_REQUIRE_ALIGNED_64(ifi_ipackets);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_ierrors);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_opackets);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_oerrors);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_collisions);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_ibytes);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_obytes);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_imcasts);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_omcasts);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_iqdrops);
	IF_DATA_REQUIRE_ALIGNED_64(ifi_noproto);

	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_ipackets);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_ierrors);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_opackets);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_oerrors);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_collisions);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_ibytes);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_obytes);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_imcasts);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_omcasts);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_iqdrops);
	IFNET_IF_DATA_REQUIRE_ALIGNED_64(ifi_noproto);

	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ibkpackets);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ibkbytes);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_obkpackets);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_obkbytes);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ivipackets);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ivibytes);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ovipackets);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ovibytes);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ivopackets);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ivobytes);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ovopackets);
	IFNET_IF_TC_REQUIRE_ALIGNED_64(ifi_ovobytes);

	/*
	 * These IF_HWASSIST_ flags must be equal to their IFNET_* counterparts.
	 */
	_CASSERT(IF_HWASSIST_CSUM_IP == IFNET_CSUM_IP);
	_CASSERT(IF_HWASSIST_CSUM_TCP == IFNET_CSUM_TCP);
	_CASSERT(IF_HWASSIST_CSUM_UDP == IFNET_CSUM_UDP);
	_CASSERT(IF_HWASSIST_CSUM_IP_FRAGS == IFNET_CSUM_FRAGMENT);
	_CASSERT(IF_HWASSIST_CSUM_FRAGMENT == IFNET_IP_FRAGMENT);
	_CASSERT(IF_HWASSIST_CSUM_TCP_SUM16 == IFNET_CSUM_SUM16);
	_CASSERT(IF_HWASSIST_VLAN_TAGGING == IFNET_VLAN_TAGGING);
	_CASSERT(IF_HWASSIST_VLAN_MTU == IFNET_VLAN_MTU);
	_CASSERT(IF_HWASSIST_TSO_V4 == IFNET_TSO_IPV4);
	_CASSERT(IF_HWASSIST_TSO_V6 == IFNET_TSO_IPV6);

	/*
	 * Make sure we have at least IF_LLREACH_MAXLEN in the llreach info.
	 */
	_CASSERT(IF_LLREACH_MAXLEN <= IF_LLREACHINFO_ADDRLEN);

	PE_parse_boot_argn("net_affinity", &net_affinity,
	    sizeof (net_affinity));

	PE_parse_boot_argn("net_rtref", &net_rtref, sizeof (net_rtref));

	PE_parse_boot_argn("ifnet_debug", &ifnet_debug, sizeof (ifnet_debug));

	dlif_size = (ifnet_debug == 0) ? sizeof (struct dlil_ifnet) :
	    sizeof (struct dlil_ifnet_dbg);
	/* Enforce 64-bit alignment for dlil_ifnet structure */
	dlif_bufsize = dlif_size + sizeof (void *) + sizeof (u_int64_t);
	dlif_bufsize = P2ROUNDUP(dlif_bufsize, sizeof (u_int64_t));
	dlif_zone = zinit(dlif_bufsize, DLIF_ZONE_MAX * dlif_bufsize,
	    0, DLIF_ZONE_NAME);
	if (dlif_zone == NULL) {
		panic("%s: failed allocating %s", __func__, DLIF_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(dlif_zone, Z_EXPAND, TRUE);
	zone_change(dlif_zone, Z_CALLERACCT, FALSE);

	dlif_filt_size = sizeof (struct ifnet_filter);
	dlif_filt_zone = zinit(dlif_filt_size,
	    DLIF_FILT_ZONE_MAX * dlif_filt_size, 0, DLIF_FILT_ZONE_NAME);
	if (dlif_filt_zone == NULL) {
		panic("%s: failed allocating %s", __func__,
		    DLIF_FILT_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(dlif_filt_zone, Z_EXPAND, TRUE);
	zone_change(dlif_filt_zone, Z_CALLERACCT, FALSE);

	dlif_inp_size = sizeof (struct dlil_threading_info);
	dlif_inp_zone = zinit(dlif_inp_size,
	    DLIF_INP_ZONE_MAX * dlif_inp_size, 0, DLIF_INP_ZONE_NAME);
	if (dlif_inp_zone == NULL) {
		panic("%s: failed allocating %s", __func__, DLIF_INP_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(dlif_inp_zone, Z_EXPAND, TRUE);
	zone_change(dlif_inp_zone, Z_CALLERACCT, FALSE);

	dlif_phash_size = sizeof (struct proto_hash_entry) * PROTO_HASH_SLOTS;
	dlif_phash_zone = zinit(dlif_phash_size,
	    DLIF_PHASH_ZONE_MAX * dlif_phash_size, 0, DLIF_PHASH_ZONE_NAME);
	if (dlif_phash_zone == NULL) {
		panic("%s: failed allocating %s", __func__,
		    DLIF_PHASH_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(dlif_phash_zone, Z_EXPAND, TRUE);
	zone_change(dlif_phash_zone, Z_CALLERACCT, FALSE);

	dlif_proto_size = sizeof (struct if_proto);
	dlif_proto_zone = zinit(dlif_proto_size,
	    DLIF_PROTO_ZONE_MAX * dlif_proto_size, 0, DLIF_PROTO_ZONE_NAME);
	if (dlif_proto_zone == NULL) {
		panic("%s: failed allocating %s", __func__,
		    DLIF_PROTO_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(dlif_proto_zone, Z_EXPAND, TRUE);
	zone_change(dlif_proto_zone, Z_CALLERACCT, FALSE);

	ifnet_llreach_init();

	TAILQ_INIT(&dlil_ifnet_head);
	TAILQ_INIT(&ifnet_head);
	TAILQ_INIT(&ifnet_detaching_head);

	/* Setup the lock groups we will use */
	dlil_grp_attributes = lck_grp_attr_alloc_init();

	dlil_lock_group = lck_grp_alloc_init("dlil internal locks",
	    dlil_grp_attributes);
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks",
	    dlil_grp_attributes);
	ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock",
	    dlil_grp_attributes);
	dlil_input_lock_grp = lck_grp_alloc_init("dlil input lock",
	    dlil_grp_attributes);

	/* Setup the lock attributes we will use */
	dlil_lck_attributes = lck_attr_alloc_init();

	ifnet_lock_attr = lck_attr_alloc_init();

	lck_rw_init(&ifnet_head_lock, ifnet_head_lock_group,
	    dlil_lck_attributes);
	lck_mtx_init(&dlil_ifnet_lock, dlil_lock_group, dlil_lck_attributes);

	lck_attr_free(dlil_lck_attributes);
	dlil_lck_attributes = NULL;

	ifa_init();

	/*
	 * Create and start up the first dlil input thread once everything
	 * is initialized.
	 */
	dlil_create_input_thread(lo_ifp, dlil_lo_thread_ptr);

	if (kernel_thread_start((thread_continue_t)ifnet_delayed_thread_func,
	    NULL, &thread) != 0) {
		panic("%s: couldn't create detach thread", __func__);
		/* NOTREACHED */
	}
	thread_deallocate(thread);

#if PF
	/* Initialize the packet filter */
	pfinit();
#endif /* PF */
}

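/*
 * The if_flt_monitor_* routines implement a busy/waiter monitor around
 * the interface filter list: list walkers mark the list busy so that it
 * cannot be modified while if_flt_lock is dropped around filter
 * callbacks, and the attach/detach paths use enter/leave to wait for
 * all walkers to drain before changing the list.
 */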
static void
if_flt_monitor_busy(struct ifnet *ifp)
{
	lck_mtx_assert(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);

	++ifp->if_flt_busy;
	VERIFY(ifp->if_flt_busy != 0);
}

static void
if_flt_monitor_unbusy(struct ifnet *ifp)
{
	if_flt_monitor_leave(ifp);
}

static void
if_flt_monitor_enter(struct ifnet *ifp)
{
	lck_mtx_assert(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);

	while (ifp->if_flt_busy) {
		++ifp->if_flt_waiters;
		(void) msleep(&ifp->if_flt_head, &ifp->if_flt_lock,
		    (PZERO - 1), "if_flt_monitor", NULL);
	}
	if_flt_monitor_busy(ifp);
}

static void
if_flt_monitor_leave(struct ifnet *ifp)
{
	lck_mtx_assert(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(ifp->if_flt_busy != 0);
	--ifp->if_flt_busy;

	if (ifp->if_flt_busy == 0 && ifp->if_flt_waiters > 0) {
		ifp->if_flt_waiters = 0;
		wakeup(&ifp->if_flt_head);
	}
}

__private_extern__ int
dlil_attach_filter(struct ifnet *ifp, const struct iff_filter *if_filter,
    interface_filter_t *filter_ref)
{
	int retval = 0;
	struct ifnet_filter *filter = NULL;

	ifnet_head_lock_shared();
	/* Check that the interface is in the global list */
	if (!ifnet_lookup(ifp)) {
		retval = ENXIO;
		goto done;
	}

	filter = zalloc(dlif_filt_zone);
	if (filter == NULL) {
		retval = ENOMEM;
		goto done;
	}
	bzero(filter, dlif_filt_size);

	/* refcnt held above during lookup */
	filter->filt_ifp = ifp;
	filter->filt_cookie = if_filter->iff_cookie;
	filter->filt_name = if_filter->iff_name;
	filter->filt_protocol = if_filter->iff_protocol;
	filter->filt_input = if_filter->iff_input;
	filter->filt_output = if_filter->iff_output;
	filter->filt_event = if_filter->iff_event;
	filter->filt_ioctl = if_filter->iff_ioctl;
	filter->filt_detached = if_filter->iff_detached;

	lck_mtx_lock(&ifp->if_flt_lock);
	if_flt_monitor_enter(ifp);

	lck_mtx_assert(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);
	TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);

	if_flt_monitor_leave(ifp);
	lck_mtx_unlock(&ifp->if_flt_lock);

	*filter_ref = filter;

	/*
	 * Bump filter count and route_generation ID to let TCP
	 * know it shouldn't do TSO on this connection
	 */
	OSAddAtomic(1, &dlil_filter_count);
	if (use_routegenid)
		routegenid_update();

	if (dlil_verbose) {
		printf("%s%d: %s filter attached\n", ifp->if_name,
		    ifp->if_unit, if_filter->iff_name);
	}
done:
	ifnet_head_done();
	if (retval != 0 && ifp != NULL) {
		DLIL_PRINTF("%s%d: failed to attach %s (err=%d)\n",
		    ifp->if_name, ifp->if_unit, if_filter->iff_name, retval);
	}
	if (retval != 0 && filter != NULL)
		zfree(dlif_filt_zone, filter);

	return (retval);
}
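
/*
 * Sketch (hypothetical caller, not part of this file): kexts normally
 * reach dlil_attach_filter() via the iflt_attach() KPI declared in
 * net/kpi_interfacefilter.h, along these lines:
 *
 *	struct iff_filter filt = {
 *		.iff_cookie = my_cookie,	// hypothetical cookie
 *		.iff_name   = "com.example.filter",
 *		.iff_input  = my_input_func,	// hypothetical callback
 *	};
 *	interface_filter_t filt_ref;
 *	errno_t err = iflt_attach(ifp, &filt, &filt_ref);
 *
 * Callbacks that are not needed may be left NULL.
 */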

static int
dlil_detach_filter_internal(interface_filter_t filter, int detached)
{
	int retval = 0;

	if (detached == 0) {
		ifnet_t ifp = NULL;

		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			interface_filter_t entry = NULL;

			lck_mtx_lock(&ifp->if_flt_lock);
			TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
				if (entry != filter || entry->filt_skip)
					continue;
				/*
				 * We've found a match; since it's possible
				 * that the thread gets blocked in the monitor,
				 * we do the lock dance. Interface should
				 * not be detached since we still have a use
				 * count held during filter attach.
				 */
				entry->filt_skip = 1; /* skip input/output */
				lck_mtx_unlock(&ifp->if_flt_lock);
				ifnet_head_done();

				lck_mtx_lock(&ifp->if_flt_lock);
				if_flt_monitor_enter(ifp);
				lck_mtx_assert(&ifp->if_flt_lock,
				    LCK_MTX_ASSERT_OWNED);

				/* Remove the filter from the list */
				TAILQ_REMOVE(&ifp->if_flt_head, filter,
				    filt_next);

				if_flt_monitor_leave(ifp);
				lck_mtx_unlock(&ifp->if_flt_lock);
				if (dlil_verbose) {
					printf("%s%d: %s filter detached\n",
					    ifp->if_name, ifp->if_unit,
					    filter->filt_name);
				}
				goto destroy;
			}
			lck_mtx_unlock(&ifp->if_flt_lock);
		}
		ifnet_head_done();

		/* filter parameter is not a valid filter ref */
		retval = EINVAL;
		goto done;
	}

	if (dlil_verbose)
		printf("%s filter detached\n", filter->filt_name);

destroy:

	/* Call the detached function if there is one */
	if (filter->filt_detached)
		filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

	/* Free the filter */
	zfree(dlif_filt_zone, filter);

	/*
	 * Decrease filter count and bump route_generation ID to let TCP
	 * know it should reevaluate whether to do TSO
	 */
	OSAddAtomic(-1, &dlil_filter_count);
	if (use_routegenid)
		routegenid_update();

done:
	if (retval != 0) {
		DLIL_PRINTF("failed to detach %s filter (err=%d)\n",
		    filter->filt_name, retval);
	}
	return (retval);
}

__private_extern__ void
dlil_detach_filter(interface_filter_t filter)
{
	if (filter == NULL)
		return;
	dlil_detach_filter_internal(filter, 0);
}

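/*
 * Main loop for a dlil input thread: sleep until a producer
 * (ifnet_input) has queued packets and set DLIL_INPUT_WAITING, then
 * mark the thread DLIL_INPUT_RUNNING, detach the queued mbuf chain and
 * process it with the input lock dropped. The loopback thread also
 * drains the dedicated loopback queue (dlil_lo_input_mbuf_head).
 */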
static void
dlil_input_thread_func(struct dlil_threading_info *inputthread)
{
	while (1) {
		struct mbuf *m = NULL, *m_loop = NULL;
#if IFNET_INPUT_SANITY_CHK
		int loop_cnt = 0, mbuf_cnt;
		int count;
		struct mbuf *m1;
#endif /* IFNET_INPUT_SANITY_CHK */

		lck_mtx_lock_spin(&inputthread->input_lck);

		/* Wait until there is work to be done */
		while (!(inputthread->input_waiting & ~DLIL_INPUT_RUNNING)) {
			inputthread->input_waiting &= ~DLIL_INPUT_RUNNING;
			msleep(&inputthread->input_waiting,
			    &inputthread->input_lck, 0,
			    inputthread->input_name, 0);
		}

		lck_mtx_assert(&inputthread->input_lck, LCK_MTX_ASSERT_OWNED);

		m = inputthread->mbuf_head;
		inputthread->mbuf_head = NULL;
		inputthread->mbuf_tail = NULL;

		if (inputthread->input_waiting & DLIL_INPUT_TERMINATE) {
			lck_mtx_unlock(&inputthread->input_lck);

			if (m != NULL)
				mbuf_freem_list(m);

			OSAddAtomic(-1, &cur_dlil_input_threads);

			lck_mtx_destroy(&inputthread->input_lck,
			    inputthread->lck_grp);
			lck_grp_free(inputthread->lck_grp);

			zfree(dlif_inp_zone, inputthread);

			/* for the extra refcnt from kernel_thread_start() */
			thread_deallocate(current_thread());

			/* this is the end */
			thread_terminate(current_thread());
			/* NOTREACHED */
			return;
		}

		inputthread->input_waiting |= DLIL_INPUT_RUNNING;
		inputthread->input_waiting &= ~DLIL_INPUT_WAITING;

		if (inputthread == dlil_lo_thread_ptr) {
			m_loop = dlil_lo_input_mbuf_head;
			dlil_lo_input_mbuf_head = NULL;
			dlil_lo_input_mbuf_tail = NULL;
		}

#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			mbuf_cnt = inputthread->mbuf_count;
			inputthread->mbuf_count = 0;
			if (inputthread == dlil_lo_thread_ptr) {
				loop_cnt = dlil_lo_input_mbuf_count;
				dlil_lo_input_mbuf_count = 0;
			}

			lck_mtx_unlock(&inputthread->input_lck);

			for (m1 = m, count = 0; m1; m1 = mbuf_nextpkt(m1)) {
				count++;
			}
			if (count != mbuf_cnt) {
				panic("%s - thread=%p reg. loop queue "
				    "has %d packets, should have %d\n",
				    __func__, inputthread, count, mbuf_cnt);
				/* NOTREACHED */
			}

			if (inputthread == dlil_lo_thread_ptr) {
				for (m1 = m_loop, count = 0; m1;
				    m1 = mbuf_nextpkt(m1)) {
					count++;
				}
				if (count != loop_cnt) {
					panic("%s - thread=%p loop queue "
					    "has %d packets, should have %d\n",
					    __func__, inputthread, count,
					    loop_cnt);
					/* NOTREACHED */
				}
			}
		} else
#endif /* IFNET_INPUT_SANITY_CHK */
		{
			lck_mtx_unlock(&inputthread->input_lck);
		}

		/*
		 * NOTE: we should think about adding thread starvation
		 * safeguards here if we ever deal with long chains of
		 * packets.
		 */
		if (m_loop) {
			if (inputthread == dlil_lo_thread_ptr) {
				dlil_input_packet_list(lo_ifp, m_loop);
			}
#if IFNET_INPUT_SANITY_CHK
			else {
				panic("%s - thread=%p loop queue has %d "
				    "packets, should have none!\n", __func__,
				    inputthread, loop_cnt);
				/* NOTREACHED */
			}
#endif /* IFNET_INPUT_SANITY_CHK */
		}

		if (m != NULL)
			dlil_input_packet_list(0, m);

		lck_mtx_lock_spin(&inputthread->input_lck);

		if (inputthread->input_waiting &
		    (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER)) {
			lck_mtx_unlock(&inputthread->input_lck);
			proto_input_run();
		} else {
			lck_mtx_unlock(&inputthread->input_lck);
		}
	}
}

errno_t
ifnet_input(ifnet_t ifp, mbuf_t m_head,
    const struct ifnet_stat_increment_param *stats)
{
	struct thread *tp = current_thread();
	mbuf_t m_tail;
	struct dlil_threading_info *inp;
#if IFNET_INPUT_SANITY_CHK
	u_int32_t pkt_count = 0;
#endif /* IFNET_INPUT_SANITY_CHK */

	if (ifp == NULL || m_head == NULL) {
		if (m_head != NULL)
			mbuf_freem_list(m_head);
		return (EINVAL);
	}

	m_tail = m_head;
	while (1) {
#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			ifnet_t rcvif;

			rcvif = mbuf_pkthdr_rcvif(m_tail);
			pkt_count++;

			if (rcvif == NULL ||
			    (ifp->if_type != IFT_LOOP && rcvif != ifp) ||
			    !(mbuf_flags(m_head) & MBUF_PKTHDR)) {
				panic("%s - invalid mbuf %p\n",
				    __func__, m_tail);
				/* NOTREACHED */
			}
		}
#endif /* IFNET_INPUT_SANITY_CHK */
		if (mbuf_nextpkt(m_tail) == NULL)
			break;
		m_tail = mbuf_nextpkt(m_tail);
	}

	inp = ifp->if_input_thread;

	if (dlil_multithreaded_input == 0 || inp == NULL)
		inp = dlil_lo_thread_ptr;

	/*
	 * If there is a matching dlil input thread associated with an
	 * affinity set, associate this workloop thread with the same set.
	 * We will only do this once.
	 */
	lck_mtx_lock_spin(&inp->input_lck);
	if (inp->net_affinity && inp->workloop_thread == NULL) {
		u_int32_t tag = inp->tag;
		inp->workloop_thread = tp;
		lck_mtx_unlock(&inp->input_lck);

		/* Associate the current thread with the new affinity tag */
		(void) dlil_affinity_set(tp, tag);

		/*
		 * Take a reference on the workloop (current) thread; during
		 * detach, we will need to refer to it in order to tear down
		 * its affinity.
		 */
		thread_reference(tp);
		lck_mtx_lock_spin(&inp->input_lck);
	}

	/*
	 * WARNING: because of looped-back multicast we cannot stuff the
	 * ifp into the rcvif of the packet header: loopback has its own
	 * dlil input queue.
	 */

	if (inp == dlil_lo_thread_ptr && ifp->if_type == IFT_LOOP) {
		if (dlil_lo_input_mbuf_head == NULL)
			dlil_lo_input_mbuf_head = m_head;
		else if (dlil_lo_input_mbuf_tail != NULL)
			dlil_lo_input_mbuf_tail->m_nextpkt = m_head;
		dlil_lo_input_mbuf_tail = m_tail;
#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			dlil_lo_input_mbuf_count += pkt_count;
			inp->input_mbuf_cnt += pkt_count;
			inp->input_wake_cnt++;

			lck_mtx_assert(&inp->input_lck, LCK_MTX_ASSERT_OWNED);
		}
#endif
	} else {
		if (inp->mbuf_head == NULL)
			inp->mbuf_head = m_head;
		else if (inp->mbuf_tail != NULL)
			inp->mbuf_tail->m_nextpkt = m_head;
		inp->mbuf_tail = m_tail;
#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			inp->mbuf_count += pkt_count;
			inp->input_mbuf_cnt += pkt_count;
			inp->input_wake_cnt++;

			lck_mtx_assert(&inp->input_lck, LCK_MTX_ASSERT_OWNED);
		}
#endif
	}

	inp->input_waiting |= DLIL_INPUT_WAITING;
	if ((inp->input_waiting & DLIL_INPUT_RUNNING) == 0) {
		wakeup((caddr_t)&inp->input_waiting);
	}
	lck_mtx_unlock(&inp->input_lck);

	if (stats) {
		atomic_add_64(&ifp->if_data.ifi_ipackets, stats->packets_in);
		atomic_add_64(&ifp->if_data.ifi_ibytes, stats->bytes_in);
		atomic_add_64(&ifp->if_data.ifi_ierrors, stats->errors_in);

		atomic_add_64(&ifp->if_data.ifi_opackets, stats->packets_out);
		atomic_add_64(&ifp->if_data.ifi_obytes, stats->bytes_out);
		atomic_add_64(&ifp->if_data.ifi_oerrors, stats->errors_out);

		atomic_add_64(&ifp->if_data.ifi_collisions, stats->collisions);
		atomic_add_64(&ifp->if_data.ifi_iqdrops, stats->dropped);
	}

	return (0);
}

static int
dlil_interface_filters_input(struct ifnet *ifp, struct mbuf **m_p,
    char **frame_header_p, protocol_family_t protocol_family)
{
	struct ifnet_filter *filter;

	/*
	 * Pass the inbound packet to the interface filters
	 */
	lck_mtx_lock_spin(&ifp->if_flt_lock);
	/* prevent filter list from changing in case we drop the lock */
	if_flt_monitor_busy(ifp);
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
		int result;

		if (!filter->filt_skip && filter->filt_input != NULL &&
		    (filter->filt_protocol == 0 ||
		    filter->filt_protocol == protocol_family)) {
			lck_mtx_unlock(&ifp->if_flt_lock);

			result = (*filter->filt_input)(filter->filt_cookie,
			    ifp, protocol_family, m_p, frame_header_p);

			lck_mtx_lock_spin(&ifp->if_flt_lock);
			if (result != 0) {
				/* we're done with the filter list */
				if_flt_monitor_unbusy(ifp);
				lck_mtx_unlock(&ifp->if_flt_lock);
				return (result);
			}
		}
	}
	/* we're done with the filter list */
	if_flt_monitor_unbusy(ifp);
	lck_mtx_unlock(&ifp->if_flt_lock);

	/*
	 * Strip away M_PROTO1 bit prior to sending packet up the stack as
	 * it is meant to be local to a subsystem -- if_bridge for M_PROTO1
	 */
	if (*m_p != NULL)
		(*m_p)->m_flags &= ~M_PROTO1;

	return (0);
}

static int
dlil_interface_filters_output(struct ifnet *ifp, struct mbuf **m_p,
    protocol_family_t protocol_family)
{
	struct ifnet_filter *filter;

	/*
	 * Pass the outbound packet to the interface filters
	 */
	lck_mtx_lock_spin(&ifp->if_flt_lock);
	/* prevent filter list from changing in case we drop the lock */
	if_flt_monitor_busy(ifp);
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
		int result;

		if (!filter->filt_skip && filter->filt_output != NULL &&
		    (filter->filt_protocol == 0 ||
		    filter->filt_protocol == protocol_family)) {
			lck_mtx_unlock(&ifp->if_flt_lock);

			result = filter->filt_output(filter->filt_cookie, ifp,
			    protocol_family, m_p);

			lck_mtx_lock_spin(&ifp->if_flt_lock);
			if (result != 0) {
				/* we're done with the filter list */
				if_flt_monitor_unbusy(ifp);
				lck_mtx_unlock(&ifp->if_flt_lock);
				return (result);
			}
		}
	}
	/* we're done with the filter list */
	if_flt_monitor_unbusy(ifp);
	lck_mtx_unlock(&ifp->if_flt_lock);

	return (0);
}

static void
dlil_ifproto_input(struct if_proto *ifproto, mbuf_t m)
{
	int error;

	if (ifproto->proto_kpi == kProtoKPI_v1) {
		/* Version 1 protocols get one packet at a time */
		while (m != NULL) {
			char *frame_header;
			mbuf_t next_packet;

			next_packet = m->m_nextpkt;
			m->m_nextpkt = NULL;
			frame_header = m->m_pkthdr.header;
			m->m_pkthdr.header = NULL;
			error = (*ifproto->kpi.v1.input)(ifproto->ifp,
			    ifproto->protocol_family, m, frame_header);
			if (error != 0 && error != EJUSTRETURN)
				m_freem(m);
			m = next_packet;
		}
	} else if (ifproto->proto_kpi == kProtoKPI_v2) {
		/* Version 2 protocols support packet lists */
		error = (*ifproto->kpi.v2.input)(ifproto->ifp,
		    ifproto->protocol_family, m);
		if (error != 0 && error != EJUSTRETURN)
			m_freem_list(m);
	}
	return;
}

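/*
 * Demux a chain of inbound packets and hand them up to the attached
 * protocols. Consecutive packets destined to the same protocol are
 * re-chained and delivered in a single call via dlil_ifproto_input(),
 * so a burst of traffic for one protocol results in one v2 input
 * callback for the whole batch rather than one per packet.
 */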
__private_extern__ void
dlil_input_packet_list(struct ifnet *ifp_param, struct mbuf *m)
{
	int error = 0;
	protocol_family_t protocol_family;
	mbuf_t next_packet;
	ifnet_t ifp = ifp_param;
	char *frame_header;
	struct if_proto *last_ifproto = NULL;
	mbuf_t pkt_first = NULL;
	mbuf_t *pkt_next = NULL;

	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

	while (m != NULL) {
		struct if_proto *ifproto = NULL;
		int iorefcnt = 0;

		if (ifp_param == NULL)
			ifp = m->m_pkthdr.rcvif;

		/* Check if this mbuf looks valid */
		MBUF_INPUT_CHECK(m, ifp);

		next_packet = m->m_nextpkt;
		m->m_nextpkt = NULL;
		frame_header = m->m_pkthdr.header;
		m->m_pkthdr.header = NULL;

		/*
		 * Get an IO reference count if the interface is not
		 * loopback and it is attached.
		 */
		if (ifp != lo_ifp) {
			if (!ifnet_is_attached(ifp, 1)) {
				m_freem(m);
				goto next;
			}
			iorefcnt = 1;
		}

		switch (m->m_pkthdr.prio) {
		case MBUF_TC_BK:
			atomic_add_64(&ifp->if_tc.ifi_ibkpackets, 1);
			atomic_add_64(&ifp->if_tc.ifi_ibkbytes, m->m_pkthdr.len);
			break;
		case MBUF_TC_VI:
			atomic_add_64(&ifp->if_tc.ifi_ivipackets, 1);
			atomic_add_64(&ifp->if_tc.ifi_ivibytes, m->m_pkthdr.len);
			break;
		case MBUF_TC_VO:
			atomic_add_64(&ifp->if_tc.ifi_ivopackets, 1);
			atomic_add_64(&ifp->if_tc.ifi_ivobytes, m->m_pkthdr.len);
			break;
		default:
			break;
		}

		/* find which protocol family this packet is for */
		ifnet_lock_shared(ifp);
		error = (*ifp->if_demux)(ifp, m, frame_header,
		    &protocol_family);
		ifnet_lock_done(ifp);
		if (error != 0) {
			if (error == EJUSTRETURN)
				goto next;
			protocol_family = 0;
		}

		if (m->m_flags & (M_BCAST|M_MCAST))
			atomic_add_64(&ifp->if_imcasts, 1);

		/* run interface filters, exclude VLAN packets PR-3586856 */
		if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
			error = dlil_interface_filters_input(ifp, &m,
			    &frame_header, protocol_family);
			if (error != 0) {
				if (error != EJUSTRETURN)
					m_freem(m);
				goto next;
			}
		}
		if (error != 0 || ((m->m_flags & M_PROMISC) != 0)) {
			m_freem(m);
			goto next;
		}

		/* Lookup the protocol attachment to this interface */
		if (protocol_family == 0) {
			ifproto = NULL;
		} else if (last_ifproto != NULL && last_ifproto->ifp == ifp &&
		    (last_ifproto->protocol_family == protocol_family)) {
			VERIFY(ifproto == NULL);
			ifproto = last_ifproto;
			if_proto_ref(last_ifproto);
		} else {
			VERIFY(ifproto == NULL);
			ifnet_lock_shared(ifp);
			/* callee holds a proto refcnt upon success */
			ifproto = find_attached_proto(ifp, protocol_family);
			ifnet_lock_done(ifp);
		}
		if (ifproto == NULL) {
			/* no protocol for this packet, discard */
			m_freem(m);
			goto next;
		}
		if (ifproto != last_ifproto) {
			if (last_ifproto != NULL) {
				/* pass up the list for the previous protocol */
				dlil_ifproto_input(last_ifproto, pkt_first);
				pkt_first = NULL;
				if_proto_free(last_ifproto);
			}
			last_ifproto = ifproto;
			if_proto_ref(ifproto);
		}
		/* extend the list */
		m->m_pkthdr.header = frame_header;
		if (pkt_first == NULL) {
			pkt_first = m;
		} else {
			*pkt_next = m;
		}
		pkt_next = &m->m_nextpkt;

next:
		if (next_packet == NULL && last_ifproto != NULL) {
			/* pass up the last list of packets */
			dlil_ifproto_input(last_ifproto, pkt_first);
			if_proto_free(last_ifproto);
			last_ifproto = NULL;
		}
		if (ifproto != NULL) {
			if_proto_free(ifproto);
			ifproto = NULL;
		}

		m = next_packet;

		/* update the driver's multicast filter, if needed */
		if (ifp->if_updatemcasts > 0 && if_mcasts_update(ifp) == 0)
			ifp->if_updatemcasts = 0;
		if (iorefcnt == 1)
			ifnet_decr_iorefcnt(ifp);
	}

	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
	return;
}

errno_t
if_mcasts_update(struct ifnet *ifp)
{
	errno_t err;

	err = ifnet_ioctl(ifp, 0, SIOCADDMULTI, NULL);
	if (err == EAFNOSUPPORT)
		err = 0;
	printf("%s%d: %s %d suspended link-layer multicast membership(s) "
	    "(err=%d)\n", ifp->if_name, ifp->if_unit,
	    (err == 0 ? "successfully restored" : "failed to restore"),
	    ifp->if_updatemcasts, err);

	/* just return success */
	return (0);
}

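/*
 * Deliver a kernel event to the interface filters, then to every
 * attached protocol, then to the interface itself, and finally post it
 * to user-space listeners via kev_post_msg().
 */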
1629 static int
1630 dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
1631 {
1632 struct ifnet_filter *filter;
1633
1634 /* Get an io ref count if the interface is attached */
1635 if (!ifnet_is_attached(ifp, 1))
1636 goto done;
1637
1638 /*
1639 * Pass the event to the interface filters
1640 */
1641 lck_mtx_lock_spin(&ifp->if_flt_lock);
1642 /* prevent filter list from changing in case we drop the lock */
1643 if_flt_monitor_busy(ifp);
1644 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1645 if (filter->filt_event != NULL) {
1646 lck_mtx_unlock(&ifp->if_flt_lock);
1647
1648 filter->filt_event(filter->filt_cookie, ifp,
1649 filter->filt_protocol, event);
1650
1651 lck_mtx_lock_spin(&ifp->if_flt_lock);
1652 }
1653 }
1654 /* we're done with the filter list */
1655 if_flt_monitor_unbusy(ifp);
1656 lck_mtx_unlock(&ifp->if_flt_lock);
1657
1658 ifnet_lock_shared(ifp);
1659 if (ifp->if_proto_hash != NULL) {
1660 int i;
1661
1662 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
1663 struct if_proto *proto;
1664
1665 SLIST_FOREACH(proto, &ifp->if_proto_hash[i],
1666 next_hash) {
1667 proto_media_event eventp =
1668 (proto->proto_kpi == kProtoKPI_v1 ?
1669 proto->kpi.v1.event :
1670 proto->kpi.v2.event);
1671
1672 if (eventp != NULL) {
1673 if_proto_ref(proto);
1674 ifnet_lock_done(ifp);
1675
1676 eventp(ifp, proto->protocol_family,
1677 event);
1678
1679 ifnet_lock_shared(ifp);
1680 if_proto_free(proto);
1681 }
1682 }
1683 }
1684 }
1685 ifnet_lock_done(ifp);
1686
1687 /* Pass the event to the interface */
1688 if (ifp->if_event != NULL)
1689 ifp->if_event(ifp, event);
1690
1691 /* Release the io ref count */
1692 ifnet_decr_iorefcnt(ifp);
1693
1694 done:
1695 return (kev_post_msg(event));
1696 }
1697
1698 errno_t
1699 ifnet_event(ifnet_t ifp, struct kern_event_msg *event)
1700 {
1701 struct kev_msg kev_msg;
1702 int result = 0;
1703
1704 if (ifp == NULL || event == NULL)
1705 return (EINVAL);
1706
1707 bzero(&kev_msg, sizeof (kev_msg));
1708 kev_msg.vendor_code = event->vendor_code;
1709 kev_msg.kev_class = event->kev_class;
1710 kev_msg.kev_subclass = event->kev_subclass;
1711 kev_msg.event_code = event->event_code;
1712 kev_msg.dv[0].data_ptr = &event->event_data[0];
1713 kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
1714 kev_msg.dv[1].data_length = 0;
1715
1716 result = dlil_event_internal(ifp, &kev_msg);
1717
1718 return (result);
1719 }
1720
1721 #if CONFIG_MACF_NET
1722 #include <netinet/ip6.h>
1723 #include <netinet/ip.h>
1724 static int
1725 dlil_get_socket_type(struct mbuf **mp, int family, int raw)
1726 {
1727 struct mbuf *m;
1728 struct ip *ip;
1729 struct ip6_hdr *ip6;
1730 int type = SOCK_RAW;
1731
1732 if (!raw) {
1733 switch (family) {
1734 case PF_INET:
1735 m = m_pullup(*mp, sizeof(struct ip));
1736 if (m == NULL)
1737 break;
1738 *mp = m;
1739 ip = mtod(m, struct ip *);
1740 if (ip->ip_p == IPPROTO_TCP)
1741 type = SOCK_STREAM;
1742 else if (ip->ip_p == IPPROTO_UDP)
1743 type = SOCK_DGRAM;
1744 break;
1745 case PF_INET6:
1746 m = m_pullup(*mp, sizeof(struct ip6_hdr));
1747 if (m == NULL)
1748 break;
1749 *mp = m;
1750 ip6 = mtod(m, struct ip6_hdr *);
1751 if (ip6->ip6_nxt == IPPROTO_TCP)
1752 type = SOCK_STREAM;
1753 else if (ip6->ip6_nxt == IPPROTO_UDP)
1754 type = SOCK_DGRAM;
1755 break;
1756 }
1757 }
1758
1759 return (type);
1760 }
1761 #endif
1762
1763 static void
1764 if_inc_traffic_class_out(ifnet_t ifp, mbuf_t m)
1765 {
1766 if (!(m->m_flags & M_PKTHDR))
1767 return;
1768
1769 switch (m->m_pkthdr.prio) {
1770 case MBUF_TC_BK:
1771 atomic_add_64(&ifp->if_tc.ifi_obkpackets, 1);
1772 atomic_add_64(&ifp->if_tc.ifi_obkbytes, m->m_pkthdr.len);
1773 break;
1774 case MBUF_TC_VI:
1775 atomic_add_64(&ifp->if_tc.ifi_ovipackets, 1);
1776 atomic_add_64(&ifp->if_tc.ifi_ovibytes, m->m_pkthdr.len);
1777 break;
1778 case MBUF_TC_VO:
1779 atomic_add_64(&ifp->if_tc.ifi_ovopackets, 1);
1780 atomic_add_64(&ifp->if_tc.ifi_ovobytes, m->m_pkthdr.len);
1781 break;
1782 default:
1783 break;
1784 }
1785 }
1786
1787 /*
1788 * dlil_output
1789 *
1790 * Caller should have a lock on the protocol domain if the protocol
1791 * doesn't support finer grained locking. In most cases, the lock
1792 * will be held from the socket layer and won't be released until
1793 * we return back to the socket layer.
1794 *
1795 * This does mean that we must take a protocol lock before we take
1796 * an interface lock if we're going to take both. This makes sense
1797 * because a protocol is likely to interact with an ifp while it
1798 * is under the protocol lock.
1799 */
1800 errno_t
1801 dlil_output(ifnet_t ifp, protocol_family_t proto_family, mbuf_t packetlist,
1802 void *route, const struct sockaddr *dest, int raw)
1803 {
1804 char *frame_type = NULL;
1805 char *dst_linkaddr = NULL;
1806 int retval = 0;
1807 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1808 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1809 struct if_proto *proto = NULL;
1810 mbuf_t m;
1811 mbuf_t send_head = NULL;
1812 mbuf_t *send_tail = &send_head;
1813 int iorefcnt = 0;
1814
1815 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
1816
1817 /* Get an io refcnt if the interface is attached to prevent ifnet_detach
1818 * from happening while this operation is in progress */
1819 if (!ifnet_is_attached(ifp, 1)) {
1820 retval = ENXIO;
1821 goto cleanup;
1822 }
1823 iorefcnt = 1;
1824
1825 /* update the driver's multicast filter, if needed */
1826 if (ifp->if_updatemcasts > 0 && if_mcasts_update(ifp) == 0)
1827 ifp->if_updatemcasts = 0;
1828
1829 frame_type = frame_type_buffer;
1830 dst_linkaddr = dst_linkaddr_buffer;
1831
1832 if (raw == 0) {
1833 ifnet_lock_shared(ifp);
1834 /* callee holds a proto refcnt upon success */
1835 proto = find_attached_proto(ifp, proto_family);
1836 if (proto == NULL) {
1837 ifnet_lock_done(ifp);
1838 retval = ENXIO;
1839 goto cleanup;
1840 }
1841 ifnet_lock_done(ifp);
1842 }
1843
1844 preout_again:
1845 if (packetlist == NULL)
1846 goto cleanup;
1847
1848 m = packetlist;
1849 packetlist = packetlist->m_nextpkt;
1850 m->m_nextpkt = NULL;
1851
1852 if (raw == 0) {
1853 proto_media_preout preoutp = (proto->proto_kpi == kProtoKPI_v1 ?
1854 proto->kpi.v1.pre_output : proto->kpi.v2.pre_output);
1855 retval = 0;
1856 if (preoutp != NULL) {
1857 retval = preoutp(ifp, proto_family, &m, dest, route,
1858 frame_type, dst_linkaddr);
1859
1860 if (retval != 0) {
1861 if (retval == EJUSTRETURN)
1862 goto preout_again;
1863 m_freem(m);
1864 goto cleanup;
1865 }
1866 }
1867 }
1868
1869 #if CONFIG_MACF_NET
1870 retval = mac_ifnet_check_transmit(ifp, m, proto_family,
1871 dlil_get_socket_type(&m, proto_family, raw));
1872 if (retval) {
1873 m_freem(m);
1874 goto cleanup;
1875 }
1876 #endif
1877
1878 do {
1879 #if CONFIG_DTRACE
1880 if (proto_family == PF_INET) {
1881 struct ip *ip = mtod(m, struct ip*);
1882 DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL,
1883 struct ip *, ip, struct ifnet *, ifp,
1884 struct ip *, ip, struct ip6_hdr *, NULL);
1885
1886 } else if (proto_family == PF_INET6) {
1887 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr*);
1888 DTRACE_IP6(send, struct mbuf*, m, struct inpcb *, NULL,
1889 struct ip6_hdr *, ip6, struct ifnet*, ifp,
1890 struct ip*, NULL, struct ip6_hdr *, ip6);
1891 }
1892 #endif /* CONFIG_DTRACE */
1893
1894 if (raw == 0 && ifp->if_framer) {
1895 int rcvif_set = 0;
1896
1897 /*
1898 * If this is a broadcast packet that needs to be
1899 * looped back into the system, set the inbound ifp
1900 * to that of the outbound ifp. This will allow
1901 * us to determine that it is a legitimate packet
1902 * for the system. Only set the ifp if it's not
1903 * already set, just to be safe.
1904 */
1905 if ((m->m_flags & (M_BCAST | M_LOOP)) &&
1906 m->m_pkthdr.rcvif == NULL) {
1907 m->m_pkthdr.rcvif = ifp;
1908 rcvif_set = 1;
1909 }
1910
1911 retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr,
1912 frame_type);
1913 if (retval) {
1914 if (retval != EJUSTRETURN)
1915 m_freem(m);
1916 goto next;
1917 }
1918
1919 /*
1920 * Clear the ifp if it was set above, and to be
1921 * safe, only if it is still the same as the
1922 * outbound ifp we have in context. If it was
1923 * looped back, then a copy of it was sent to the
1924 * loopback interface with the rcvif set, and we
1925 * are clearing the one that will go down to the
1926 * layer below.
1927 */
1928 if (rcvif_set && m->m_pkthdr.rcvif == ifp)
1929 m->m_pkthdr.rcvif = NULL;
1930 }
1931
1932 /*
1933 * Let interface filters (if any) do their thing ...
1934 */
1935 /* Do not pass VLAN tagged packets to filters PR-3586856 */
1936 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
1937 retval = dlil_interface_filters_output(ifp,
1938 &m, proto_family);
1939 if (retval != 0) {
1940 if (retval != EJUSTRETURN)
1941 m_freem(m);
1942 goto next;
1943 }
1944 }
1945 /*
1946 * Strip away M_PROTO1 bit prior to sending packet to the driver
1947 * as this bit may be used by the driver
1948 */
1949 m->m_flags &= ~M_PROTO1;
1950
1951 /*
1952 * If the underlying interface is not capable of handling a
1953 * packet whose data portion spans across physically disjoint
1954 * pages, we need to "normalize" the packet so that we pass
1955 * down a chain of mbufs where each mbuf points to a span that
1956 * resides in the system page boundary. If the packet does
1957 * not cross page(s), the following is a no-op.
1958 */
1959 if (!(ifp->if_hwassist & IFNET_MULTIPAGES)) {
1960 if ((m = m_normalize(m)) == NULL)
1961 goto next;
1962 }
1963
1964 /*
1965 * If this is a TSO packet, make sure the interface still
1966 * advertises TSO capability.
1967 */
1968
1969 if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) &&
1970 !(ifp->if_hwassist & IFNET_TSO_IPV4)) {
1971 retval = EMSGSIZE;
1972 m_freem(m);
1973 goto cleanup;
1974 }
1975
1976 if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV6) &&
1977 !(ifp->if_hwassist & IFNET_TSO_IPV6)) {
1978 retval = EMSGSIZE;
1979 m_freem(m);
1980 goto cleanup;
1981 }
1982
1983 /*
1984 * Finally, call the driver.
1985 */
1986 if ((ifp->if_eflags & IFEF_SENDLIST) != 0) {
1987 *send_tail = m;
1988 send_tail = &m->m_nextpkt;
1989 } else {
1990 if_inc_traffic_class_out(ifp, m);
1991 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START,
1992 0,0,0,0,0);
1993 retval = ifp->if_output(ifp, m);
1994 if (retval && dlil_verbose) {
1995 printf("%s: output error on %s%d retval = %d\n",
1996 __func__, ifp->if_name, ifp->if_unit,
1997 retval);
1998 }
1999 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END,
2000 0,0,0,0,0);
2001 }
2002 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
2003
2004 next:
2005 m = packetlist;
2006 if (m) {
2007 packetlist = packetlist->m_nextpkt;
2008 m->m_nextpkt = NULL;
2009 }
2010 } while (m);
2011
2012 if (send_head) {
2013 if_inc_traffic_class_out(ifp, send_head);
2014
2015 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
2016 retval = ifp->if_output(ifp, send_head);
2017 if (retval && dlil_verbose) {
2018 printf("%s: output error on %s%d retval = %d\n",
2019 __func__, ifp->if_name, ifp->if_unit, retval);
2020 }
2021 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
2022 }
2023
2024 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
2025
2026 cleanup:
2027 if (proto != NULL)
2028 if_proto_free(proto);
2029 if (packetlist) /* if any packets are left, clean up */
2030 mbuf_freem_list(packetlist);
2031 if (retval == EJUSTRETURN)
2032 retval = 0;
2033 if (iorefcnt == 1)
2034 ifnet_decr_iorefcnt(ifp);
2035
2036 return (retval);
2037 }
2038
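/*
 * Editor's illustration (not part of the original file): a hedged sketch
 * of a caller honoring the lock-ordering rule documented above
 * dlil_output() -- protocol (domain) lock first, interface locks second.
 */
#if 0	/* illustrative only */
static errno_t
proto_output_example(ifnet_t ifp, mbuf_t m, const struct sockaddr *dest)
{
	/* the socket layer is assumed to hold the protocol lock here */
	errno_t err = dlil_output(ifp, PF_INET, m, NULL /* route */,
	    dest, 0 /* raw */);
	/* EJUSTRETURN is already mapped to 0 by dlil_output() */
	return (err);
}
#endif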
2039 errno_t
2040 ifnet_ioctl(ifnet_t ifp, protocol_family_t proto_fam, u_long ioctl_code,
2041 void *ioctl_arg)
2042 {
2043 struct ifnet_filter *filter;
2044 int retval = EOPNOTSUPP;
2045 int result = 0;
2046
2047 if (ifp == NULL || ioctl_code == 0)
2048 return (EINVAL);
2049
2050 /* Get an I/O ref count if the interface is attached */
2051 if (!ifnet_is_attached(ifp, 1))
2052 return (EOPNOTSUPP);
2053
2054 /* Run the interface filters first.
2055 * We want to run all filters before calling the protocol,
2056 * interface family, or interface.
2057 */
2058 lck_mtx_lock_spin(&ifp->if_flt_lock);
2059 /* prevent filter list from changing in case we drop the lock */
2060 if_flt_monitor_busy(ifp);
2061 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
2062 if (filter->filt_ioctl != NULL && (filter->filt_protocol == 0 ||
2063 filter->filt_protocol == proto_fam)) {
2064 lck_mtx_unlock(&ifp->if_flt_lock);
2065
2066 result = filter->filt_ioctl(filter->filt_cookie, ifp,
2067 proto_fam, ioctl_code, ioctl_arg);
2068
2069 lck_mtx_lock_spin(&ifp->if_flt_lock);
2070
2071 /* Only update retval if no one has handled the ioctl */
2072 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
2073 if (result == ENOTSUP)
2074 result = EOPNOTSUPP;
2075 retval = result;
2076 if (retval != 0 && retval != EOPNOTSUPP) {
2077 /* we're done with the filter list */
2078 if_flt_monitor_unbusy(ifp);
2079 lck_mtx_unlock(&ifp->if_flt_lock);
2080 goto cleanup;
2081 }
2082 }
2083 }
2084 }
2085 /* we're done with the filter list */
2086 if_flt_monitor_unbusy(ifp);
2087 lck_mtx_unlock(&ifp->if_flt_lock);
2088
2089 /* Allow the protocol to handle the ioctl */
2090 if (proto_fam != 0) {
2091 struct if_proto *proto;
2092
2093 /* callee holds a proto refcnt upon success */
2094 ifnet_lock_shared(ifp);
2095 proto = find_attached_proto(ifp, proto_fam);
2096 ifnet_lock_done(ifp);
2097 if (proto != NULL) {
2098 proto_media_ioctl ioctlp =
2099 (proto->proto_kpi == kProtoKPI_v1 ?
2100 proto->kpi.v1.ioctl : proto->kpi.v2.ioctl);
2101 result = EOPNOTSUPP;
2102 if (ioctlp != NULL)
2103 result = ioctlp(ifp, proto_fam, ioctl_code,
2104 ioctl_arg);
2105 if_proto_free(proto);
2106
2107 /* Only update retval if no one has handled the ioctl */
2108 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
2109 if (result == ENOTSUP)
2110 result = EOPNOTSUPP;
2111 retval = result;
2112 if (retval && retval != EOPNOTSUPP)
2113 goto cleanup;
2114 }
2115 }
2116 }
2117
2118 /* retval is either 0 or EOPNOTSUPP */
2119
2120 /*
2121 * Let the interface handle this ioctl.
2122 * If it returns EOPNOTSUPP, ignore that, we may have
2123 * already handled this in the protocol or family.
2124 */
2125 if (ifp->if_ioctl)
2126 result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
2127
2128 /* Only update retval if no one has handled the ioctl */
2129 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
2130 if (result == ENOTSUP)
2131 result = EOPNOTSUPP;
2132 retval = result;
2133 if (retval && retval != EOPNOTSUPP) {
2134 goto cleanup;
2135 }
2136 }
2137
2138 cleanup:
2139 if (retval == EJUSTRETURN)
2140 retval = 0;
2141
2142 ifnet_decr_iorefcnt(ifp);
2143
2144 return (retval);
2145 }
2146
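/*
 * Editor's illustration (not part of the original file): an interface
 * filter's ioctl callback cooperating with the retval-merging logic in
 * ifnet_ioctl() above. Assumes the iff_ioctl_func signature from
 * <net/kpi_interfacefilter.h>.
 */
#if 0	/* illustrative only */
static errno_t
filt_ioctl_example(void *cookie, ifnet_t ifp, protocol_family_t proto,
    unsigned long cmd, void *arg)
{
#pragma unused(cookie, ifp, proto, arg)
	if (cmd == SIOCSIFMTU) {
		/* observe the MTU change, but let others handle it too */
	}
	/* EOPNOTSUPP: not consumed; EJUSTRETURN would consume it silently */
	return (EOPNOTSUPP);
}
#endif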
2147 __private_extern__ errno_t
2148 dlil_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func callback)
2149 {
2150 errno_t error = 0;
2151
2152
2153 if (ifp->if_set_bpf_tap) {
2154 /* Get an io reference on the interface if it is attached */
2155 if (!ifnet_is_attached(ifp, 1))
2156 return (ENXIO);
2157 error = ifp->if_set_bpf_tap(ifp, mode, callback);
2158 ifnet_decr_iorefcnt(ifp);
2159 }
2160 return (error);
2161 }
2162
2163 errno_t
2164 dlil_resolve_multi(struct ifnet *ifp, const struct sockaddr *proto_addr,
2165 struct sockaddr *ll_addr, size_t ll_len)
2166 {
2167 errno_t result = EOPNOTSUPP;
2168 struct if_proto *proto;
2169 const struct sockaddr *verify;
2170 proto_media_resolve_multi resolvep;
2171
2172 if (!ifnet_is_attached(ifp, 1))
2173 return (result);
2174
2175 bzero(ll_addr, ll_len);
2176
2177 /* Call the protocol first; callee holds a proto refcnt upon success */
2178 ifnet_lock_shared(ifp);
2179 proto = find_attached_proto(ifp, proto_addr->sa_family);
2180 ifnet_lock_done(ifp);
2181 if (proto != NULL) {
2182 resolvep = (proto->proto_kpi == kProtoKPI_v1 ?
2183 proto->kpi.v1.resolve_multi : proto->kpi.v2.resolve_multi);
2184 if (resolvep != NULL)
2185 result = resolvep(ifp, proto_addr,
2186 (struct sockaddr_dl*)ll_addr, ll_len);
2187 if_proto_free(proto);
2188 }
2189
2190 /* Let the interface verify the multicast address */
2191 if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
2192 if (result == 0)
2193 verify = ll_addr;
2194 else
2195 verify = proto_addr;
2196 result = ifp->if_check_multi(ifp, verify);
2197 }
2198
2199 ifnet_decr_iorefcnt(ifp);
2200 return (result);
2201 }
2202
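/*
 * Editor's illustration (not part of the original file): resolving an
 * IPv4 multicast group to its link-layer mapping with
 * dlil_resolve_multi(); the group address is arbitrary.
 */
#if 0	/* illustrative only */
static errno_t
resolve_mcast_example(struct ifnet *ifp)
{
	struct sockaddr_in sin;
	struct sockaddr_storage ll;

	bzero(&sin, sizeof (sin));
	sin.sin_len = sizeof (sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(0xe00000fbU);	/* 224.0.0.251 */

	/* on success, ll contains a sockaddr_dl for the group */
	return (dlil_resolve_multi(ifp, (struct sockaddr *)&sin,
	    (struct sockaddr *)&ll, sizeof (ll)));
}
#endif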
2203 __private_extern__ errno_t
2204 dlil_send_arp_internal(ifnet_t ifp, u_short arpop,
2205 const struct sockaddr_dl* sender_hw, const struct sockaddr* sender_proto,
2206 const struct sockaddr_dl* target_hw, const struct sockaddr* target_proto)
2207 {
2208 struct if_proto *proto;
2209 errno_t result = 0;
2210
2211 /* callee holds a proto refcnt upon success */
2212 ifnet_lock_shared(ifp);
2213 proto = find_attached_proto(ifp, target_proto->sa_family);
2214 ifnet_lock_done(ifp);
2215 if (proto == NULL) {
2216 result = ENOTSUP;
2217 } else {
2218 proto_media_send_arp arpp;
2219 arpp = (proto->proto_kpi == kProtoKPI_v1 ?
2220 proto->kpi.v1.send_arp : proto->kpi.v2.send_arp);
2221 if (arpp == NULL)
2222 result = ENOTSUP;
2223 else
2224 result = arpp(ifp, arpop, sender_hw, sender_proto,
2225 target_hw, target_proto);
2226 if_proto_free(proto);
2227 }
2228
2229 return (result);
2230 }
2231
2232 static __inline__ int
2233 _is_announcement(const struct sockaddr_in * sender_sin,
2234 const struct sockaddr_in * target_sin)
2235 {
2236 if (sender_sin == NULL || target_sin == NULL) {
2237 return (FALSE);
2238 }
2239 return (sender_sin->sin_addr.s_addr == target_sin->sin_addr.s_addr);
2240 }
2241
2242 __private_extern__ errno_t
2243 dlil_send_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl* sender_hw,
2244 const struct sockaddr* sender_proto, const struct sockaddr_dl* target_hw,
2245 const struct sockaddr* target_proto)
2246 {
2247 errno_t result = 0;
2248 const struct sockaddr_in * sender_sin;
2249 const struct sockaddr_in * target_sin;
2250
2251 if (target_proto == NULL || (sender_proto != NULL &&
2252 sender_proto->sa_family != target_proto->sa_family))
2253 return (EINVAL);
2254
2255 /*
2256 * If this is an ARP request and the target IP is IPv4LL,
2257 * send the request on all interfaces. The exception is
2258 * an announcement, which must only appear on the specific
2259 * interface.
2260 */
2261 sender_sin = (const struct sockaddr_in *)sender_proto;
2262 target_sin = (const struct sockaddr_in *)target_proto;
2263 if (target_proto->sa_family == AF_INET &&
2264 IN_LINKLOCAL(ntohl(target_sin->sin_addr.s_addr)) &&
2265 ipv4_ll_arp_aware != 0 && arpop == ARPOP_REQUEST &&
2266 !_is_announcement(target_sin, sender_sin)) {
2267 ifnet_t *ifp_list;
2268 u_int32_t count;
2269 u_int32_t ifp_on;
2270
2271 result = ENOTSUP;
2272
2273 if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
2274 for (ifp_on = 0; ifp_on < count; ifp_on++) {
2275 errno_t new_result;
2276 ifaddr_t source_hw = NULL;
2277 ifaddr_t source_ip = NULL;
2278 struct sockaddr_in source_ip_copy;
2279 struct ifnet *cur_ifp = ifp_list[ifp_on];
2280
2281 /*
2282 * Only arp on interfaces marked for IPv4LL
2283 * ARPing. This may mean that we don't ARP on
2284 * the interface the subnet route points to.
2285 */
2286 if (!(cur_ifp->if_eflags & IFEF_ARPLL))
2287 continue;
2288
2289 /* Find the source IP address */
2290 ifnet_lock_shared(cur_ifp);
2291 source_hw = cur_ifp->if_lladdr;
2292 TAILQ_FOREACH(source_ip, &cur_ifp->if_addrhead,
2293 ifa_link) {
2294 IFA_LOCK(source_ip);
2295 if (source_ip->ifa_addr != NULL &&
2296 source_ip->ifa_addr->sa_family ==
2297 AF_INET) {
2298 /* Copy the source IP address */
2299 source_ip_copy =
2300 *(struct sockaddr_in *)
2301 source_ip->ifa_addr;
2302 IFA_UNLOCK(source_ip);
2303 break;
2304 }
2305 IFA_UNLOCK(source_ip);
2306 }
2307
2308 /* No IP Source, don't arp */
2309 if (source_ip == NULL) {
2310 ifnet_lock_done(cur_ifp);
2311 continue;
2312 }
2313
2314 IFA_ADDREF(source_hw);
2315 ifnet_lock_done(cur_ifp);
2316
2317 /* Send the ARP */
2318 new_result = dlil_send_arp_internal(cur_ifp,
2319 arpop,
2320 (struct sockaddr_dl *)source_hw->ifa_addr,
2321 (struct sockaddr *)&source_ip_copy, NULL,
2322 target_proto);
2323
2324 IFA_REMREF(source_hw);
2325 if (result == ENOTSUP) {
2326 result = new_result;
2327 }
2328 }
2329 ifnet_list_free(ifp_list);
2330 }
2331 } else {
2332 result = dlil_send_arp_internal(ifp, arpop, sender_hw,
2333 sender_proto, target_hw, target_proto);
2334 }
2335
2336 return (result);
2337 }
2338
2339 /*
2340 * Caller must hold ifnet head lock.
2341 */
2342 static int
2343 ifnet_lookup(struct ifnet *ifp)
2344 {
2345 struct ifnet *_ifp;
2346
2347 lck_rw_assert(&ifnet_head_lock, LCK_RW_ASSERT_HELD);
2348 TAILQ_FOREACH(_ifp, &ifnet_head, if_link) {
2349 if (_ifp == ifp)
2350 break;
2351 }
2352 return (_ifp != NULL);
2353 }
2354 /*
2355 * Caller must pass a non-zero refio argument to obtain an
2356 * I/O reference count. This prevents ifnet_detach from
2357 * completing while there are outstanding I/O references.
2358 */
2359 int
2360 ifnet_is_attached(struct ifnet *ifp, int refio)
2361 {
2362 int ret;
2363
2364 lck_mtx_lock_spin(&ifp->if_ref_lock);
2365 if ((ret = ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) ==
2366 IFRF_ATTACHED))) {
2367 if (refio > 0)
2368 ifp->if_refio++;
2369 }
2370 lck_mtx_unlock(&ifp->if_ref_lock);
2371
2372 return (ret);
2373 }
2374
2375 void
2376 ifnet_decr_iorefcnt(struct ifnet *ifp)
2377 {
2378 lck_mtx_lock_spin(&ifp->if_ref_lock);
2379 VERIFY(ifp->if_refio > 0);
2380 VERIFY((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) != 0);
2381 ifp->if_refio--;
2382
2383 /* If there are no more outstanding I/O references, wake up the
2384 * ifnet_detach thread if the detaching flag is set.
2385 */
2386 if (ifp->if_refio == 0 &&
2387 (ifp->if_refflags & IFRF_DETACHING) != 0) {
2388 /* Convert the spinlock to a regular mutex if we have
2389 * to wait for any reason while doing a wakeup.
2390 */
2391 lck_mtx_convert_spin(&ifp->if_ref_lock);
2392 wakeup(&(ifp->if_refio));
2393 }
2394 lck_mtx_unlock(&ifp->if_ref_lock);
2395 }
2396
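/*
 * Editor's illustration (not part of the original file): the canonical
 * I/O-refcount pattern built from the two routines above, as used by
 * dlil_output() and ifnet_ioctl() in this file.
 */
#if 0	/* illustrative only */
static errno_t
with_io_ref_example(struct ifnet *ifp)
{
	if (!ifnet_is_attached(ifp, 1))
		return (ENXIO);	/* detached/detaching; don't touch ifp */

	/* ... safe to use ifp; ifnet_detach_final() is held off ... */

	ifnet_decr_iorefcnt(ifp);
	return (0);
}
#endif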
2397 static void
2398 dlil_if_trace(struct dlil_ifnet *dl_if, int refhold)
2399 {
2400 struct dlil_ifnet_dbg *dl_if_dbg = (struct dlil_ifnet_dbg *)dl_if;
2401 ctrace_t *tr;
2402 u_int32_t idx;
2403 u_int16_t *cnt;
2404
2405 if (!(dl_if->dl_if_flags & DLIF_DEBUG)) {
2406 panic("%s: dl_if %p has no debug structure", __func__, dl_if);
2407 /* NOTREACHED */
2408 }
2409
2410 if (refhold) {
2411 cnt = &dl_if_dbg->dldbg_if_refhold_cnt;
2412 tr = dl_if_dbg->dldbg_if_refhold;
2413 } else {
2414 cnt = &dl_if_dbg->dldbg_if_refrele_cnt;
2415 tr = dl_if_dbg->dldbg_if_refrele;
2416 }
2417
2418 idx = atomic_add_16_ov(cnt, 1) % IF_REF_TRACE_HIST_SIZE;
2419 ctrace_record(&tr[idx]);
2420 }
2421
2422 errno_t
2423 dlil_if_ref(struct ifnet *ifp)
2424 {
2425 struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
2426
2427 if (dl_if == NULL)
2428 return (EINVAL);
2429
2430 lck_mtx_lock_spin(&dl_if->dl_if_lock);
2431 ++dl_if->dl_if_refcnt;
2432 if (dl_if->dl_if_refcnt == 0) {
2433 panic("%s: wraparound refcnt for ifp=%p", __func__, ifp);
2434 /* NOTREACHED */
2435 }
2436 if (dl_if->dl_if_trace != NULL)
2437 (*dl_if->dl_if_trace)(dl_if, TRUE);
2438 lck_mtx_unlock(&dl_if->dl_if_lock);
2439
2440 return (0);
2441 }
2442
2443 errno_t
2444 dlil_if_free(struct ifnet *ifp)
2445 {
2446 struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
2447
2448 if (dl_if == NULL)
2449 return (EINVAL);
2450
2451 lck_mtx_lock_spin(&dl_if->dl_if_lock);
2452 if (dl_if->dl_if_refcnt == 0) {
2453 panic("%s: negative refcnt for ifp=%p", __func__, ifp);
2454 /* NOTREACHED */
2455 }
2456 --dl_if->dl_if_refcnt;
2457 if (dl_if->dl_if_trace != NULL)
2458 (*dl_if->dl_if_trace)(dl_if, FALSE);
2459 lck_mtx_unlock(&dl_if->dl_if_lock);
2460
2461 return (0);
2462 }
2463
2464 static errno_t
2465 dlil_attach_protocol_internal(struct if_proto *proto,
2466 const struct ifnet_demux_desc *demux_list, u_int32_t demux_count)
2467 {
2468 struct kev_dl_proto_data ev_pr_data;
2469 struct ifnet *ifp = proto->ifp;
2470 int retval = 0;
2471 u_int32_t hash_value = proto_hash_value(proto->protocol_family);
2472 struct if_proto *prev_proto;
2473 struct if_proto *_proto;
2474
2475 /* callee holds a proto refcnt upon success */
2476 ifnet_lock_exclusive(ifp);
2477 _proto = find_attached_proto(ifp, proto->protocol_family);
2478 if (_proto != NULL) {
2479 ifnet_lock_done(ifp);
2480 if_proto_free(_proto);
2481 return (EEXIST);
2482 }
2483
2484 /*
2485 * Call family module add_proto routine so it can refine the
2486 * demux descriptors as it wishes.
2487 */
2488 retval = ifp->if_add_proto(ifp, proto->protocol_family, demux_list,
2489 demux_count);
2490 if (retval) {
2491 ifnet_lock_done(ifp);
2492 return (retval);
2493 }
2494
2495 /*
2496 * Insert the protocol in the hash
2497 */
2498 prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
2499 while (prev_proto != NULL && SLIST_NEXT(prev_proto, next_hash) != NULL)
2500 prev_proto = SLIST_NEXT(prev_proto, next_hash);
2501 if (prev_proto)
2502 SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
2503 else
2504 SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value],
2505 proto, next_hash);
2506
2507 /* hold a proto refcnt for attach */
2508 if_proto_ref(proto);
2509
2510 /*
2511 * The reserved field carries the number of protocols still attached
2512 * (subject to change)
2513 */
2514 ev_pr_data.proto_family = proto->protocol_family;
2515 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
2516 ifnet_lock_done(ifp);
2517
2518 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
2519 (struct net_event_data *)&ev_pr_data,
2520 sizeof (struct kev_dl_proto_data));
2521 return (retval);
2522 }
2523
2524 errno_t
2525 ifnet_attach_protocol(ifnet_t ifp, protocol_family_t protocol,
2526 const struct ifnet_attach_proto_param *proto_details)
2527 {
2528 int retval = 0;
2529 struct if_proto *ifproto = NULL;
2530
2531 ifnet_head_lock_shared();
2532 if (ifp == NULL || protocol == 0 || proto_details == NULL) {
2533 retval = EINVAL;
2534 goto end;
2535 }
2536 /* Check that the interface is in the global list */
2537 if (!ifnet_lookup(ifp)) {
2538 retval = ENXIO;
2539 goto end;
2540 }
2541
2542 ifproto = zalloc(dlif_proto_zone);
2543 if (ifproto == NULL) {
2544 retval = ENOMEM;
2545 goto end;
2546 }
2547 bzero(ifproto, dlif_proto_size);
2548
2549 /* refcnt held above during lookup */
2550 ifproto->ifp = ifp;
2551 ifproto->protocol_family = protocol;
2552 ifproto->proto_kpi = kProtoKPI_v1;
2553 ifproto->kpi.v1.input = proto_details->input;
2554 ifproto->kpi.v1.pre_output = proto_details->pre_output;
2555 ifproto->kpi.v1.event = proto_details->event;
2556 ifproto->kpi.v1.ioctl = proto_details->ioctl;
2557 ifproto->kpi.v1.detached = proto_details->detached;
2558 ifproto->kpi.v1.resolve_multi = proto_details->resolve;
2559 ifproto->kpi.v1.send_arp = proto_details->send_arp;
2560
2561 retval = dlil_attach_protocol_internal(ifproto,
2562 proto_details->demux_list, proto_details->demux_count);
2563
2564 if (dlil_verbose) {
2565 printf("%s%d: attached v1 protocol %d\n", ifp->if_name,
2566 ifp->if_unit, protocol);
2567 }
2568
2569 end:
2570 if (retval != 0 && retval != EEXIST && ifp != NULL) {
2571 DLIL_PRINTF("%s%d: failed to attach v1 protocol %d (err=%d)\n",
2572 ifp->if_name, ifp->if_unit, protocol, retval);
2573 }
2574 ifnet_head_done();
2575 if (retval != 0 && ifproto != NULL)
2576 zfree(dlif_proto_zone, ifproto);
2577 return (retval);
2578 }
2579
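/*
 * Editor's illustration (not part of the original file): a minimal v1
 * protocol attach. Field names follow the ifnet_attach_proto_param
 * usage above; my_input/my_pre_output are hypothetical callbacks with
 * the proto_media_input/proto_media_preout shapes seen in this file.
 */
#if 0	/* illustrative only */
static errno_t my_input(ifnet_t, protocol_family_t, mbuf_t, char *);
static errno_t my_pre_output(ifnet_t, protocol_family_t, mbuf_t *,
    const struct sockaddr *, void *, char *, char *);

static errno_t
attach_proto_example(ifnet_t ifp)
{
	struct ifnet_attach_proto_param param;

	bzero(&param, sizeof (param));
	param.input = my_input;
	param.pre_output = my_pre_output;
	param.demux_list = NULL;	/* let the family refine demuxing */
	param.demux_count = 0;

	/* EEXIST means this family is already attached to ifp */
	return (ifnet_attach_protocol(ifp, PF_INET, &param));
}
#endif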
2580 errno_t
2581 ifnet_attach_protocol_v2(ifnet_t ifp, protocol_family_t protocol,
2582 const struct ifnet_attach_proto_param_v2 *proto_details)
2583 {
2584 int retval = 0;
2585 struct if_proto *ifproto = NULL;
2586
2587 ifnet_head_lock_shared();
2588 if (ifp == NULL || protocol == 0 || proto_details == NULL) {
2589 retval = EINVAL;
2590 goto end;
2591 }
2592 /* Check that the interface is in the global list */
2593 if (!ifnet_lookup(ifp)) {
2594 retval = ENXIO;
2595 goto end;
2596 }
2597
2598 ifproto = zalloc(dlif_proto_zone);
2599 if (ifproto == NULL) {
2600 retval = ENOMEM;
2601 goto end;
2602 }
2603 bzero(ifproto, dlif_proto_size);
2604
2605 /* refcnt held above during lookup */
2606 ifproto->ifp = ifp;
2607 ifproto->protocol_family = protocol;
2608 ifproto->proto_kpi = kProtoKPI_v2;
2609 ifproto->kpi.v2.input = proto_details->input;
2610 ifproto->kpi.v2.pre_output = proto_details->pre_output;
2611 ifproto->kpi.v2.event = proto_details->event;
2612 ifproto->kpi.v2.ioctl = proto_details->ioctl;
2613 ifproto->kpi.v2.detached = proto_details->detached;
2614 ifproto->kpi.v2.resolve_multi = proto_details->resolve;
2615 ifproto->kpi.v2.send_arp = proto_details->send_arp;
2616
2617 retval = dlil_attach_protocol_internal(ifproto,
2618 proto_details->demux_list, proto_details->demux_count);
2619
2620 if (dlil_verbose) {
2621 printf("%s%d: attached v2 protocol %d\n", ifp->if_name,
2622 ifp->if_unit, protocol);
2623 }
2624
2625 end:
2626 if (retval != 0 && retval != EEXIST && ifp != NULL) {
2627 DLIL_PRINTF("%s%d: failed to attach v2 protocol %d (err=%d)\n",
2628 ifp->if_name, ifp->if_unit, protocol, retval);
2629 }
2630 ifnet_head_done();
2631 if (retval != 0 && ifproto != NULL)
2632 zfree(dlif_proto_zone, ifproto);
2633 return (retval);
2634 }
2635
2636 errno_t
2637 ifnet_detach_protocol(ifnet_t ifp, protocol_family_t proto_family)
2638 {
2639 struct if_proto *proto = NULL;
2640 int retval = 0;
2641
2642 if (ifp == NULL || proto_family == 0) {
2643 retval = EINVAL;
2644 goto end;
2645 }
2646
2647 ifnet_lock_exclusive(ifp);
2648 /* callee holds a proto refcnt upon success */
2649 proto = find_attached_proto(ifp, proto_family);
2650 if (proto == NULL) {
2651 retval = ENXIO;
2652 ifnet_lock_done(ifp);
2653 goto end;
2654 }
2655
2656 /* call family module del_proto */
2657 if (ifp->if_del_proto)
2658 ifp->if_del_proto(ifp, proto->protocol_family);
2659
2660 SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)],
2661 proto, if_proto, next_hash);
2662
2663 if (proto->proto_kpi == kProtoKPI_v1) {
2664 proto->kpi.v1.input = ifproto_media_input_v1;
2665 proto->kpi.v1.pre_output= ifproto_media_preout;
2666 proto->kpi.v1.event = ifproto_media_event;
2667 proto->kpi.v1.ioctl = ifproto_media_ioctl;
2668 proto->kpi.v1.resolve_multi = ifproto_media_resolve_multi;
2669 proto->kpi.v1.send_arp = ifproto_media_send_arp;
2670 } else {
2671 proto->kpi.v2.input = ifproto_media_input_v2;
2672 proto->kpi.v2.pre_output = ifproto_media_preout;
2673 proto->kpi.v2.event = ifproto_media_event;
2674 proto->kpi.v2.ioctl = ifproto_media_ioctl;
2675 proto->kpi.v2.resolve_multi = ifproto_media_resolve_multi;
2676 proto->kpi.v2.send_arp = ifproto_media_send_arp;
2677 }
2678 proto->detached = 1;
2679 ifnet_lock_done(ifp);
2680
2681 if (dlil_verbose) {
2682 printf("%s%d: detached %s protocol %d\n", ifp->if_name,
2683 ifp->if_unit, (proto->proto_kpi == kProtoKPI_v1) ?
2684 "v1" : "v2", proto_family);
2685 }
2686
2687 /* release proto refcnt held during protocol attach */
2688 if_proto_free(proto);
2689
2690 /*
2691 * Release proto refcnt held during lookup; the rest of
2692 * protocol detach steps will happen when the last proto
2693 * reference is released.
2694 */
2695 if_proto_free(proto);
2696
2697 end:
2698 return (retval);
2699 }
2700
2701
2702 static errno_t
2703 ifproto_media_input_v1(struct ifnet *ifp, protocol_family_t protocol,
2704 struct mbuf *packet, char *header)
2705 {
2706 #pragma unused(ifp, protocol, packet, header)
2707 return (ENXIO);
2708 }
2709
2710 static errno_t
2711 ifproto_media_input_v2(struct ifnet *ifp, protocol_family_t protocol,
2712 struct mbuf *packet)
2713 {
2714 #pragma unused(ifp, protocol, packet)
2715 return (ENXIO);
2717 }
2718
2719 static errno_t
2720 ifproto_media_preout(struct ifnet *ifp, protocol_family_t protocol,
2721 mbuf_t *packet, const struct sockaddr *dest, void *route, char *frame_type,
2722 char *link_layer_dest)
2723 {
2724 #pragma unused(ifp, protocol, packet, dest, route, frame_type, link_layer_dest)
2725 return (ENXIO);
2727 }
2728
2729 static void
2730 ifproto_media_event(struct ifnet *ifp, protocol_family_t protocol,
2731 const struct kev_msg *event)
2732 {
2733 #pragma unused(ifp, protocol, event)
2734 }
2735
2736 static errno_t
2737 ifproto_media_ioctl(struct ifnet *ifp, protocol_family_t protocol,
2738 unsigned long command, void *argument)
2739 {
2740 #pragma unused(ifp, protocol, command, argument)
2741 return (ENXIO);
2742 }
2743
2744 static errno_t
2745 ifproto_media_resolve_multi(ifnet_t ifp, const struct sockaddr *proto_addr,
2746 struct sockaddr_dl *out_ll, size_t ll_len)
2747 {
2748 #pragma unused(ifp, proto_addr, out_ll, ll_len)
2749 return (ENXIO);
2750 }
2751
2752 static errno_t
2753 ifproto_media_send_arp(struct ifnet *ifp, u_short arpop,
2754 const struct sockaddr_dl *sender_hw, const struct sockaddr *sender_proto,
2755 const struct sockaddr_dl *target_hw, const struct sockaddr *target_proto)
2756 {
2757 #pragma unused(ifp, arpop, sender_hw, sender_proto, target_hw, target_proto)
2758 return (ENXIO);
2759 }
2760
2761 extern int if_next_index(void);
2762
2763 errno_t
2764 ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr)
2765 {
2766 struct ifnet *tmp_if;
2767 struct ifaddr *ifa;
2768 struct if_data_internal if_data_saved;
2769 struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
2770
2771 if (ifp == NULL)
2772 return (EINVAL);
2773
2774 ifnet_head_lock_exclusive();
2775 /* Verify we aren't already on the list */
2776 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2777 if (tmp_if == ifp) {
2778 ifnet_head_done();
2779 return (EEXIST);
2780 }
2781 }
2782
2783 lck_mtx_lock_spin(&ifp->if_ref_lock);
2784 if (ifp->if_refflags & IFRF_ATTACHED) {
2785 panic("%s: flags mismatch (attached set) ifp=%p",
2786 __func__, ifp);
2787 /* NOTREACHED */
2788 }
2789 lck_mtx_unlock(&ifp->if_ref_lock);
2790
2791 ifnet_lock_exclusive(ifp);
2792
2793 /* Sanity check */
2794 VERIFY(ifp->if_detaching_link.tqe_next == NULL);
2795 VERIFY(ifp->if_detaching_link.tqe_prev == NULL);
2796
2797 if (ll_addr != NULL) {
2798 if (ifp->if_addrlen == 0) {
2799 ifp->if_addrlen = ll_addr->sdl_alen;
2800 } else if (ll_addr->sdl_alen != ifp->if_addrlen) {
2801 ifnet_lock_done(ifp);
2802 ifnet_head_done();
2803 return (EINVAL);
2804 }
2805 }
2806
2807 /*
2808 * Allow interfaces without protocol families to attach
2809 * only if they have the necessary fields filled out.
2810 */
2811 if (ifp->if_add_proto == NULL || ifp->if_del_proto == NULL) {
2812 DLIL_PRINTF("%s: Attempt to attach interface without "
2813 "family module - %d\n", __func__, ifp->if_family);
2814 ifnet_lock_done(ifp);
2815 ifnet_head_done();
2816 return (ENODEV);
2817 }
2818
2819 /* Allocate protocol hash table */
2820 VERIFY(ifp->if_proto_hash == NULL);
2821 ifp->if_proto_hash = zalloc(dlif_phash_zone);
2822 if (ifp->if_proto_hash == NULL) {
2823 ifnet_lock_done(ifp);
2824 ifnet_head_done();
2825 return (ENOBUFS);
2826 }
2827 bzero(ifp->if_proto_hash, dlif_phash_size);
2828
2829 lck_mtx_lock_spin(&ifp->if_flt_lock);
2830 VERIFY(TAILQ_EMPTY(&ifp->if_flt_head));
2831 TAILQ_INIT(&ifp->if_flt_head);
2832 VERIFY(ifp->if_flt_busy == 0);
2833 VERIFY(ifp->if_flt_waiters == 0);
2834 lck_mtx_unlock(&ifp->if_flt_lock);
2835
2836 VERIFY(TAILQ_EMPTY(&ifp->if_prefixhead));
2837 TAILQ_INIT(&ifp->if_prefixhead);
2838
2839 if (!(dl_if->dl_if_flags & DLIF_REUSE)) {
2840 VERIFY(LIST_EMPTY(&ifp->if_multiaddrs));
2841 LIST_INIT(&ifp->if_multiaddrs);
2842 }
2843
2844 VERIFY(ifp->if_allhostsinm == NULL);
2845 VERIFY(TAILQ_EMPTY(&ifp->if_addrhead));
2846 TAILQ_INIT(&ifp->if_addrhead);
2847
2848 if (ifp->if_snd.ifq_maxlen == 0)
2849 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2850
2851 if (ifp->if_index == 0) {
2852 int idx = if_next_index();
2853
2854 if (idx == -1) {
2855 ifp->if_index = 0;
2856 ifnet_lock_done(ifp);
2857 ifnet_head_done();
2858 return (ENOBUFS);
2859 }
2860 ifp->if_index = idx;
2861 }
2862 /* There should not be anything occupying this slot */
2863 VERIFY(ifindex2ifnet[ifp->if_index] == NULL);
2864
2865 /* allocate (if needed) and initialize a link address */
2866 VERIFY(!(dl_if->dl_if_flags & DLIF_REUSE) || ifp->if_lladdr != NULL);
2867 ifa = dlil_alloc_lladdr(ifp, ll_addr);
2868 if (ifa == NULL) {
2869 ifnet_lock_done(ifp);
2870 ifnet_head_done();
2871 return (ENOBUFS);
2872 }
2873
2874 VERIFY(ifnet_addrs[ifp->if_index - 1] == NULL);
2875 ifnet_addrs[ifp->if_index - 1] = ifa;
2876
2877 /* make this address the first on the list */
2878 IFA_LOCK(ifa);
2879 /* hold a reference for ifnet_addrs[] */
2880 IFA_ADDREF_LOCKED(ifa);
2881 /* if_attach_link_ifa() holds a reference for ifa_link */
2882 if_attach_link_ifa(ifp, ifa);
2883 IFA_UNLOCK(ifa);
2884
2885 #if CONFIG_MACF_NET
2886 mac_ifnet_label_associate(ifp);
2887 #endif
2888
2889 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2890 ifindex2ifnet[ifp->if_index] = ifp;
2891
2892 /* Hold a reference to the underlying dlil_ifnet */
2893 ifnet_reference(ifp);
2894
2895 /*
2896 * A specific dlil input thread is created per Ethernet/cellular
2897 * interface. Pseudo interfaces and other types of interfaces use
2898 * the main ("loopback") thread.
2899 *
2900 * If the sysctl "net.link.generic.system.multi_threaded_input" is set
2901 * to zero, all packets will be handled by the main loopback thread,
2902 * reverting to 10.4.x behaviour.
2903 */
2904 if (dlil_multithreaded_input &&
2905 (ifp->if_type == IFT_ETHER || ifp->if_type == IFT_CELLULAR)) {
2906 int err;
2907
2908 ifp->if_input_thread = zalloc(dlif_inp_zone);
2909 if (ifp->if_input_thread == NULL) {
2910 panic("%s: ifp=%p couldn't alloc threading",
2911 __func__, ifp);
2912 /* NOTREACHED */
2913 }
2914 bzero(ifp->if_input_thread, dlif_inp_size);
2915 err = dlil_create_input_thread(ifp, ifp->if_input_thread);
2916 if (err != 0) {
2917 panic("%s: ifp=%p couldn't get a thread. "
2918 "err=%d", __func__, ifp, err);
2919 /* NOTREACHED */
2920 }
2921 #ifdef DLIL_DEBUG
2922 printf("%s: dlil thread for ifp=%p if_index=%d\n",
2923 __func__, ifp, ifp->if_index);
2924 #endif
2925 }
2926
2927 /* Clear stats (save and restore other fields that we care about) */
2928 if_data_saved = ifp->if_data;
2929 bzero(&ifp->if_data, sizeof (ifp->if_data));
2930 ifp->if_data.ifi_type = if_data_saved.ifi_type;
2931 ifp->if_data.ifi_typelen = if_data_saved.ifi_typelen;
2932 ifp->if_data.ifi_physical = if_data_saved.ifi_physical;
2933 ifp->if_data.ifi_addrlen = if_data_saved.ifi_addrlen;
2934 ifp->if_data.ifi_hdrlen = if_data_saved.ifi_hdrlen;
2935 ifp->if_data.ifi_mtu = if_data_saved.ifi_mtu;
2936 ifp->if_data.ifi_baudrate = if_data_saved.ifi_baudrate;
2937 ifp->if_data.ifi_hwassist = if_data_saved.ifi_hwassist;
2938 ifp->if_data.ifi_tso_v4_mtu = if_data_saved.ifi_tso_v4_mtu;
2939 ifp->if_data.ifi_tso_v6_mtu = if_data_saved.ifi_tso_v6_mtu;
2940 ifnet_touch_lastchange(ifp);
2941
2942 /* Record attach PC stacktrace */
2943 ctrace_record(&((struct dlil_ifnet *)ifp)->dl_if_attach);
2944
2945 ifp->if_updatemcasts = 0;
2946 if (!LIST_EMPTY(&ifp->if_multiaddrs)) {
2947 struct ifmultiaddr *ifma;
2948 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2949 IFMA_LOCK(ifma);
2950 if (ifma->ifma_addr->sa_family == AF_LINK ||
2951 ifma->ifma_addr->sa_family == AF_UNSPEC)
2952 ifp->if_updatemcasts++;
2953 IFMA_UNLOCK(ifma);
2954 }
2955
2956 printf("%s%d: attached with %d suspended link-layer multicast "
2957 "membership(s)\n", ifp->if_name, ifp->if_unit,
2958 ifp->if_updatemcasts);
2959 }
2960
2961 ifnet_lock_done(ifp);
2962 ifnet_head_done();
2963
2964 lck_mtx_lock(&ifp->if_cached_route_lock);
2965 /* Enable forwarding cached route */
2966 ifp->if_fwd_cacheok = 1;
2967 /* Clean up any existing cached routes */
2968 if (ifp->if_fwd_route.ro_rt != NULL)
2969 rtfree(ifp->if_fwd_route.ro_rt);
2970 bzero(&ifp->if_fwd_route, sizeof (ifp->if_fwd_route));
2971 if (ifp->if_src_route.ro_rt != NULL)
2972 rtfree(ifp->if_src_route.ro_rt);
2973 bzero(&ifp->if_src_route, sizeof (ifp->if_src_route));
2974 if (ifp->if_src_route6.ro_rt != NULL)
2975 rtfree(ifp->if_src_route6.ro_rt);
2976 bzero(&ifp->if_src_route6, sizeof (ifp->if_src_route6));
2977 lck_mtx_unlock(&ifp->if_cached_route_lock);
2978
2979 ifnet_llreach_ifattach(ifp, (dl_if->dl_if_flags & DLIF_REUSE));
2980
2981 /*
2982 * Allocate and attach IGMPv3/MLDv2 interface specific variables
2983 * and trees; do this before the ifnet is marked as attached.
2984 * The ifnet keeps the reference to the info structures even after
2985 * the ifnet is detached, since the network-layer records still
2986 * refer to the info structures even after that. This also
2987 * makes it possible for them to still function after the ifnet
2988 * is recycled or reattached.
2989 */
2990 #if INET
2991 if (IGMP_IFINFO(ifp) == NULL) {
2992 IGMP_IFINFO(ifp) = igmp_domifattach(ifp, M_WAITOK);
2993 VERIFY(IGMP_IFINFO(ifp) != NULL);
2994 } else {
2995 VERIFY(IGMP_IFINFO(ifp)->igi_ifp == ifp);
2996 igmp_domifreattach(IGMP_IFINFO(ifp));
2997 }
2998 #endif /* INET */
2999 #if INET6
3000 if (MLD_IFINFO(ifp) == NULL) {
3001 MLD_IFINFO(ifp) = mld_domifattach(ifp, M_WAITOK);
3002 VERIFY(MLD_IFINFO(ifp) != NULL);
3003 } else {
3004 VERIFY(MLD_IFINFO(ifp)->mli_ifp == ifp);
3005 mld_domifreattach(MLD_IFINFO(ifp));
3006 }
3007 #endif /* INET6 */
3008
3009 /*
3010 * Finally, mark this ifnet as attached.
3011 */
3012 lck_mtx_lock(rnh_lock);
3013 ifnet_lock_exclusive(ifp);
3014 lck_mtx_lock_spin(&ifp->if_ref_lock);
3015 ifp->if_refflags = IFRF_ATTACHED;
3016 lck_mtx_unlock(&ifp->if_ref_lock);
3017 if (net_rtref) {
3018 /* boot-args override; enable idle notification */
3019 (void) ifnet_set_idle_flags_locked(ifp, IFRF_IDLE_NOTIFY,
3020 IFRF_IDLE_NOTIFY);
3021 } else {
3022 /* apply previous request(s) to set the idle flags, if any */
3023 (void) ifnet_set_idle_flags_locked(ifp, ifp->if_idle_new_flags,
3024 ifp->if_idle_new_flags_mask);
3025
3026 }
3027 ifnet_lock_done(ifp);
3028 lck_mtx_unlock(rnh_lock);
3029
3030 #if PF
3031 /*
3032 * Attach packet filter to this interface, if enabled.
3033 */
3034 pf_ifnet_hook(ifp, 1);
3035 #endif /* PF */
3036
3037 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, NULL, 0);
3038
3039 if (dlil_verbose) {
3040 printf("%s%d: attached%s\n", ifp->if_name, ifp->if_unit,
3041 (dl_if->dl_if_flags & DLIF_REUSE) ? " (recycled)" : "");
3042 }
3043
3044 return (0);
3045 }
3046
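/*
 * Editor's illustration (not part of the original file): handing a
 * 6-byte link-layer address to ifnet_attach(). The address is copied by
 * dlil_alloc_lladdr(), so the sockaddr_dl may live on the stack.
 */
#if 0	/* illustrative only */
static errno_t
attach_with_lladdr_example(ifnet_t ifp, const u_char mac[6])
{
	struct sockaddr_dl sdl;

	bzero(&sdl, sizeof (sdl));
	sdl.sdl_len = sizeof (sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_alen = 6;	/* must match ifp->if_addrlen if nonzero */
	bcopy(mac, LLADDR(&sdl), 6);

	return (ifnet_attach(ifp, &sdl));
}
#endif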
3047 /*
3048 * Prepare the storage for the first/permanent link address, which
3049 * must have the same lifetime as the ifnet itself. Although the link
3050 * address gets removed from if_addrhead and ifnet_addrs[] at detach time,
3051 * its location in memory must never change as it may still be referred
3052 * to by some parts of the system afterwards (unfortunate implementation
3053 * artifacts inherited from BSD.)
3054 *
3055 * Caller must hold ifnet lock as writer.
3056 */
3057 static struct ifaddr *
3058 dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr)
3059 {
3060 struct ifaddr *ifa, *oifa;
3061 struct sockaddr_dl *asdl, *msdl;
3062 char workbuf[IFNAMSIZ*2];
3063 int namelen, masklen, socksize;
3064 struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
3065
3066 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
3067 VERIFY(ll_addr == NULL || ll_addr->sdl_alen == ifp->if_addrlen);
3068
3069 namelen = snprintf(workbuf, sizeof (workbuf), "%s%d",
3070 ifp->if_name, ifp->if_unit);
3071 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
3072 socksize = masklen + ifp->if_addrlen;
3073 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof (u_int32_t) - 1)))
3074 if ((u_int32_t)socksize < sizeof (struct sockaddr_dl))
3075 socksize = sizeof(struct sockaddr_dl);
3076 socksize = ROUNDUP(socksize);
3077 #undef ROUNDUP
3078
3079 ifa = ifp->if_lladdr;
3080 if (socksize > DLIL_SDLMAXLEN ||
3081 (ifa != NULL && ifa != &dl_if->dl_if_lladdr.ifa)) {
3082 /*
3083 * Rare, but in the event that the link address requires
3084 * more storage space than DLIL_SDLMAXLEN, allocate the
3085 * largest possible storage for address and mask, such
3086 * that we can reuse the same space when if_addrlen grows.
3087 * This same space will be used when if_addrlen shrinks.
3088 */
3089 if (ifa == NULL || ifa == &dl_if->dl_if_lladdr.ifa) {
3090 int ifasize = sizeof (*ifa) + 2 * SOCK_MAXADDRLEN;
3091 ifa = _MALLOC(ifasize, M_IFADDR, M_WAITOK | M_ZERO);
3092 if (ifa == NULL)
3093 return (NULL);
3094 ifa_lock_init(ifa);
3095 /* Don't set IFD_ALLOC, as this is permanent */
3096 ifa->ifa_debug = IFD_LINK;
3097 }
3098 IFA_LOCK(ifa);
3099 /* address and mask sockaddr_dl locations */
3100 asdl = (struct sockaddr_dl *)(ifa + 1);
3101 bzero(asdl, SOCK_MAXADDRLEN);
3102 msdl = (struct sockaddr_dl *)((char *)asdl + SOCK_MAXADDRLEN);
3103 bzero(msdl, SOCK_MAXADDRLEN);
3104 } else {
3105 VERIFY(ifa == NULL || ifa == &dl_if->dl_if_lladdr.ifa);
3106 /*
3107 * Use the storage areas for address and mask within the
3108 * dlil_ifnet structure. This is the most common case.
3109 */
3110 if (ifa == NULL) {
3111 ifa = &dl_if->dl_if_lladdr.ifa;
3112 ifa_lock_init(ifa);
3113 /* Don't set IFD_ALLOC, as this is permanent */
3114 ifa->ifa_debug = IFD_LINK;
3115 }
3116 IFA_LOCK(ifa);
3117 /* address and mask sockaddr_dl locations */
3118 asdl = (struct sockaddr_dl *)&dl_if->dl_if_lladdr.asdl;
3119 bzero(asdl, sizeof (dl_if->dl_if_lladdr.asdl));
3120 msdl = (struct sockaddr_dl *)&dl_if->dl_if_lladdr.msdl;
3121 bzero(msdl, sizeof (dl_if->dl_if_lladdr.msdl));
3122 }
3123
3124 /* hold a permanent reference for the ifnet itself */
3125 IFA_ADDREF_LOCKED(ifa);
3126 oifa = ifp->if_lladdr;
3127 ifp->if_lladdr = ifa;
3128
3129 VERIFY(ifa->ifa_debug == IFD_LINK);
3130 ifa->ifa_ifp = ifp;
3131 ifa->ifa_rtrequest = link_rtrequest;
3132 ifa->ifa_addr = (struct sockaddr *)asdl;
3133 asdl->sdl_len = socksize;
3134 asdl->sdl_family = AF_LINK;
3135 bcopy(workbuf, asdl->sdl_data, namelen);
3136 asdl->sdl_nlen = namelen;
3137 asdl->sdl_index = ifp->if_index;
3138 asdl->sdl_type = ifp->if_type;
3139 if (ll_addr != NULL) {
3140 asdl->sdl_alen = ll_addr->sdl_alen;
3141 bcopy(CONST_LLADDR(ll_addr), LLADDR(asdl), asdl->sdl_alen);
3142 } else {
3143 asdl->sdl_alen = 0;
3144 }
3145 ifa->ifa_netmask = (struct sockaddr*)msdl;
3146 msdl->sdl_len = masklen;
3147 while (namelen != 0)
3148 msdl->sdl_data[--namelen] = 0xff;
3149 IFA_UNLOCK(ifa);
3150
3151 if (oifa != NULL)
3152 IFA_REMREF(oifa);
3153
3154 return (ifa);
3155 }
3156
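/*
 * Editor's worked example (not part of the original file) for the sizing
 * arithmetic in dlil_alloc_lladdr() above, assuming "en0" with a 6-byte
 * address and an 8-byte sockaddr_dl header before sdl_data:
 *
 *	namelen  = strlen("en0")            = 3
 *	masklen  = offsetof(sdl_data) + 3   = 11
 *	socksize = masklen + if_addrlen(6)  = 17
 *	         -> raised to sizeof (struct sockaddr_dl) = 20,
 *	            then ROUNDUP() to a 4-byte multiple    = 20
 *
 * Both values fit comfortably within DLIL_SDLMAXLEN here, so the
 * embedded dl_if_lladdr storage is used rather than a separate
 * allocation.
 */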
3157 static void
3158 if_purgeaddrs(struct ifnet *ifp)
3159 {
3160 #if INET
3161 in_purgeaddrs(ifp);
3162 #endif /* INET */
3163 #if INET6
3164 in6_purgeaddrs(ifp);
3165 #endif /* INET6 */
3166 #if NETAT
3167 at_purgeaddrs(ifp);
3168 #endif
3169 }
3170
3171 errno_t
3172 ifnet_detach(ifnet_t ifp)
3173 {
3174 if (ifp == NULL)
3175 return (EINVAL);
3176
3177 ifnet_head_lock_exclusive();
3178 lck_mtx_lock(rnh_lock);
3179 ifnet_lock_exclusive(ifp);
3180
3181 /*
3182 * Check to see if this interface has previously triggered
3183 * aggressive protocol draining; if so, decrement the global
3184 * refcnt and clear PR_AGGDRAIN on the route domain if
3185 * there are no more of such an interface around.
3186 */
3187 (void) ifnet_set_idle_flags_locked(ifp, 0, ~0);
3188
3189 lck_mtx_lock_spin(&ifp->if_ref_lock);
3190 if (!(ifp->if_refflags & IFRF_ATTACHED)) {
3191 lck_mtx_unlock(&ifp->if_ref_lock);
3192 ifnet_lock_done(ifp);
3193 lck_mtx_unlock(rnh_lock);
3194 ifnet_head_done();
3195 return (EINVAL);
3196 } else if (ifp->if_refflags & IFRF_DETACHING) {
3197 /* Interface has already been detached */
3198 lck_mtx_unlock(&ifp->if_ref_lock);
3199 ifnet_lock_done(ifp);
3200 lck_mtx_unlock(rnh_lock);
3201 ifnet_head_done();
3202 return (ENXIO);
3203 }
3204 /* Indicate this interface is being detached */
3205 ifp->if_refflags &= ~IFRF_ATTACHED;
3206 ifp->if_refflags |= IFRF_DETACHING;
3207 lck_mtx_unlock(&ifp->if_ref_lock);
3208
3209 if (dlil_verbose)
3210 printf("%s%d: detaching\n", ifp->if_name, ifp->if_unit);
3211
3212 /*
3213 * Remove ifnet from the ifnet_head, ifindex2ifnet[]; it will
3214 * no longer be visible during lookups from this point.
3215 */
3216 VERIFY(ifindex2ifnet[ifp->if_index] == ifp);
3217 TAILQ_REMOVE(&ifnet_head, ifp, if_link);
3218 ifp->if_link.tqe_next = NULL;
3219 ifp->if_link.tqe_prev = NULL;
3220 ifindex2ifnet[ifp->if_index] = NULL;
3221
3222 /* Record detach PC stacktrace */
3223 ctrace_record(&((struct dlil_ifnet *)ifp)->dl_if_detach);
3224
3225 ifnet_lock_done(ifp);
3226 lck_mtx_unlock(rnh_lock);
3227 ifnet_head_done();
3228
3229 /* Let BPF know we're detaching */
3230 bpfdetach(ifp);
3231
3232 /* Mark the interface as DOWN */
3233 if_down(ifp);
3234
3235 /* Disable forwarding cached route */
3236 lck_mtx_lock(&ifp->if_cached_route_lock);
3237 ifp->if_fwd_cacheok = 0;
3238 lck_mtx_unlock(&ifp->if_cached_route_lock);
3239
3240 /*
3241 * Drain any deferred IGMPv3/MLDv2 query responses, but keep the
3242 * references to the info structures and leave them attached to
3243 * this ifnet.
3244 */
3245 #if INET
3246 igmp_domifdetach(ifp);
3247 #endif /* INET */
3248 #if INET6
3249 mld_domifdetach(ifp);
3250 #endif /* INET6 */
3251
3252 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, NULL, 0);
3253
3254 /* Let worker thread take care of the rest, to avoid reentrancy */
3255 lck_mtx_lock(&dlil_ifnet_lock);
3256 ifnet_detaching_enqueue(ifp);
3257 lck_mtx_unlock(&dlil_ifnet_lock);
3258
3259 return (0);
3260 }
3261
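/*
 * Editor's illustration (not part of the original file): teardown is
 * finished asynchronously by the worker thread, so a driver must not
 * reclaim its state when ifnet_detach() returns; it waits for its
 * detached callback (invoked as if_free from ifnet_detach_final()).
 * my_softc_free is a hypothetical cleanup routine.
 */
#if 0	/* illustrative only */
static void
driver_detached_example(ifnet_t ifp)
{
	/* the stack is done with ifp at this point */
	my_softc_free(ifnet_softc(ifp));
	ifnet_release(ifp);	/* drop the driver's own reference */
}
#endif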
3262 static void
3263 ifnet_detaching_enqueue(struct ifnet *ifp)
3264 {
3265 lck_mtx_assert(&dlil_ifnet_lock, LCK_MTX_ASSERT_OWNED);
3266
3267 ++ifnet_detaching_cnt;
3268 VERIFY(ifnet_detaching_cnt != 0);
3269 TAILQ_INSERT_TAIL(&ifnet_detaching_head, ifp, if_detaching_link);
3270 wakeup((caddr_t)&ifnet_delayed_run);
3271 }
3272
3273 static struct ifnet *
3274 ifnet_detaching_dequeue(void)
3275 {
3276 struct ifnet *ifp;
3277
3278 lck_mtx_assert(&dlil_ifnet_lock, LCK_MTX_ASSERT_OWNED);
3279
3280 ifp = TAILQ_FIRST(&ifnet_detaching_head);
3281 VERIFY(ifnet_detaching_cnt != 0 || ifp == NULL);
3282 if (ifp != NULL) {
3283 VERIFY(ifnet_detaching_cnt != 0);
3284 --ifnet_detaching_cnt;
3285 TAILQ_REMOVE(&ifnet_detaching_head, ifp, if_detaching_link);
3286 ifp->if_detaching_link.tqe_next = NULL;
3287 ifp->if_detaching_link.tqe_prev = NULL;
3288 }
3289 return (ifp);
3290 }
3291
3292 static void
3293 ifnet_delayed_thread_func(void)
3294 {
3295 struct ifnet *ifp;
3296
3297 for (;;) {
3298 lck_mtx_lock(&dlil_ifnet_lock);
3299 while (ifnet_detaching_cnt == 0) {
3300 (void) msleep(&ifnet_delayed_run, &dlil_ifnet_lock,
3301 (PZERO - 1), "ifnet_delayed_thread", NULL);
3302 }
3303
3304 VERIFY(TAILQ_FIRST(&ifnet_detaching_head) != NULL);
3305
3306 /* Take care of detaching ifnet */
3307 ifp = ifnet_detaching_dequeue();
3308 if (ifp != NULL) {
3309 lck_mtx_unlock(&dlil_ifnet_lock);
3310 ifnet_detach_final(ifp);
3311 } else {
3312 lck_mtx_unlock(&dlil_ifnet_lock);
3313 }
3314 }
3315 }
3316
3317 static void
3318 ifnet_detach_final(struct ifnet *ifp)
3319 {
3320 struct ifnet_filter *filter, *filter_next;
3321 struct ifnet_filter_head fhead;
3322 struct dlil_threading_info *inputthread;
3323 struct ifaddr *ifa;
3324 ifnet_detached_func if_free;
3325 int i;
3326
3327 lck_mtx_lock(&ifp->if_ref_lock);
3328 if (!(ifp->if_refflags & IFRF_DETACHING)) {
3329 panic("%s: flags mismatch (detaching not set) ifp=%p",
3330 __func__, ifp);
3331 /* NOTREACHED */
3332 }
3333
3334 /* Wait until the existing I/O references get released
3335 * before we proceed with ifnet_detach.
3336 */
3337 while (ifp->if_refio > 0) {
3338 printf("%s: Waiting for IO references on %s%d interface "
3339 "to be released\n", __func__, ifp->if_name, ifp->if_unit);
3340 (void) msleep(&(ifp->if_refio), &ifp->if_ref_lock,
3341 (PZERO - 1), "ifnet_ioref_wait", NULL);
3342 }
3343 lck_mtx_unlock(&ifp->if_ref_lock);
3344
3345 /* Detach interface filters */
3346 lck_mtx_lock(&ifp->if_flt_lock);
3347 if_flt_monitor_enter(ifp);
3348
3349 lck_mtx_assert(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);
3350 fhead = ifp->if_flt_head;
3351 TAILQ_INIT(&ifp->if_flt_head);
3352
3353 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
3354 filter_next = TAILQ_NEXT(filter, filt_next);
3355 lck_mtx_unlock(&ifp->if_flt_lock);
3356
3357 dlil_detach_filter_internal(filter, 1);
3358 lck_mtx_lock(&ifp->if_flt_lock);
3359 }
3360 if_flt_monitor_leave(ifp);
3361 lck_mtx_unlock(&ifp->if_flt_lock);
3362
3363 /* Tell upper layers to drop their network addresses */
3364 if_purgeaddrs(ifp);
3365
3366 ifnet_lock_exclusive(ifp);
3367
3368 /* Unplumb all protocols */
3369 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
3370 struct if_proto *proto;
3371
3372 proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
3373 while (proto != NULL) {
3374 protocol_family_t family = proto->protocol_family;
3375 ifnet_lock_done(ifp);
3376 proto_unplumb(family, ifp);
3377 ifnet_lock_exclusive(ifp);
3378 proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
3379 }
3380 /* There should not be any protocols left */
3381 VERIFY(SLIST_EMPTY(&ifp->if_proto_hash[i]));
3382 }
3383 zfree(dlif_phash_zone, ifp->if_proto_hash);
3384 ifp->if_proto_hash = NULL;
3385
3386 /* Detach (permanent) link address from if_addrhead */
3387 ifa = TAILQ_FIRST(&ifp->if_addrhead);
3388 VERIFY(ifnet_addrs[ifp->if_index - 1] == ifa);
3389 IFA_LOCK(ifa);
3390 if_detach_link_ifa(ifp, ifa);
3391 IFA_UNLOCK(ifa);
3392
3393 /* Remove (permanent) link address from ifnet_addrs[] */
3394 IFA_REMREF(ifa);
3395 ifnet_addrs[ifp->if_index - 1] = NULL;
3396
3397 /* This interface should not be on {ifnet_head,detaching} */
3398 VERIFY(ifp->if_link.tqe_next == NULL);
3399 VERIFY(ifp->if_link.tqe_prev == NULL);
3400 VERIFY(ifp->if_detaching_link.tqe_next == NULL);
3401 VERIFY(ifp->if_detaching_link.tqe_prev == NULL);
3402
3403 /* Prefix list should be empty by now */
3404 VERIFY(TAILQ_EMPTY(&ifp->if_prefixhead));
3405
3406 /* The slot should have been emptied */
3407 VERIFY(ifindex2ifnet[ifp->if_index] == NULL);
3408
3409 /* There should not be any addresses left */
3410 VERIFY(TAILQ_EMPTY(&ifp->if_addrhead));
3411
3412 /*
3413 * If thread affinity was set for the workloop thread, we will need
3414 * to tear down the affinity and release the extra reference count
3415 * taken at attach time.
3416 */
3417 if ((inputthread = ifp->if_input_thread) != NULL) {
3418 if (inputthread->net_affinity) {
3419 struct thread *tp;
3420
3421 if (inputthread == dlil_lo_thread_ptr) {
3422 panic("%s: Thread affinity should not be "
3423 "enabled on the loopback dlil input "
3424 "thread", __func__);
3425 /* NOTREACHED */
3426 }
3427
3428 lck_mtx_lock_spin(&inputthread->input_lck);
3429 tp = inputthread->workloop_thread;
3430 inputthread->workloop_thread = NULL;
3431 inputthread->tag = 0;
3432 inputthread->net_affinity = FALSE;
3433 lck_mtx_unlock(&inputthread->input_lck);
3434
3435 /* Tear down workloop thread affinity */
3436 if (tp != NULL) {
3437 (void) dlil_affinity_set(tp,
3438 THREAD_AFFINITY_TAG_NULL);
3439 thread_deallocate(tp);
3440 }
3441
3442 /* Tear down dlil input thread affinity */
3443 tp = inputthread->input_thread;
3444 (void) dlil_affinity_set(tp, THREAD_AFFINITY_TAG_NULL);
3445 thread_deallocate(tp);
3446 }
3447
3448 /* cleanup ifp dlil input thread, if any */
3449 ifp->if_input_thread = NULL;
3450
3451 if (inputthread != dlil_lo_thread_ptr) {
3452 #ifdef DLIL_DEBUG
3453 printf("%s: wakeup thread threadinfo: %p "
3454 "input_thread=%p threads: cur=%d max=%d\n",
3455 __func__, inputthread, inputthread->input_thread,
3456 dlil_multithreaded_input, cur_dlil_input_threads);
3457 #endif
3458 lck_mtx_lock_spin(&inputthread->input_lck);
3459
3460 inputthread->input_waiting |= DLIL_INPUT_TERMINATE;
3461 if (!(inputthread->input_waiting & DLIL_INPUT_RUNNING))
3462 wakeup((caddr_t)&inputthread->input_waiting);
3463
3464 lck_mtx_unlock(&inputthread->input_lck);
3465 }
3466 }
3467
3468 /* The driver might unload, so point these to ourselves */
3469 if_free = ifp->if_free;
3470 ifp->if_output = ifp_if_output;
3471 ifp->if_ioctl = ifp_if_ioctl;
3472 ifp->if_set_bpf_tap = ifp_if_set_bpf_tap;
3473 ifp->if_free = ifp_if_free;
3474 ifp->if_demux = ifp_if_demux;
3475 ifp->if_event = ifp_if_event;
3476 ifp->if_framer = ifp_if_framer;
3477 ifp->if_add_proto = ifp_if_add_proto;
3478 ifp->if_del_proto = ifp_if_del_proto;
3479 ifp->if_check_multi = ifp_if_check_multi;
3480
3481 ifnet_lock_done(ifp);
3482
3483 #if PF
3484 /*
3485 * Detach this interface from packet filter, if enabled.
3486 */
3487 pf_ifnet_hook(ifp, 0);
3488 #endif /* PF */
3489
3490 /* Filter list should be empty */
3491 lck_mtx_lock_spin(&ifp->if_flt_lock);
3492 VERIFY(TAILQ_EMPTY(&ifp->if_flt_head));
3493 VERIFY(ifp->if_flt_busy == 0);
3494 VERIFY(ifp->if_flt_waiters == 0);
3495 lck_mtx_unlock(&ifp->if_flt_lock);
3496
3497 /* Last chance to cleanup any cached route */
3498 lck_mtx_lock(&ifp->if_cached_route_lock);
3499 VERIFY(!ifp->if_fwd_cacheok);
3500 if (ifp->if_fwd_route.ro_rt != NULL)
3501 rtfree(ifp->if_fwd_route.ro_rt);
3502 bzero(&ifp->if_fwd_route, sizeof (ifp->if_fwd_route));
3503 if (ifp->if_src_route.ro_rt != NULL)
3504 rtfree(ifp->if_src_route.ro_rt);
3505 bzero(&ifp->if_src_route, sizeof (ifp->if_src_route));
3506 if (ifp->if_src_route6.ro_rt != NULL)
3507 rtfree(ifp->if_src_route6.ro_rt);
3508 bzero(&ifp->if_src_route6, sizeof (ifp->if_src_route6));
3509 lck_mtx_unlock(&ifp->if_cached_route_lock);
3510
3511 ifnet_llreach_ifdetach(ifp);
3512
3513 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, NULL, 0);
3514
3515 if (if_free != NULL)
3516 if_free(ifp);
3517
3518 /*
3519 * Finally, mark this ifnet as detached.
3520 */
3521 lck_mtx_lock_spin(&ifp->if_ref_lock);
3522 if (!(ifp->if_refflags & IFRF_DETACHING)) {
3523 panic("%s: flags mismatch (detaching not set) ifp=%p",
3524 __func__, ifp);
3525 /* NOTREACHED */
3526 }
3527 ifp->if_refflags &= ~IFRF_DETACHING;
3528 lck_mtx_unlock(&ifp->if_ref_lock);
3529
3530 if (dlil_verbose)
3531 printf("%s%d: detached\n", ifp->if_name, ifp->if_unit);
3532
3533 /* Release reference held during ifnet attach */
3534 ifnet_release(ifp);
3535 }
3536
3537 static errno_t
3538 ifp_if_output(struct ifnet *ifp, struct mbuf *m)
3539 {
3540 #pragma unused(ifp)
3541 m_freem(m);
3542 return (0);
3543 }
3544
3545 static errno_t
3546 ifp_if_demux(struct ifnet *ifp, struct mbuf *m, char *fh, protocol_family_t *pf)
3547 {
3548 #pragma unused(ifp, fh, pf)
3549 m_freem(m);
3550 return (EJUSTRETURN);
3551 }
3552
3553 static errno_t
3554 ifp_if_add_proto(struct ifnet *ifp, protocol_family_t pf,
3555 const struct ifnet_demux_desc *da, u_int32_t dc)
3556 {
3557 #pragma unused(ifp, pf, da, dc)
3558 return (EINVAL);
3559 }
3560
3561 static errno_t
3562 ifp_if_del_proto(struct ifnet *ifp, protocol_family_t pf)
3563 {
3564 #pragma unused(ifp, pf)
3565 return (EINVAL);
3566 }
3567
3568 static errno_t
3569 ifp_if_check_multi(struct ifnet *ifp, const struct sockaddr *sa)
3570 {
3571 #pragma unused(ifp, sa)
3572 return (EOPNOTSUPP);
3573 }
3574
3575 static errno_t
3576 ifp_if_framer(struct ifnet *ifp, struct mbuf **m,
3577 const struct sockaddr *sa, const char *ll, const char *t)
3578 {
3579 #pragma unused(ifp, m, sa, ll, t)
3580 m_freem(*m);
3581 *m = NULL;
3582 return (EJUSTRETURN);
3583 }
3584
3585 static errno_t
3586 ifp_if_ioctl(struct ifnet *ifp, unsigned long cmd, void *arg)
3587 {
3588 #pragma unused(ifp, cmd, arg)
3589 return (EOPNOTSUPP);
3590 }
3591
3592 static errno_t
3593 ifp_if_set_bpf_tap(struct ifnet *ifp, bpf_tap_mode tm, bpf_packet_func f)
3594 {
3595 #pragma unused(ifp, tm, f)
3596 /* XXX not sure what to do here */
3597 return (0);
3598 }
3599
3600 static void
3601 ifp_if_free(struct ifnet *ifp)
3602 {
3603 #pragma unused(ifp)
3604 }
3605
3606 static void
3607 ifp_if_event(struct ifnet *ifp, const struct kev_msg *e)
3608 {
3609 #pragma unused(ifp, e)
3610 }
3611
3612 __private_extern__
3613 int dlil_if_acquire(u_int32_t family, const void *uniqueid,
3614 size_t uniqueid_len, struct ifnet **ifp)
3615 {
3616 struct ifnet *ifp1 = NULL;
3617 struct dlil_ifnet *dlifp1 = NULL;
3618 void *buf, *base, **pbuf;
3619 int ret = 0;
3620
3621 lck_mtx_lock(&dlil_ifnet_lock);
3622 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
3623 ifp1 = (struct ifnet *)dlifp1;
3624
3625 if (ifp1->if_family != family)
3626 continue;
3627
3628 lck_mtx_lock(&dlifp1->dl_if_lock);
3629 /* same uniqueid and same len or no unique id specified */
3630 if ((uniqueid_len == dlifp1->dl_if_uniqueid_len) &&
3631 !bcmp(uniqueid, dlifp1->dl_if_uniqueid, uniqueid_len)) {
3632 /* check for matching interface in use */
3633 if (dlifp1->dl_if_flags & DLIF_INUSE) {
3634 if (uniqueid_len) {
3635 ret = EBUSY;
3636 lck_mtx_unlock(&dlifp1->dl_if_lock);
3637 goto end;
3638 }
3639 } else {
3640 dlifp1->dl_if_flags |= (DLIF_INUSE|DLIF_REUSE);
3641 lck_mtx_unlock(&dlifp1->dl_if_lock);
3642 *ifp = ifp1;
3643 goto end;
3644 }
3645 }
3646 lck_mtx_unlock(&dlifp1->dl_if_lock);
3647 }
3648
3649 /* no interface found, allocate a new one */
3650 buf = zalloc(dlif_zone);
3651 if (buf == NULL) {
3652 ret = ENOMEM;
3653 goto end;
3654 }
3655 bzero(buf, dlif_bufsize);
3656
3657 /* Get the 64-bit aligned base address for this object */
3658 base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t),
3659 sizeof (u_int64_t));
3660 VERIFY(((intptr_t)base + dlif_size) <= ((intptr_t)buf + dlif_bufsize));
3661
3662 /*
3663 * Wind back a pointer size from the aligned base and
3664 * save the original address so we can free it later.
3665 */
3666 pbuf = (void **)((intptr_t)base - sizeof (void *));
3667 *pbuf = buf;
3668 dlifp1 = base;
3669
3670 if (uniqueid_len) {
3671 MALLOC(dlifp1->dl_if_uniqueid, void *, uniqueid_len,
3672 M_NKE, M_WAITOK);
3673 if (dlifp1->dl_if_uniqueid == NULL) {
3674 zfree(dlif_zone, dlifp1);
3675 ret = ENOMEM;
3676 goto end;
3677 }
3678 bcopy(uniqueid, dlifp1->dl_if_uniqueid, uniqueid_len);
3679 dlifp1->dl_if_uniqueid_len = uniqueid_len;
3680 }
3681
3682 ifp1 = (struct ifnet *)dlifp1;
3683 dlifp1->dl_if_flags = DLIF_INUSE;
3684 if (ifnet_debug) {
3685 dlifp1->dl_if_flags |= DLIF_DEBUG;
3686 dlifp1->dl_if_trace = dlil_if_trace;
3687 }
3688 ifp1->if_name = dlifp1->dl_if_namestorage;
3689 #if CONFIG_MACF_NET
3690 mac_ifnet_label_init(ifp1);
3691 #endif
3692
3693 lck_mtx_init(&dlifp1->dl_if_lock, ifnet_lock_group, ifnet_lock_attr);
3694 lck_rw_init(&ifp1->if_lock, ifnet_lock_group, ifnet_lock_attr);
3695 lck_mtx_init(&ifp1->if_ref_lock, ifnet_lock_group, ifnet_lock_attr);
3696 lck_mtx_init(&ifp1->if_flt_lock, ifnet_lock_group, ifnet_lock_attr);
3697 lck_mtx_init(&ifp1->if_cached_route_lock, ifnet_lock_group,
3698 ifnet_lock_attr);
3699 lck_mtx_init(&ifp1->if_addrconfig_lock, ifnet_lock_group,
3700 ifnet_lock_attr);
3701 lck_rw_init(&ifp1->if_llreach_lock, ifnet_lock_group, ifnet_lock_attr);
3702
3703 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
3704
3705 *ifp = ifp1;
3706
3707 end:
3708 lck_mtx_unlock(&dlil_ifnet_lock);
3709
3710 VERIFY(dlifp1 == NULL || (IS_P2ALIGNED(dlifp1, sizeof (u_int64_t)) &&
3711 IS_P2ALIGNED(&ifp1->if_data, sizeof (u_int64_t))));
3712
3713 return (ret);
3714 }
3715
3716 __private_extern__ void
3717 dlil_if_release(ifnet_t ifp)
3718 {
3719 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
3720
3721 ifnet_lock_exclusive(ifp);
3722 lck_mtx_lock(&dlifp->dl_if_lock);
3723 dlifp->dl_if_flags &= ~DLIF_INUSE;
3724 strncpy(dlifp->dl_if_namestorage, ifp->if_name, IFNAMSIZ);
3725 ifp->if_name = dlifp->dl_if_namestorage;
3726 lck_mtx_unlock(&dlifp->dl_if_lock);
3727 #if CONFIG_MACF_NET
3728 /*
3729 * We can either recycle the MAC label here or in dlil_if_acquire().
3730 * It seems logical to do it here but this means that anything that
3731 * still has a handle on ifp will now see it as unlabeled.
3732 * Since the interface is "dead" that may be OK. Revisit later.
3733 */
3734 mac_ifnet_label_recycle(ifp);
3735 #endif
3736 ifnet_lock_done(ifp);
3737 }
3738
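/*
 * dlil_proto_unplumb_all: detach the protocols plumbed onto this
 * interface; invoked on the interface-detach path, after the protocol
 * DETACHING events have been delivered.
 */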
3739 __private_extern__ void
3740 dlil_proto_unplumb_all(struct ifnet *ifp)
3741 {
3742 /*
3743 * if_proto_hash[0-3] are for PF_INET, PF_INET6, PF_APPLETALK
3744 * and PF_VLAN, where each bucket contains exactly one entry;
3745 * PF_VLAN does not need an explicit unplumb.
3746 *
3747 * if_proto_hash[4] is for other protocols; we expect anything
3748 * in this bucket to respond to the DETACHING event (which would
3749 * have happened by now) and do the unplumb then.
3750 */
3751 (void) proto_unplumb(PF_INET, ifp);
3752 #if INET6
3753 (void) proto_unplumb(PF_INET6, ifp);
3754 #endif /* INET6 */
3755 #if NETAT
3756 (void) proto_unplumb(PF_APPLETALK, ifp);
3757 #endif /* NETAT */
3758 }
3759
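/*
 * The if_src_route{,6} caches hold the most recently used source route
 * for packets forwarded out this interface.  Copyout hands the caller a
 * private copy (with its own rtentry reference); copyin publishes an
 * updated copy, or just drops the reference when if_fwd_cacheok says
 * caching is disabled.  The mutex is taken in spin mode for the common
 * short hold and then converted to a full mutex, since the route copy
 * may block (rtfree can take further locks).
 */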
3760 static void
3761 ifp_src_route_copyout(struct ifnet *ifp, struct route *dst)
3762 {
3763 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3764 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3765
3766 route_copyout(dst, &ifp->if_src_route, sizeof (*dst));
3767
3768 lck_mtx_unlock(&ifp->if_cached_route_lock);
3769 }
3770
3771 static void
3772 ifp_src_route_copyin(struct ifnet *ifp, struct route *src)
3773 {
3774 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3775 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3776
3777 if (ifp->if_fwd_cacheok) {
3778 route_copyin(src, &ifp->if_src_route, sizeof (*src));
3779 } else {
3780 rtfree(src->ro_rt);
3781 src->ro_rt = NULL;
3782 }
3783 lck_mtx_unlock(&ifp->if_cached_route_lock);
3784 }
3785
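/* IPv6 flavors of the cached source-route helpers follow. */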
3786 #if INET6
3787 static void
3788 ifp_src_route6_copyout(struct ifnet *ifp, struct route_in6 *dst)
3789 {
3790 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3791 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3792
3793 route_copyout((struct route *)dst, (struct route *)&ifp->if_src_route6,
3794 sizeof (*dst));
3795
3796 lck_mtx_unlock(&ifp->if_cached_route_lock);
3797 }
3798
3799 static void
3800 ifp_src_route6_copyin(struct ifnet *ifp, struct route_in6 *src)
3801 {
3802 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3803 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3804
3805 if (ifp->if_fwd_cacheok) {
3806 route_copyin((struct route *)src,
3807 (struct route *)&ifp->if_src_route6, sizeof (*src));
3808 } else {
3809 rtfree(src->ro_rt);
3810 src->ro_rt = NULL;
3811 }
3812 lck_mtx_unlock(&ifp->if_cached_route_lock);
3813 }
3814 #endif /* INET6 */
3815
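/*
 * ifnet_cached_rtlookup_inet: return a referenced rtentry for src_ip,
 * preferring the per-interface cache.  The cached route is reused only
 * if it is still RTF_UP, matches the address, and belongs to the
 * current route generation; otherwise a scoped lookup repopulates the
 * cache.  The caller owns one reference on the result and must drop it,
 * e.g.:
 *
 *	struct rtentry *rt = ifnet_cached_rtlookup_inet(ifp, ip_src);
 *	if (rt != NULL) {
 *		... use rt ...
 *		rtfree(rt);
 *	}
 */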
3816 struct rtentry *
3817 ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip)
3818 {
3819 struct route src_rt;
3820 struct sockaddr_in *dst = (struct sockaddr_in *)(&src_rt.ro_dst);
3821
3822 ifp_src_route_copyout(ifp, &src_rt);
3823
3824 if (src_rt.ro_rt == NULL || !(src_rt.ro_rt->rt_flags & RTF_UP) ||
3825 src_ip.s_addr != dst->sin_addr.s_addr ||
3826 src_rt.ro_rt->generation_id != route_generation) {
3827 if (src_rt.ro_rt != NULL) {
3828 rtfree(src_rt.ro_rt);
3829 src_rt.ro_rt = NULL;
3830 } else if (dst->sin_family != AF_INET) {
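			/* first use: the embedded sockaddr hasn't been set up yet */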
3831 bzero(&src_rt.ro_dst, sizeof (src_rt.ro_dst));
3832 dst->sin_len = sizeof (src_rt.ro_dst);
3833 dst->sin_family = AF_INET;
3834 }
3835 dst->sin_addr = src_ip;
3836
3837 if (src_rt.ro_rt == NULL) {
3838 src_rt.ro_rt = rtalloc1_scoped((struct sockaddr *)dst,
3839 0, 0, ifp->if_index);
3840
3841 if (src_rt.ro_rt != NULL) {
3842 /* retain a ref, copyin consumes one */
3843 struct rtentry *rte = src_rt.ro_rt;
3844 RT_ADDREF(rte);
3845 ifp_src_route_copyin(ifp, &src_rt);
3846 src_rt.ro_rt = rte;
3847 }
3848 }
3849 }
3850
3851 return (src_rt.ro_rt);
3852 }
3853
3854 #if INET6
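/*
 * IPv6 counterpart of ifnet_cached_rtlookup_inet; additionally records
 * the scope id for the source address via in6_addr2scopeid().  Same
 * reference rules apply: the caller must rtfree() a non-NULL result.
 */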
3855 struct rtentry *
3856 ifnet_cached_rtlookup_inet6(struct ifnet *ifp, struct in6_addr *src_ip6)
3857 {
3858 struct route_in6 src_rt;
3859
3860 ifp_src_route6_copyout(ifp, &src_rt);
3861
3862 if (src_rt.ro_rt == NULL || !(src_rt.ro_rt->rt_flags & RTF_UP) ||
3863 !IN6_ARE_ADDR_EQUAL(src_ip6, &src_rt.ro_dst.sin6_addr) ||
3864 src_rt.ro_rt->generation_id != route_generation) {
3865 if (src_rt.ro_rt != NULL) {
3866 rtfree(src_rt.ro_rt);
3867 src_rt.ro_rt = NULL;
3868 } else if (src_rt.ro_dst.sin6_family != AF_INET6) {
3869 bzero(&src_rt.ro_dst, sizeof (src_rt.ro_dst));
3870 src_rt.ro_dst.sin6_len = sizeof (src_rt.ro_dst);
3871 src_rt.ro_dst.sin6_family = AF_INET6;
3872 }
3873 src_rt.ro_dst.sin6_scope_id = in6_addr2scopeid(ifp, src_ip6);
3874 src_rt.ro_dst.sin6_addr = *src_ip6;
3875
3876 if (src_rt.ro_rt == NULL) {
3877 src_rt.ro_rt = rtalloc1_scoped(
3878 (struct sockaddr *)&src_rt.ro_dst, 0, 0,
3879 ifp->if_index);
3880
3881 if (src_rt.ro_rt != NULL) {
3882 /* retain a ref, copyin consumes one */
3883 struct rtentry *rte = src_rt.ro_rt;
3884 RT_ADDREF(rte);
3885 ifp_src_route6_copyin(ifp, &src_rt);
3886 src_rt.ro_rt = rte;
3887 }
3888 }
3889 }
3890
3891 return (src_rt.ro_rt);
3892 }
3893 #endif /* INET6 */