2  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
   4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 
   6  * This file contains Original Code and/or Modifications of Original Code 
   7  * as defined in and that are subject to the Apple Public Source License 
   8  * Version 2.0 (the 'License'). You may not use this file except in 
   9  * compliance with the License. The rights granted to you under the License 
  10  * may not be used to create, or enable the creation or redistribution of, 
  11  * unlawful or unlicensed copies of an Apple operating system, or to 
  12  * circumvent, violate, or enable the circumvention or violation of, any 
  13  * terms of an Apple operating system software license agreement. 
  15  * Please obtain a copy of the License at 
  16  * http://www.opensource.apple.com/apsl/ and read it before using this file. 
  18  * The Original Code and all software distributed under the License are 
  19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
  20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
  21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
  22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
  23  * Please see the License for the specific language governing rights and 
  24  * limitations under the License. 
  26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 
  29  *      Copyright (c) 1999 Apple Computer, Inc.  
  31  *      Data Link Inteface Layer 
  35 #include <sys/param.h> 
  36 #include <sys/systm.h> 
  37 #include <sys/kernel.h> 
  38 #include <sys/malloc.h> 
  40 #include <sys/socket.h> 
  41 #include <sys/domain.h> 
  43 #include <net/if_dl.h> 
  45 #include <net/route.h> 
  46 #include <net/if_var.h> 
  48 #include <net/if_arp.h> 
  49 #include <sys/kern_event.h> 
  50 #include <sys/kdebug.h> 
  52 #include <kern/assert.h> 
  53 #include <kern/task.h> 
  54 #include <kern/thread.h> 
  55 #include <kern/sched_prim.h> 
  56 #include <kern/locks.h> 
  58 #include <net/if_types.h> 
  59 #include <net/kpi_interfacefilter.h> 
  61 #include <libkern/OSAtomic.h> 
  63 #include <machine/machine_routines.h> 
  65 #define DBG_LAYER_BEG           DLILDBG_CODE(DBG_DLIL_STATIC, 0) 
  66 #define DBG_LAYER_END           DLILDBG_CODE(DBG_DLIL_STATIC, 2) 
  67 #define DBG_FNC_DLIL_INPUT      DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8)) 
  68 #define DBG_FNC_DLIL_OUTPUT     DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8)) 
  69 #define DBG_FNC_DLIL_IFOUT      DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8)) 
  72 #define MAX_DL_TAGS             16 
  73 #define MAX_DLIL_FILTERS        16 
  74 #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */ 
  75 #define MAX_LINKADDR        4 /* LONGWORDS */ 
  76 #define M_NKE M_IFADDR 
  78 #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter 
  79 #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter 
  82 #define DLIL_PRINTF     printf 
  84 #define DLIL_PRINTF     kprintf 
  93     SLIST_ENTRY(if_proto
)        next_hash
; 
  97     struct domain                        
*dl_domain
; 
  98     protocol_family_t           protocol_family
; 
 102                         dl_input_func                    dl_input
; 
 103                         dl_pre_output_func               dl_pre_output
; 
 104                         dl_event_func                    dl_event
; 
 105                         dl_offer_func                    dl_offer
; 
 106                         dl_ioctl_func                    dl_ioctl
; 
 107                         dl_detached_func                 dl_detached
; 
 110                         proto_media_input                       input
; 
 111                         proto_media_preout                      pre_output
; 
 112                         proto_media_event                       event
; 
 113                         proto_media_ioctl                       ioctl
; 
 114                         proto_media_detached            detached
; 
 115                         proto_media_resolve_multi       resolve_multi
; 
 116                         proto_media_send_arp            send_arp
; 
 121 SLIST_HEAD(proto_hash_entry
, if_proto
); 
 125     /* ifnet and drvr_ext are used by the stack and drivers 
 126     drvr_ext extends the public ifnet and must follow dl_if */ 
 127     struct ifnet        dl_if
;                  /* public ifnet */ 
 129     /* dlil private fields */ 
 130     TAILQ_ENTRY(dlil_ifnet
) dl_if_link
; /* dlil_ifnet are link together */ 
 131                                                                 /* it is not the ifnet list */ 
 132     void                *if_uniqueid
;   /* unique id identifying the interface */ 
 133     size_t              if_uniqueid_len
;/* length of the unique id */ 
 134     char                if_namestorage
[IFNAMSIZ
]; /* interface name storage */ 
 137 struct ifnet_filter 
{ 
 138         TAILQ_ENTRY(ifnet_filter
)       filt_next
; 
 142         const char                                      *filt_name
; 
 144     protocol_family_t                   filt_protocol
; 
 145     iff_input_func                              filt_input
; 
 146     iff_output_func                             filt_output
; 
 147     iff_event_func                              filt_event
; 
 148     iff_ioctl_func                              filt_ioctl
; 
 149     iff_detached_func                   filt_detached
; 
 152 struct if_family_str 
{ 
 153     TAILQ_ENTRY(if_family_str
) if_fam_next
; 
 158 #define DLIL_SHUTDOWN 1 
 160     int (*add_if
)(struct ifnet 
*ifp
); 
 161     int (*del_if
)(struct ifnet 
*ifp
); 
 162     int (*init_if
)(struct ifnet 
*ifp
); 
 163     int (*add_proto
)(struct ifnet 
*ifp
, u_long protocol_family
, struct ddesc_head_str 
*demux_desc_head
); 
 164         ifnet_del_proto_func    del_proto
; 
 165     ifnet_ioctl_func            ifmod_ioctl
; 
 166     int (*shutdown
)(void); 
 169 struct proto_family_str 
{ 
 170         TAILQ_ENTRY(proto_family_str
) proto_fam_next
; 
 175         int (*attach_proto
)(struct ifnet 
*ifp
, u_long protocol_family
); 
 176         int (*detach_proto
)(struct ifnet 
*ifp
, u_long protocol_family
); 
 180         kIfNetUseCount_MayBeZero 
= 0, 
 181         kIfNetUseCount_MustNotBeZero 
= 1 
 184 static TAILQ_HEAD(, dlil_ifnet
) dlil_ifnet_head
; 
 185 static TAILQ_HEAD(, if_family_str
) if_family_head
; 
 186 static TAILQ_HEAD(, proto_family_str
) proto_family_head
; 
 187 static lck_grp_t 
*dlil_lock_group
; 
 188 static lck_grp_t 
*ifnet_lock_group
; 
 189 static lck_grp_t 
*ifnet_head_lock_group
; 
 190 static lck_attr_t 
*ifnet_lock_attr
; 
 191 static lck_mtx_t 
*proto_family_mutex
; 
 192 static lck_rw_t 
*ifnet_head_mutex
; 
 193 static lck_mtx_t 
*dlil_ifnet_mutex
; 
 194 static lck_mtx_t 
*dlil_mutex
; 
 195 static unsigned long dlil_read_count 
= 0; 
 196 static unsigned long dlil_detach_waiting 
= 0; 
 197 extern u_int32_t        ipv4_ll_arp_aware
; 
 199 int dlil_initialized 
= 0; 
 200 lck_spin_t 
*dlil_input_lock
; 
 201 __private_extern__ thread_t     dlil_input_thread_ptr 
= 0; 
 202 int dlil_input_thread_wakeup 
= 0; 
 203 __private_extern__ 
int dlil_output_thread_wakeup 
= 0; 
 204 static struct mbuf 
*dlil_input_mbuf_head 
= NULL
; 
 205 static struct mbuf 
*dlil_input_mbuf_tail 
= NULL
; 
 207 #error dlil_input() needs to be revised to support more than on loopback interface 
 209 static struct mbuf 
*dlil_input_loop_head 
= NULL
; 
 210 static struct mbuf 
*dlil_input_loop_tail 
= NULL
; 
 212 static void dlil_input_thread(void); 
 213 static int dlil_event_internal(struct ifnet 
*ifp
, struct kev_msg 
*msg
); 
 214 struct ifnet 
*ifbyfamily(u_long family
, short unit
); 
 215 static int dlil_detach_filter_internal(interface_filter_t filter
, int detached
); 
 216 static void dlil_call_delayed_detach_thread(void); 
 218 static void     dlil_read_begin(void); 
 219 static void     dlil_read_end(void); 
 220 static int      dlil_write_begin(void); 
 221 static void     dlil_write_end(void); 
 223 static int ifp_use(struct ifnet 
*ifp
, int handle_zero
); 
 224 static int ifp_unuse(struct ifnet 
*ifp
); 
 225 static void ifp_use_reached_zero(struct ifnet 
*ifp
); 
 227 extern void bpfdetach(struct ifnet
*); 
 228 extern void proto_input_run(void); // new run_netisr 
 231 int dlil_input_packet(struct ifnet  
*ifp
, struct mbuf 
*m
, char *frame_header
); 
 233 __private_extern__ 
void link_rtrequest(int, struct rtentry 
*, struct sockaddr 
*); 
 237 extern u_int32_t        inject_buckets
; 
 239 static const u_int32_t dlil_writer_waiting 
= 0x80000000; 
/*
 * Strip the const qualifier from a pointer without a compiler warning.
 * Used to hand the address of the const dlil_writer_waiting token to
 * wakeup()/tsleep(), which take non-const pointers.
 * NOTE(review): the function body lines are elided in this extraction;
 * only the signature fragment is visible here.
 */
 241 static __inline__ 
void* 
 242 _cast_non_const(const void * ptr
) { 
 252 /* Should these be inline? */ 
/*
 * dlil_read_begin - take one reader reference on dlil_read_count.
 * Loops on OSCompareAndSwap to increment the count, yielding via
 * tsleep() while a writer (the dlil_writer_waiting bit) is pending and
 * this thread holds no prior read reference.  Panics if the calling
 * thread is already the writer.
 * NOTE(review): the loop header and braces are elided in this
 * extraction; comments describe only the visible statements.
 */
 254 dlil_read_begin(void) 
 256         unsigned long new_value
; 
 257         unsigned long old_value
; 
 258         struct uthread 
*uth 
= get_bsdthread_info(current_thread()); 
 260         if (uth
->dlil_incremented_read 
== dlil_writer_waiting
) 
 261                 panic("dlil_read_begin - thread is already a writer"); 
 265                 old_value 
= dlil_read_count
; 
                /* Writer pending and we hold no read ref yet: back off briefly. */
 267                 if ((old_value 
& dlil_writer_waiting
) != 0 && uth
->dlil_incremented_read 
== 0) 
 269                         tsleep(&dlil_read_count
, PRIBIO
, "dlil_read_count", 1); 
 273                 new_value 
= old_value 
+ 1; 
                /* Retry until the increment lands atomically. */
 274         } while (!OSCompareAndSwap((UInt32
)old_value
, (UInt32
)new_value
, (UInt32
*)&dlil_read_count
)); 
                /* Per-thread read depth; checked by dlil_write_begin(). */
 276         uth
->dlil_incremented_read
++; 
/*
 * dlil_read_end (signature line elided by extraction): drop one reader
 * reference.  Atomically decrements dlil_read_count and the caller's
 * per-thread read depth; if only the writer-waiting bit remains set,
 * wake the writer blocked on &dlil_writer_waiting.
 */
 282         struct uthread 
*uth 
= get_bsdthread_info(current_thread()); 
 284         OSDecrementAtomic((UInt32
*)&dlil_read_count
); 
 285         uth
->dlil_incremented_read
--; 
                /* All readers gone, only the waiting-writer bit left: wake it. */
 286         if (dlil_read_count 
== dlil_writer_waiting
) 
 287                 wakeup(_cast_non_const(&dlil_writer_waiting
)); 
/*
 * dlil_write_begin - become the exclusive DLIL writer.
 * Fails (visible early-return path elided) if the calling thread
 * already holds a read reference; otherwise takes dlil_mutex, sets the
 * writer-waiting bit, and sleeps until all readers have drained
 * (dlil_read_count == dlil_writer_waiting).
 * NOTE(review): the wait loop and tail of the function are elided in
 * this extraction; comments describe only the visible statements.
 */
 291 dlil_write_begin(void) 
 293         struct uthread 
*uth 
= get_bsdthread_info(current_thread()); 
                /* A thread holding a read ref must not upgrade to writer. */
 295         if (uth
->dlil_incremented_read 
!= 0) { 
 298         lck_mtx_lock(dlil_mutex
); 
 299         OSBitOrAtomic((UInt32
)dlil_writer_waiting
, (UInt32
*)&dlil_read_count
); 
                /* No readers left: mark this thread as the writer. */
 301         if (dlil_read_count 
== dlil_writer_waiting
) { 
 302                 uth
->dlil_incremented_read 
= dlil_writer_waiting
; 
                /* Readers still active: sleep until dlil_read_end() wakes us. */
 306                 tsleep(_cast_non_const(&dlil_writer_waiting
), PRIBIO
, "dlil_writer_waiting", 1); 
/*
 * dlil_write_end (signature line elided by extraction): release the
 * exclusive writer state taken by dlil_write_begin().  Clears the
 * writer-waiting bit, drops dlil_mutex, resets the per-thread marker,
 * and wakes any readers sleeping in dlil_read_begin().
 */
 314         struct uthread 
*uth 
= get_bsdthread_info(current_thread()); 
 316         if (uth
->dlil_incremented_read 
!= dlil_writer_waiting
) 
 317                 panic("dlil_write_end - thread is not a writer"); 
 318         OSBitAndAtomic((UInt32
)~dlil_writer_waiting
, (UInt32
*)&dlil_read_count
); 
 319         lck_mtx_unlock(dlil_mutex
); 
 320         uth
->dlil_incremented_read 
= 0; 
 321         wakeup(&dlil_read_count
); 
 324 #define PROTO_HASH_SLOTS        0x5 
 327  * Internal functions. 
 331 proto_hash_value(u_long protocol_family
) 
 333         switch(protocol_family
) { 
 348 struct if_family_str 
*find_family_module(u_long if_family
) 
 350     struct if_family_str  
*mod 
= NULL
; 
 352     TAILQ_FOREACH(mod
, &if_family_head
, if_fam_next
) { 
 353         if (mod
->if_family 
== (if_family 
& 0xffff))  
 361 struct proto_family_str
* 
 362 find_proto_module(u_long proto_family
, u_long if_family
) 
 364         struct proto_family_str  
*mod 
= NULL
; 
 366         TAILQ_FOREACH(mod
, &proto_family_head
, proto_fam_next
) { 
 367                 if ((mod
->proto_family 
== (proto_family 
& 0xffff))  
 368                         && (mod
->if_family 
== (if_family 
& 0xffff)))  
 375 static struct if_proto
* 
 376 find_attached_proto(struct ifnet 
*ifp
, u_long protocol_family
) 
 378         struct if_proto 
*proto 
= NULL
; 
 379         u_long i 
= proto_hash_value(protocol_family
); 
 380         if (ifp
->if_proto_hash
) { 
 381                 proto 
= SLIST_FIRST(&ifp
->if_proto_hash
[i
]); 
 384         while(proto 
&& proto
->protocol_family 
!= protocol_family
) { 
 385                 proto 
= SLIST_NEXT(proto
, next_hash
); 
/*
 * Take a reference on an attached protocol: atomic increment of
 * proto->refcount.  Balanced by if_proto_free().
 * (Return-type line and braces elided by extraction.)
 */
 392 if_proto_ref(struct if_proto 
*proto
) 
 394         OSAddAtomic(1, (UInt32
*)&proto
->refcount
); 
/*
 * Drop a reference on an attached protocol.  OSAddAtomic(-1, ...)
 * returns the PREVIOUS value, so oldval == 1 means this call released
 * the last reference and the if_proto is returned to the M_IFADDR
 * malloc zone.  (Return-type line and braces elided by extraction.)
 */
 398 if_proto_free(struct if_proto 
*proto
) 
 400         int oldval 
= OSAddAtomic(-1, (UInt32
*)&proto
->refcount
); 
 402         if (oldval 
== 1) { /* This was the last reference */ 
 403                 FREE(proto
, M_IFADDR
); 
 407 __private_extern__ 
void 
 409         __unused 
struct ifnet 
*ifp
, 
 414          * Not implemented for rw locks. 
 416          * Function exists so when/if we use mutex we can 
 420         lck_mtx_assert(ifp
->if_lock
, what
); 
 424 __private_extern__ 
void 
 429         lck_rw_lock_shared(ifp
->if_lock
); 
 431         lck_mtx_assert(ifp
->if_lock
, LCK_MTX_ASSERT_NOTOWNED
); 
 432         lck_mtx_lock(ifp
->if_lock
); 
 436 __private_extern__ 
void 
 437 ifnet_lock_exclusive( 
 441         lck_rw_lock_exclusive(ifp
->if_lock
); 
 443         lck_mtx_assert(ifp
->if_lock
, LCK_MTX_ASSERT_NOTOWNED
); 
 444         lck_mtx_lock(ifp
->if_lock
); 
 448 __private_extern__ 
void 
 453         lck_rw_done(ifp
->if_lock
); 
 455         lck_mtx_assert(ifp
->if_lock
, LCK_MTX_ASSERT_OWNED
); 
 456         lck_mtx_unlock(ifp
->if_lock
); 
 460 __private_extern__ 
void 
 461 ifnet_head_lock_shared() 
 463         lck_rw_lock_shared(ifnet_head_mutex
); 
 466 __private_extern__ 
void 
 467 ifnet_head_lock_exclusive() 
 469         lck_rw_lock_exclusive(ifnet_head_mutex
); 
/*
 * Release the ifnet_head read/write lock taken by
 * ifnet_head_lock_shared()/ifnet_head_lock_exclusive().
 * NOTE(review): the function-name line is elided by this extraction;
 * presumably this is ifnet_head_done() — confirm against full source.
 */
 472 __private_extern__ 
void 
 475         lck_rw_done(ifnet_head_mutex
); 
 481 struct ifnet 
*ifbyfamily(u_long family
, short unit
) 
 485         ifnet_head_lock_shared(); 
 486         TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) 
 487                 if ((family 
== ifp
->if_family
) && (ifp
->if_unit 
== unit
)) 
 494 static int dlil_ifp_proto_count(struct ifnet 
* ifp
)  
 499         if (ifp
->if_proto_hash 
!= NULL
) {        
 500                 for (i 
= 0; i 
< PROTO_HASH_SLOTS
; i
++) { 
 501                         struct if_proto 
*proto
; 
 502                         SLIST_FOREACH(proto
, &ifp
->if_proto_hash
[i
], next_hash
) { 
 511 __private_extern__ 
void 
 512 dlil_post_msg(struct ifnet 
*ifp
, u_long event_subclass
, u_long event_code
,  
 513                    struct net_event_data 
*event_data
, u_long event_data_len
)  
 515         struct net_event_data   ev_data
; 
 516         struct kev_msg                  ev_msg
; 
 519          * a net event always start with a net_event_data structure 
 520          * but the caller can generate a simple net event or 
 521          * provide a longer event structure to post 
 524         ev_msg
.vendor_code    
= KEV_VENDOR_APPLE
; 
 525         ev_msg
.kev_class      
= KEV_NETWORK_CLASS
; 
 526         ev_msg
.kev_subclass   
= event_subclass
; 
 527         ev_msg
.event_code         
= event_code
;     
 529         if (event_data 
== 0) { 
 530                 event_data 
= &ev_data
; 
 531                 event_data_len 
= sizeof(struct net_event_data
); 
 534         strncpy(&event_data
->if_name
[0], ifp
->if_name
, IFNAMSIZ
); 
 535         event_data
->if_family 
= ifp
->if_family
; 
 536         event_data
->if_unit   
= (unsigned long) ifp
->if_unit
; 
 538         ev_msg
.dv
[0].data_length 
= event_data_len
; 
 539         ev_msg
.dv
[0].data_ptr    
= event_data
;   
 540         ev_msg
.dv
[1].data_length 
= 0; 
 542         dlil_event_internal(ifp
, &ev_msg
); 
 545 void dlil_init(void); 
 549         lck_grp_attr_t  
*grp_attributes 
= 0; 
 550         lck_attr_t              
*lck_attributes 
= 0; 
 551         lck_grp_t               
*input_lock_grp 
= 0; 
 553         TAILQ_INIT(&dlil_ifnet_head
); 
 554         TAILQ_INIT(&if_family_head
); 
 555         TAILQ_INIT(&proto_family_head
); 
 556         TAILQ_INIT(&ifnet_head
); 
 558         /* Setup the lock groups we will use */ 
 559         grp_attributes 
= lck_grp_attr_alloc_init(); 
 560         lck_grp_attr_setdefault(grp_attributes
); 
 562         dlil_lock_group 
= lck_grp_alloc_init("dlil internal locks", grp_attributes
); 
 564         ifnet_lock_group 
= lck_grp_alloc_init("ifnet locks", grp_attributes
); 
 566         ifnet_lock_group 
= lck_grp_alloc_init("ifnet locks", grp_attributes
); 
 568         ifnet_head_lock_group 
= lck_grp_alloc_init("ifnet head lock", grp_attributes
); 
 569         input_lock_grp 
= lck_grp_alloc_init("dlil input lock", grp_attributes
); 
 570         lck_grp_attr_free(grp_attributes
); 
 573         /* Setup the lock attributes we will use */ 
 574         lck_attributes 
= lck_attr_alloc_init(); 
 575         lck_attr_setdefault(lck_attributes
); 
 577         ifnet_lock_attr 
= lck_attr_alloc_init(); 
 578         lck_attr_setdefault(ifnet_lock_attr
); 
 580         dlil_input_lock 
= lck_spin_alloc_init(input_lock_grp
, lck_attributes
); 
 583         ifnet_head_mutex 
= lck_rw_alloc_init(ifnet_head_lock_group
, lck_attributes
); 
 584         proto_family_mutex 
= lck_mtx_alloc_init(dlil_lock_group
, lck_attributes
); 
 585         dlil_ifnet_mutex 
= lck_mtx_alloc_init(dlil_lock_group
, lck_attributes
); 
 586         dlil_mutex 
= lck_mtx_alloc_init(dlil_lock_group
, lck_attributes
); 
 588         lck_attr_free(lck_attributes
); 
 592          * Start up the dlil input thread once everything is initialized 
 594         (void) kernel_thread(kernel_task
, dlil_input_thread
); 
 595         (void) kernel_thread(kernel_task
, dlil_call_delayed_detach_thread
); 
 601         const struct iff_filter 
*if_filter
, 
 602         interface_filter_t              
*filter_ref
) 
 605     struct ifnet_filter 
*filter
; 
 607         MALLOC(filter
, struct ifnet_filter 
*, sizeof(*filter
), M_NKE
, M_WAITOK
); 
 610         bzero(filter
, sizeof(*filter
)); 
 613         filter
->filt_ifp 
= ifp
; 
 614         filter
->filt_cookie 
= if_filter
->iff_cookie
; 
 615         filter
->filt_name 
= if_filter
->iff_name
; 
 616         filter
->filt_protocol 
= if_filter
->iff_protocol
; 
 617         filter
->filt_input 
= if_filter
->iff_input
; 
 618         filter
->filt_output 
= if_filter
->iff_output
; 
 619         filter
->filt_event 
= if_filter
->iff_event
; 
 620         filter
->filt_ioctl 
= if_filter
->iff_ioctl
; 
 621         filter
->filt_detached 
= if_filter
->iff_detached
; 
 623         if ((retval 
= dlil_write_begin()) != 0) { 
 624                 /* Failed to acquire the write lock */ 
 628         TAILQ_INSERT_TAIL(&ifp
->if_flt_head
, filter
, filt_next
); 
 630         *filter_ref 
= filter
; 
 635 dlil_detach_filter_internal(interface_filter_t filter
, int detached
) 
 641                 interface_filter_t      entry 
= NULL
; 
 643                 /* Take the write lock */ 
 644                 retval 
= dlil_write_begin(); 
 645                 if (retval 
!= 0 && retval 
!= EDEADLK
) 
 649                  * At this point either we have the write lock (retval == 0) 
 650                  * or we couldn't get it (retval == EDEADLK) because someone 
 651                  * else up the stack is holding the read lock. It is safe to 
 652                  * read, either the read or write is held. Verify the filter 
 653                  * parameter before proceeding. 
 655                 ifnet_head_lock_shared(); 
 656                 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) { 
 657                         TAILQ_FOREACH(entry
, &ifp
->if_flt_head
, filt_next
) { 
 666                 if (entry 
!= filter
) { 
 667                         /* filter parameter is not a valid filter ref */ 
 674                 if (retval 
== EDEADLK
) { 
 675                         /* Perform a delayed detach */ 
 676                         filter
->filt_detaching 
= 1; 
 677                         dlil_detach_waiting 
= 1; 
 678                         wakeup(&dlil_detach_waiting
); 
 682                 /* Remove the filter from the list */ 
 683                 TAILQ_REMOVE(&ifp
->if_flt_head
, filter
, filt_next
); 
 687         /* Call the detached funciton if there is one */ 
 688         if (filter
->filt_detached
) 
 689                 filter
->filt_detached(filter
->filt_cookie
, filter
->filt_ifp
); 
 691         /* Free the filter */ 
/*
 * Public entry point to detach an interface filter; delegates to
 * dlil_detach_filter_internal() with detached == 0 (the filter is
 * still on its interface's list and may be detached asynchronously
 * if the write lock cannot be taken).
 * (Return-type line and any interior guard elided by extraction.)
 */
 698 dlil_detach_filter(interface_filter_t filter
) 
 702         dlil_detach_filter_internal(filter
, 0); 
 706 dlil_input_thread_continue( 
 708         __unused wait_result_t  wait
) 
 711                 struct mbuf 
*m
, *m_loop
; 
 713                 lck_spin_lock(dlil_input_lock
); 
 714                 m 
= dlil_input_mbuf_head
; 
 715                 dlil_input_mbuf_head 
= NULL
; 
 716                 dlil_input_mbuf_tail 
= NULL
; 
 717                 m_loop 
= dlil_input_loop_head
; 
 718                 dlil_input_loop_head 
= NULL
; 
 719                 dlil_input_loop_tail 
= NULL
; 
 720                 lck_spin_unlock(dlil_input_lock
); 
 723                 * NOTE warning %%% attention !!!! 
 724                 * We should think about putting some thread starvation safeguards if  
 725                 * we deal with long chains of packets. 
 728                         struct mbuf 
*m0 
= m
->m_nextpkt
; 
 729                         void *header 
= m
->m_pkthdr
.header
; 
 732                         m
->m_pkthdr
.header 
= NULL
; 
 733                         (void) dlil_input_packet(m
->m_pkthdr
.rcvif
, m
, header
); 
 738                         struct mbuf 
*m0 
= m
->m_nextpkt
; 
 739                         void *header 
= m
->m_pkthdr
.header
; 
 740                         struct ifnet 
*ifp 
= &loif
[0]; 
 743                         m
->m_pkthdr
.header 
= NULL
; 
 744                         (void) dlil_input_packet(ifp
, m
, header
); 
 750                 if (dlil_input_mbuf_head 
== NULL 
&&  
 751                         dlil_input_loop_head 
== NULL 
&& inject_buckets 
== 0) { 
 752                         assert_wait(&dlil_input_thread_wakeup
, THREAD_UNINT
); 
 753                         (void) thread_block(dlil_input_thread_continue
); 
 759 void dlil_input_thread(void) 
 761         register thread_t self 
= current_thread(); 
 763         ml_thread_policy(self
, MACHINE_GROUP
, 
 764                                          (MACHINE_NETWORK_GROUP
|MACHINE_NETWORK_NETISR
)); 
 766         dlil_initialized 
= 1; 
 767         dlil_input_thread_ptr 
= current_thread(); 
 768         dlil_input_thread_continue(NULL
, THREAD_RESTART
); 
 772 dlil_input_with_stats( 
 776         const struct ifnet_stat_increment_param 
*stats
) 
 779          * Because of loopbacked multicast we cannot stuff the ifp in 
 780          * the rcvif of the packet header: loopback has its own dlil 
 784         lck_spin_lock(dlil_input_lock
); 
 785         if (ifp
->if_type 
!= IFT_LOOP
) { 
 786                 if (dlil_input_mbuf_head 
== NULL
) 
 787                         dlil_input_mbuf_head 
= m_head
; 
 788                 else if (dlil_input_mbuf_tail 
!= NULL
) 
 789                         dlil_input_mbuf_tail
->m_nextpkt 
= m_head
; 
 790                 dlil_input_mbuf_tail 
= m_tail 
? m_tail 
: m_head
; 
 792                 if (dlil_input_loop_head 
== NULL
) 
 793                         dlil_input_loop_head 
= m_head
; 
 794                 else if (dlil_input_loop_tail 
!= NULL
) 
 795                         dlil_input_loop_tail
->m_nextpkt 
= m_head
; 
 796                 dlil_input_loop_tail 
= m_tail 
? m_tail 
: m_head
; 
 799                 ifp
->if_data
.ifi_ipackets 
+= stats
->packets_in
; 
 800                 ifp
->if_data
.ifi_ibytes 
+= stats
->bytes_in
; 
 801                 ifp
->if_data
.ifi_ierrors 
+= stats
->errors_in
; 
 803                 ifp
->if_data
.ifi_opackets 
+= stats
->packets_out
; 
 804                 ifp
->if_data
.ifi_obytes 
+= stats
->bytes_out
; 
 805                 ifp
->if_data
.ifi_oerrors 
+= stats
->errors_out
; 
 807                 ifp
->if_data
.ifi_collisions 
+= stats
->collisions
; 
 808                 ifp
->if_data
.ifi_iqdrops 
+= stats
->dropped
; 
 810         lck_spin_unlock(dlil_input_lock
); 
 812         wakeup((caddr_t
)&dlil_input_thread_wakeup
); 
 818 dlil_input(struct ifnet  
*ifp
, struct mbuf 
*m_head
, struct mbuf 
*m_tail
) 
 820         return dlil_input_with_stats(ifp
, m_head
, m_tail
, NULL
); 
 824 dlil_input_packet(struct ifnet  
*ifp
, struct mbuf 
*m
, 
 828     struct if_proto              
*ifproto 
= 0; 
 829     protocol_family_t   protocol_family
; 
 830     struct ifnet_filter 
*filter
; 
 833     KERNEL_DEBUG(DBG_FNC_DLIL_INPUT 
| DBG_FUNC_START
,0,0,0,0,0); 
 836          * Lock the interface while we run through 
 837          * the filters and the demux. This lock 
 838          * protects the filter list and the demux list. 
 843          * Call family demux module. If the demux module finds a match 
 844          * for the frame it will fill-in the ifproto pointer. 
 847         retval 
= ifp
->if_demux(ifp
, m
, frame_header
, &protocol_family
); 
 850         if (retval 
== EJUSTRETURN
) { 
 856         if (m
->m_flags 
& (M_BCAST
|M_MCAST
)) 
 860          * Run interface filters 
 863         /* Do not pass VLAN tagged packets to filters PR-3586856 */ 
 864         if ((m
->m_pkthdr
.csum_flags 
& CSUM_VLAN_TAG_VALID
) == 0) { 
 865                 TAILQ_FOREACH(filter
, &ifp
->if_flt_head
, filt_next
) { 
 867                         if (filter
->filt_input 
&& (filter
->filt_protocol 
== 0 || 
 868                                  filter
->filt_protocol 
== protocol_family
)) { 
 869                                 filter_result 
= filter
->filt_input(filter
->filt_cookie
, ifp
, protocol_family
, &m
, &frame_header
); 
 873                                         if (filter_result 
== EJUSTRETURN
) { 
 880                                         return filter_result
; 
 886         /* Demux is done, interface filters have been processed, unlock the mutex */ 
 887         if (retval 
|| ((m
->m_flags 
& M_PROMISC
) != 0) ) { 
 889                 if (retval 
!= EJUSTRETURN
) { 
 897         ifproto 
= find_attached_proto(ifp
, protocol_family
); 
 901                 DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n"); 
 907          * Hand the packet off to the protocol. 
 910         if (ifproto
->dl_domain 
&& (ifproto
->dl_domain
->dom_flags 
& DOM_REENTRANT
) == 0) { 
 911                 lck_mtx_lock(ifproto
->dl_domain
->dom_mtx
); 
 914         if (ifproto
->proto_kpi 
== kProtoKPI_DLIL
) 
 915                 retval 
= (*ifproto
->kpi
.dlil
.dl_input
)(m
, frame_header
,  
 916                                           ifp
, ifproto
->protocol_family
,  
 919                 retval 
= ifproto
->kpi
.v1
.input(ifp
, ifproto
->protocol_family
, m
, frame_header
); 
 921         if (ifproto
->dl_domain 
&& (ifproto
->dl_domain
->dom_flags 
& DOM_REENTRANT
) == 0) { 
 922                 lck_mtx_unlock(ifproto
->dl_domain
->dom_mtx
); 
 927         if (retval 
== EJUSTRETURN
) 
 933         KERNEL_DEBUG(DBG_FNC_DLIL_INPUT 
| DBG_FUNC_END
,0,0,0,0,0); 
 938 dlil_event_internal(struct ifnet 
*ifp
, struct kev_msg 
*event
) 
 940         struct ifnet_filter 
*filter
; 
 942         if (ifp_use(ifp
, kIfNetUseCount_MustNotBeZero
) == 0) { 
 945                 /* Pass the event to the interface filters */ 
 946                 TAILQ_FOREACH(filter
, &ifp
->if_flt_head
, filt_next
) { 
 947                         if (filter
->filt_event
) 
 948                                 filter
->filt_event(filter
->filt_cookie
, ifp
, filter
->filt_protocol
, event
); 
 951                 if (ifp
->if_proto_hash
) { 
 954                         for (i 
= 0; i 
< PROTO_HASH_SLOTS
; i
++) { 
 955                                 struct if_proto 
*proto
; 
 957                                 SLIST_FOREACH(proto
, &ifp
->if_proto_hash
[i
], next_hash
) { 
 958                                         /* Pass the event to the protocol */ 
 959                                         if (proto
->proto_kpi 
== kProtoKPI_DLIL
) { 
 960                                                 if (proto
->kpi
.dlil
.dl_event
) 
 961                                                         proto
->kpi
.dlil
.dl_event(ifp
, event
); 
 964                                                 if (proto
->kpi
.v1
.event
) 
 965                                                         proto
->kpi
.v1
.event(ifp
, proto
->protocol_family
, event
); 
 973                 /* Pass the event to the interface */ 
 975                         ifp
->if_event(ifp
, event
); 
 978                         ifp_use_reached_zero(ifp
); 
 981         return kev_post_msg(event
); 
 985 dlil_event(struct ifnet 
*ifp
, struct kern_event_msg 
*event
) 
 989         struct kev_msg               kev_msg
; 
 991         kev_msg
.vendor_code    
= event
->vendor_code
; 
 992         kev_msg
.kev_class      
= event
->kev_class
; 
 993         kev_msg
.kev_subclass   
= event
->kev_subclass
; 
 994         kev_msg
.event_code     
= event
->event_code
; 
 995         kev_msg
.dv
[0].data_ptr 
= &event
->event_data
[0]; 
 996         kev_msg
.dv
[0].data_length 
= event
->total_size 
- KEV_MSG_HEADER_SIZE
; 
 997         kev_msg
.dv
[1].data_length 
= 0; 
1000         result 
= dlil_event_internal(ifp
, &kev_msg
); 
1009         u_long proto_family
, 
1010         struct mbuf             
*packetlist
, 
1012         const struct sockaddr   
*dest
, 
1015         char                    *frame_type 
= 0; 
1016         char                    *dst_linkaddr 
= 0; 
1017         int                     error
, retval 
= 0; 
1018         char                    frame_type_buffer
[MAX_FRAME_TYPE_SIZE 
* 4]; 
1019         char                    dst_linkaddr_buffer
[MAX_LINKADDR 
* 4]; 
1020         struct ifnet_filter 
*filter
; 
1021         struct if_proto 
*proto 
= 0; 
1024         KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT 
| DBG_FUNC_START
,0,0,0,0,0); 
1026         if ((raw 
!= 0) || proto_family 
!= PF_INET 
|| do_brige
) { 
1028         if ((raw 
!= 0) || proto_family 
!= PF_INET
) { 
1030                 while (packetlist
) { 
1032                         packetlist 
= packetlist
->m_nextpkt
; 
1033                         m
->m_nextpkt 
= NULL
; 
1034                         error 
= dlil_output(ifp
, proto_family
, m
, route
, dest
, raw
); 
1037                                         m_freem_list(packetlist
); 
1046         frame_type         
= frame_type_buffer
; 
1047         dst_linkaddr   
= dst_linkaddr_buffer
; 
1049         packetlist 
= packetlist
->m_nextpkt
; 
1050         m
->m_nextpkt 
= NULL
; 
1052         proto 
= find_attached_proto(ifp
, proto_family
); 
1053         if (proto 
== NULL
) { 
1059         if (proto
->proto_kpi 
== kProtoKPI_DLIL
) { 
1060                 if (proto
->kpi
.dlil
.dl_pre_output
) 
1061                 retval 
= proto
->kpi
.dlil
.dl_pre_output(ifp
, proto_family
, &m
, dest
, route
, frame_type
, dst_linkaddr
); 
1064                 if (proto
->kpi
.v1
.pre_output
) 
1065                 retval 
= proto
->kpi
.v1
.pre_output(ifp
, proto_family
, &m
, dest
, route
, frame_type
, dst_linkaddr
); 
1069                 if (retval 
!= EJUSTRETURN
)  { 
1078                 if (ifp
->if_framer
) { 
1079                         retval 
= ifp
->if_framer(ifp
, &m
, dest
, dst_linkaddr
, frame_type
);  
1081                                 if (retval 
!= EJUSTRETURN
) { 
1089                  * Let interface filters (if any) do their thing ... 
1091                 /* Do not pass VLAN tagged packets to filters PR-3586856 */ 
1092                 if ((m
->m_pkthdr
.csum_flags 
& CSUM_VLAN_TAG_VALID
) == 0) { 
1093                         TAILQ_FOREACH(filter
, &ifp
->if_flt_head
, filt_next
) { 
1094                                 if ((filter
->filt_protocol 
== 0 || (filter
->filt_protocol 
== proto_family
)) && 
1095                                         filter
->filt_output
) { 
1096                                         retval 
= filter
->filt_output(filter
->filt_cookie
, ifp
, proto_family
, &m
); 
1098                                                 if (retval 
== EJUSTRETURN
) 
1109                 * Finally, call the driver. 
1112                 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT 
| DBG_FUNC_START
, 0,0,0,0,0); 
1113                 retval 
= ifp
->if_output(ifp
, m
); 
1115                         printf("dlil_output_list: output error retval = %x\n", retval
); 
1118                 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT 
| DBG_FUNC_END
, 0,0,0,0,0); 
1122                         packetlist 
= packetlist
->m_nextpkt
; 
1123                         m
->m_nextpkt 
= NULL
; 
1128         KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT 
| DBG_FUNC_END
,0,0,0,0,0); 
1132         if (packetlist
) /* if any packet left, clean up */ 
1133                 m_freem_list(packetlist
); 
1134         if (retval 
== EJUSTRETURN
) 
1142  * Caller should have a lock on the protocol domain if the protocol 
1143  * doesn't support finer grained locking. In most cases, the lock 
1144  * will be held from the socket layer and won't be released until 
1145  * we return back to the socket layer. 
1147  * This does mean that we must take a protocol lock before we take 
1148  * an interface lock if we're going to take both. This makes sense 
1149  * because a protocol is likely to interact with an ifp while it 
1150  * is under the protocol lock. 
1155         u_long proto_family
, 
1158         const struct sockaddr   
*dest
, 
1161         char                    *frame_type 
= 0; 
1162         char                    *dst_linkaddr 
= 0; 
1164         char                    frame_type_buffer
[MAX_FRAME_TYPE_SIZE 
* 4]; 
1165         char                    dst_linkaddr_buffer
[MAX_LINKADDR 
* 4]; 
1166         struct ifnet_filter 
*filter
; 
1168         KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT 
| DBG_FUNC_START
,0,0,0,0,0); 
1172         frame_type         
= frame_type_buffer
; 
1173         dst_linkaddr   
= dst_linkaddr_buffer
; 
1176                 struct if_proto 
*proto 
= 0; 
1178                 proto 
= find_attached_proto(ifp
, proto_family
); 
1179                 if (proto 
== NULL
) { 
1186                 if (proto
->proto_kpi 
== kProtoKPI_DLIL
) { 
1187                         if (proto
->kpi
.dlil
.dl_pre_output
) 
1188                                 retval 
= proto
->kpi
.dlil
.dl_pre_output(ifp
, proto_family
, &m
, dest
, route
, frame_type
, dst_linkaddr
); 
1191                         if (proto
->kpi
.v1
.pre_output
) 
1192                                 retval 
= proto
->kpi
.v1
.pre_output(ifp
, proto_family
, &m
, dest
, route
, frame_type
, dst_linkaddr
); 
1196                         if (retval 
!= EJUSTRETURN
) { 
1204          * Call framing module  
1206         if ((raw 
== 0) && (ifp
->if_framer
)) { 
1207                 retval 
= ifp
->if_framer(ifp
, &m
, dest
, dst_linkaddr
, frame_type
);  
1209                         if (retval 
!= EJUSTRETURN
) { 
1219          * Need to consider how to handle this. 
1223                 struct mbuf 
*m0 
= m
; 
1224                 struct ether_header 
*eh 
= mtod(m
, struct ether_header 
*); 
1226                 if (m
->m_pkthdr
.rcvif
) 
1227                         m
->m_pkthdr
.rcvif 
= NULL
; 
1228                 ifp 
= bridge_dst_lookup(eh
); 
1229                 bdg_forward(&m0
, ifp
); 
1239          * Let interface filters (if any) do their thing ... 
1242         /* Do not pass VLAN tagged packets to filters PR-3586856 */ 
1243         if ((m
->m_pkthdr
.csum_flags 
& CSUM_VLAN_TAG_VALID
) == 0) { 
1244                 TAILQ_FOREACH(filter
, &ifp
->if_flt_head
, filt_next
) { 
1245                         if ((filter
->filt_protocol 
== 0 || (filter
->filt_protocol 
== proto_family
)) && 
1246                                 filter
->filt_output
) { 
1247                                 retval 
= filter
->filt_output(filter
->filt_cookie
, ifp
, proto_family
, &m
); 
1249                                         if (retval 
!= EJUSTRETURN
) 
1258         * Finally, call the driver. 
1261         KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT 
| DBG_FUNC_START
, 0,0,0,0,0); 
1262         retval 
= ifp
->if_output(ifp
, m
); 
1263         KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT 
| DBG_FUNC_END
, 0,0,0,0,0); 
1265         KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT 
| DBG_FUNC_END
,0,0,0,0,0); 
1269         if (retval 
== EJUSTRETURN
) 
1275 dlil_ioctl(u_long       proto_fam
, 
1280         struct ifnet_filter             
*filter
; 
1281         int                                             retval 
= EOPNOTSUPP
; 
1283         struct if_family_str    
*if_family
; 
1284         int                                             holding_read 
= 0; 
1286         /* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */ 
1287         result 
= ifp_use(ifp
, kIfNetUseCount_MustNotBeZero
); 
1294         /* Run the interface filters first. 
1295          * We want to run all filters before calling the protocol, 
1296          * interface family, or interface. 
1298         TAILQ_FOREACH(filter
, &ifp
->if_flt_head
, filt_next
) { 
1299                 if ((filter
->filt_protocol 
== 0 || (filter
->filt_protocol 
== proto_fam
)) && 
1300                         filter
->filt_ioctl 
!= NULL
) { 
1301                         result 
= filter
->filt_ioctl(filter
->filt_cookie
, ifp
, proto_fam
, ioctl_code
, ioctl_arg
); 
1302                         /* Only update retval if no one has handled the ioctl */ 
1303                         if (retval 
== EOPNOTSUPP 
|| result 
== EJUSTRETURN
) { 
1304                                 if (result 
== ENOTSUP
) 
1305                                         result 
= EOPNOTSUPP
; 
1307                                 if (retval 
&& retval 
!= EOPNOTSUPP
) { 
1314         /* Allow the protocol to handle the ioctl */ 
1316                 struct if_proto 
*proto 
= find_attached_proto(ifp
, proto_fam
); 
1319                         result 
= EOPNOTSUPP
; 
1320                         if (proto
->proto_kpi 
== kProtoKPI_DLIL
) { 
1321                                 if (proto
->kpi
.dlil
.dl_ioctl
) 
1322                                         result 
= proto
->kpi
.dlil
.dl_ioctl(proto_fam
, ifp
, ioctl_code
, ioctl_arg
); 
1325                                 if (proto
->kpi
.v1
.ioctl
) 
1326                                         result 
= proto
->kpi
.v1
.ioctl(ifp
, proto_fam
, ioctl_code
, ioctl_arg
); 
1329                         /* Only update retval if no one has handled the ioctl */ 
1330                         if (retval 
== EOPNOTSUPP 
|| result 
== EJUSTRETURN
) { 
1331                                 if (result 
== ENOTSUP
) 
1332                                         result 
= EOPNOTSUPP
; 
1334                                 if (retval 
&& retval 
!= EOPNOTSUPP
) { 
1342          * Since we have incremented the use count on the ifp, we are guaranteed 
1343          * that the ifp will not go away (the function pointers may not be changed). 
1344          * We release the dlil read lock so the interface ioctl may trigger a 
1345          * protocol attach. This happens with vlan and may occur with other virtual 
1351         /* retval is either 0 or EOPNOTSUPP */ 
1354          * Let the family handle this ioctl. 
1355          * If it returns something non-zero and not EOPNOTSUPP, we're done. 
1356          * If it returns zero, the ioctl was handled, so set retval to zero. 
1358         if_family 
= find_family_module(ifp
->if_family
); 
1359         if ((if_family
) && (if_family
->ifmod_ioctl
)) { 
1360                 result 
= (*if_family
->ifmod_ioctl
)(ifp
, ioctl_code
, ioctl_arg
); 
1362                 /* Only update retval if no one has handled the ioctl */ 
1363                 if (retval 
== EOPNOTSUPP 
|| result 
== EJUSTRETURN
) { 
1364                         if (result 
== ENOTSUP
) 
1365                                 result 
= EOPNOTSUPP
; 
1367                         if (retval 
&& retval 
!= EOPNOTSUPP
) { 
1374          * Let the interface handle this ioctl. 
1375          * If it returns EOPNOTSUPP, ignore that, we may have 
1376          * already handled this in the protocol or family. 
1379                 result 
= (*ifp
->if_ioctl
)(ifp
, ioctl_code
, ioctl_arg
); 
1381         /* Only update retval if no one has handled the ioctl */ 
1382         if (retval 
== EOPNOTSUPP 
|| result 
== EJUSTRETURN
) { 
1383                 if (result 
== ENOTSUP
) 
1384                         result 
= EOPNOTSUPP
; 
1386                 if (retval 
&& retval 
!= EOPNOTSUPP
) { 
1395                 ifp_use_reached_zero(ifp
); 
1397         if (retval 
== EJUSTRETURN
) 
1402 __private_extern__ errno_t
 
1406         bpf_packet_func callback
) 
1411         if (ifp
->if_set_bpf_tap
) 
1412                 error 
= ifp
->if_set_bpf_tap(ifp
, mode
, callback
); 
1418 __private_extern__ errno_t
 
1421         const struct sockaddr 
*proto_addr
, 
1422         struct sockaddr 
*ll_addr
, 
1425         errno_t result 
= EOPNOTSUPP
; 
1426         struct if_proto 
*proto
; 
1427         const struct sockaddr 
*verify
; 
1431         bzero(ll_addr
, ll_len
); 
1433         /* Call the protocol first */ 
1434         proto 
= find_attached_proto(ifp
, proto_addr
->sa_family
); 
1435         if (proto 
!= NULL 
&& proto
->proto_kpi 
!= kProtoKPI_DLIL 
&& 
1436                 proto
->kpi
.v1
.resolve_multi 
!= NULL
) { 
1437                 result 
= proto
->kpi
.v1
.resolve_multi(ifp
, proto_addr
, 
1438                                                                                 (struct sockaddr_dl
*)ll_addr
, ll_len
); 
1441         /* Let the interface verify the multicast address */ 
1442         if ((result 
== EOPNOTSUPP 
|| result 
== 0) && ifp
->if_check_multi
) { 
1446                         verify 
= proto_addr
; 
1447                 result 
= ifp
->if_check_multi(ifp
, verify
); 
1455 __private_extern__ errno_t
 
1456 dlil_send_arp_internal( 
1459         const struct sockaddr_dl
* sender_hw
, 
1460         const struct sockaddr
* sender_proto
, 
1461         const struct sockaddr_dl
* target_hw
, 
1462         const struct sockaddr
* target_proto
) 
1464         struct if_proto 
*proto
; 
1469         proto 
= find_attached_proto(ifp
, target_proto
->sa_family
); 
1470         if (proto 
== NULL 
|| proto
->proto_kpi 
== kProtoKPI_DLIL 
|| 
1471                 proto
->kpi
.v1
.send_arp 
== NULL
) { 
1475                 result 
= proto
->kpi
.v1
.send_arp(ifp
, arpop
, sender_hw
, sender_proto
, 
1476                                                                                 target_hw
, target_proto
); 
1484 __private_extern__ errno_t
 
1488         const struct sockaddr_dl
* sender_hw
, 
1489         const struct sockaddr
* sender_proto
, 
1490         const struct sockaddr_dl
* target_hw
, 
1491         const struct sockaddr
* target_proto
) 
1495         if (target_proto 
== NULL 
|| (sender_proto 
&& 
1496                 sender_proto
->sa_family 
!= target_proto
->sa_family
)) 
1500          * If this is an ARP request and the target IP is IPv4LL, 
1501          * send the request on all interfaces. 
1503         if (IN_LINKLOCAL(((const struct sockaddr_in
*)target_proto
)->sin_addr
.s_addr
) 
1504                  && ipv4_ll_arp_aware 
!= 0 && target_proto
->sa_family 
== AF_INET 
&& 
1505                 arpop 
== ARPOP_REQUEST
) { 
1512                 if (ifnet_list_get(IFNET_FAMILY_ANY
, &ifp_list
, &count
) == 0) { 
1513                         for (ifp_on 
= 0; ifp_on 
< count
; ifp_on
++) { 
1515                                 ifaddr_t                        source_hw 
= NULL
; 
1516                                 ifaddr_t                        source_ip 
= NULL
; 
1517                                 struct sockaddr_in      source_ip_copy
; 
1520                                  * Only arp on interfaces marked for IPv4LL ARPing. This may 
1521                                  * mean that we don't ARP on the interface the subnet route 
1524                                 if ((ifp_list
[ifp_on
]->if_eflags 
& IFEF_ARPLL
) == 0) { 
1528                                 source_hw 
= TAILQ_FIRST(&ifp_list
[ifp_on
]->if_addrhead
); 
1530                                 /* Find the source IP address */ 
1531                                 ifnet_lock_shared(ifp_list
[ifp_on
]); 
1532                                 TAILQ_FOREACH(source_ip
, &ifp_list
[ifp_on
]->if_addrhead
, 
1534                                         if (source_ip
->ifa_addr 
&& 
1535                                                 source_ip
->ifa_addr
->sa_family 
== AF_INET
) { 
1540                                 /* No IP Source, don't arp */ 
1541                                 if (source_ip 
== NULL
) { 
1542                                         ifnet_lock_done(ifp_list
[ifp_on
]); 
1546                                 /* Copy the source IP address */ 
1547                                 source_ip_copy 
= *(struct sockaddr_in
*)source_ip
->ifa_addr
; 
1549                                 ifnet_lock_done(ifp_list
[ifp_on
]); 
1552                                 new_result 
= dlil_send_arp_internal(ifp_list
[ifp_on
], arpop
, 
1553                                                                         (struct sockaddr_dl
*)source_hw
->ifa_addr
, 
1554                                                                         (struct sockaddr
*)&source_ip_copy
, NULL
, 
1557                                 if (result 
== ENOTSUP
) { 
1558                                         result 
= new_result
; 
1563                 ifnet_list_free(ifp_list
); 
1566                 result 
= dlil_send_arp_internal(ifp
, arpop
, sender_hw
, sender_proto
, 
1567                                                                                 target_hw
, target_proto
); 
1582                 old_value 
= ifp
->if_usecnt
; 
1583                 if (old_value 
== 0 && handle_zero 
== kIfNetUseCount_MustNotBeZero
) { 
1584                         retval 
= ENXIO
; // ifp is invalid 
1587         } while (!OSCompareAndSwap((UInt32
)old_value
, (UInt32
)old_value 
+ 1, (UInt32
*)&ifp
->if_usecnt
)); 
1592 /* ifp_unuse is broken into two pieces. 
1594  * ifp_use and ifp_unuse must be called between when the caller calls 
1595  * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some 
1596  * operations after dlil_write_end has been called. For this reason, 
1597  * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse 
1598  * returns a non-zero value. The caller must call ifp_use_reached_zero 
1599  * after the caller has called dlil_write_end. 
1602 ifp_use_reached_zero( 
1605         struct if_family_str 
*if_family
; 
1606         ifnet_detached_func     free_func
; 
1610         if (ifp
->if_usecnt 
!= 0) 
1611                 panic("ifp_use_reached_zero: ifp->if_usecnt != 0"); 
1613         /* Let BPF know we're detaching */ 
1616         ifnet_head_lock_exclusive(); 
1617         ifnet_lock_exclusive(ifp
); 
1619         /* Remove ourselves from the list */ 
1620         TAILQ_REMOVE(&ifnet_head
, ifp
, if_link
); 
1621         ifnet_addrs
[ifp
->if_index 
- 1] = 0; 
1623         /* ifp should be removed from the interface list */ 
1624         while (ifp
->if_multiaddrs
.lh_first
) { 
1625                 struct ifmultiaddr 
*ifma 
= ifp
->if_multiaddrs
.lh_first
; 
1628                  * When the interface is gone, we will no longer 
1629                  * be listening on these multicasts. Various bits 
1630                  * of the stack may be referencing these multicasts, 
1631                  * release only our reference. 
1633                 LIST_REMOVE(ifma
, ifma_link
); 
1634                 ifma
->ifma_ifp 
= NULL
; 
1639         ifp
->if_eflags 
&= ~IFEF_DETACHING
; // clear the detaching flag 
1640         ifnet_lock_done(ifp
); 
1642         if_family 
= find_family_module(ifp
->if_family
); 
1643         if (if_family 
&& if_family
->del_if
) 
1644                 if_family
->del_if(ifp
); 
1646         if (--if_family
->if_usecnt 
== 0) { 
1647                 if (if_family
->shutdown
) 
1648                         (*if_family
->shutdown
)(); 
1650                 TAILQ_REMOVE(&if_family_head
, if_family
, if_fam_next
); 
1651                 FREE(if_family
, M_IFADDR
); 
1655         dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_IF_DETACHED
, 0, 0); 
1656         free_func 
= ifp
->if_free
; 
1668         oldval 
= OSDecrementAtomic((UInt32
*)&ifp
->if_usecnt
); 
1670                 panic("ifp_unuse: ifp(%s%n)->if_usecnt was zero\n", ifp
->if_name
, ifp
->if_unit
); 
1675         if ((ifp
->if_eflags 
& IFEF_DETACHING
) == 0) 
1676                 panic("ifp_unuse: use count reached zero but detching flag is not set!"); 
1678         return 1; /* caller must call ifp_use_reached_zero */ 
1686         oldval 
= OSIncrementAtomic(&ifp
->if_refcnt
); 
1694         oldval 
= OSDecrementAtomic((UInt32
*)&ifp
->if_refcnt
); 
1696                 panic("dlil_if_reference - refcount decremented past zero!"); 
1699 extern lck_mtx_t        
*domain_proto_mtx
; 
1702 dlil_attach_protocol_internal( 
1703         struct if_proto 
*proto
, 
1704         const struct ddesc_head_str 
*demux
, 
1705         const struct ifnet_demux_desc 
*demux_list
, 
1706         u_int32_t       demux_count
) 
1708         struct ddesc_head_str temp_head
; 
1709         struct kev_dl_proto_data        ev_pr_data
; 
1710         struct ifnet 
*ifp 
= proto
->ifp
; 
1712         u_long hash_value 
= proto_hash_value(proto
->protocol_family
); 
1713         int     if_using_kpi 
= (ifp
->if_eflags 
& IFEF_USEKPI
) != 0; 
1714         void* free_me 
= NULL
; 
1716     /* setup some of the common values */ 
1719                 lck_mtx_lock(domain_proto_mtx
); 
1720                 struct domain 
*dp 
= domains
; 
1721                 while (dp 
&& (protocol_family_t
)dp
->dom_family 
!= proto
->protocol_family
) 
1723                 proto
->dl_domain 
= dp
; 
1724                 lck_mtx_unlock(domain_proto_mtx
); 
1728          * Convert the demux descriptors to a type the interface 
1729          * will understand. Checking e_flags should be safe, this 
1730          * flag won't change. 
1732         if (if_using_kpi 
&& demux
) { 
1733                 /* Convert the demux linked list to a demux_list */ 
1734                 struct dlil_demux_desc  
*demux_entry
; 
1735                 struct ifnet_demux_desc 
*temp_list 
= NULL
; 
1738                 TAILQ_FOREACH(demux_entry
, demux
, next
) { 
1742                 temp_list 
= _MALLOC(sizeof(struct ifnet_demux_desc
) * i
, M_TEMP
, M_WAITOK
); 
1743                 free_me 
= temp_list
; 
1745                 if (temp_list 
== NULL
) 
1749                 TAILQ_FOREACH(demux_entry
, demux
, next
) { 
1750                         /* dlil_demux_desc types 1, 2, and 3 are obsolete and can not be translated */ 
1751                         if (demux_entry
->type 
== 1 || 
1752                                 demux_entry
->type 
== 2 || 
1753                                 demux_entry
->type 
== 3) { 
1754                                 FREE(free_me
, M_TEMP
); 
1758                         temp_list
[i
].type 
= demux_entry
->type
; 
1759                         temp_list
[i
].data 
= demux_entry
->native_type
; 
1760                         temp_list
[i
].datalen 
= demux_entry
->variants
.native_type_length
; 
1764                 demux_list 
= temp_list
; 
1766         else if (!if_using_kpi 
&& demux_list 
!= NULL
) { 
1767                 struct dlil_demux_desc 
*demux_entry
; 
1770                 demux_entry 
= _MALLOC(sizeof(struct dlil_demux_desc
) * demux_count
, M_TEMP
, M_WAITOK
); 
1771                 free_me 
= demux_entry
; 
1772                 if (demux_entry 
== NULL
) 
1775                 TAILQ_INIT(&temp_head
); 
1777                 for (i 
= 0; i 
< demux_count
; i
++) { 
1778                         demux_entry
[i
].type 
= demux_list
[i
].type
; 
1779                         demux_entry
[i
].native_type 
= demux_list
[i
].data
; 
1780                         demux_entry
[i
].variants
.native_type_length 
= demux_list
[i
].datalen
; 
1781                         TAILQ_INSERT_TAIL(&temp_head
, &demux_entry
[i
], next
); 
1787          * Take the write lock to protect readers and exclude other writers. 
1791         /* Check that the interface isn't currently detaching */ 
1792         ifnet_lock_shared(ifp
); 
1793         if ((ifp
->if_eflags 
& IFEF_DETACHING
) != 0) { 
1794                 ifnet_lock_done(ifp
); 
1797                         FREE(free_me
, M_TEMP
); 
1800         ifnet_lock_done(ifp
); 
1802         if (find_attached_proto(ifp
, proto
->protocol_family
) != NULL
) { 
1805                         FREE(free_me
, M_TEMP
); 
1810          * Call family module add_proto routine so it can refine the 
1811          * demux descriptors as it wishes. 
1814                 retval 
= ifp
->if_add_proto_u
.kpi(ifp
, proto
->protocol_family
, demux_list
, demux_count
); 
1816                 retval 
= ifp
->if_add_proto_u
.original(ifp
, proto
->protocol_family
, 
1817                                                                                           _cast_non_const(demux
)); 
1822                         FREE(free_me
, M_TEMP
); 
1827          * We can't fail from this point on. 
1828          * Increment the number of uses (protocol attachments + interface attached). 
1830         ifp_use(ifp
, kIfNetUseCount_MustNotBeZero
); 
1833          * Insert the protocol in the hash 
1836                 struct if_proto
*        prev_proto 
= SLIST_FIRST(&ifp
->if_proto_hash
[hash_value
]); 
1837                 while (prev_proto 
&& SLIST_NEXT(prev_proto
, next_hash
) != NULL
) 
1838                         prev_proto 
= SLIST_NEXT(prev_proto
, next_hash
); 
1840                         SLIST_INSERT_AFTER(prev_proto
, proto
, next_hash
); 
1842                         SLIST_INSERT_HEAD(&ifp
->if_proto_hash
[hash_value
], proto
, next_hash
); 
1846          * Add to if_proto list for this interface 
1848         if_proto_ref(proto
); 
1849         if (proto
->proto_kpi 
== kProtoKPI_DLIL 
&& proto
->kpi
.dlil
.dl_offer
) 
1853         /* the reserved field carries the number of protocol still attached (subject to change) */ 
1854         ev_pr_data
.proto_family 
= proto
->protocol_family
; 
1855         ev_pr_data
.proto_remaining_count 
= dlil_ifp_proto_count(ifp
); 
1856         dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_PROTO_ATTACHED
,  
1857                                   (struct net_event_data 
*)&ev_pr_data
,  
1858                                   sizeof(struct kev_dl_proto_data
)); 
1860         DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto
->protocol_family
, 
1861                          ifp
->if_name
, ifp
->if_unit
, retval
); 
1863                 FREE(free_me
, M_TEMP
); 
1867 __private_extern__ 
int 
1868 dlil_attach_protocol_kpi(ifnet_t ifp
, protocol_family_t protocol
, 
1869         const struct ifnet_attach_proto_param 
*proto_details
) 
1872         struct if_proto  
*ifproto 
= NULL
; 
1874         ifproto 
= _MALLOC(sizeof(struct if_proto
), M_IFADDR
, M_WAITOK
); 
1876                 DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n"); 
1880         bzero(ifproto
, sizeof(*ifproto
)); 
1883         ifproto
->protocol_family 
= protocol
; 
1884         ifproto
->proto_kpi 
= kProtoKPI_v1
; 
1885         ifproto
->kpi
.v1
.input 
= proto_details
->input
; 
1886         ifproto
->kpi
.v1
.pre_output 
= proto_details
->pre_output
; 
1887         ifproto
->kpi
.v1
.event 
= proto_details
->event
; 
1888         ifproto
->kpi
.v1
.ioctl 
= proto_details
->ioctl
; 
1889         ifproto
->kpi
.v1
.detached 
= proto_details
->detached
; 
1890         ifproto
->kpi
.v1
.resolve_multi 
= proto_details
->resolve
; 
1891         ifproto
->kpi
.v1
.send_arp 
= proto_details
->send_arp
; 
1893         retval 
= dlil_attach_protocol_internal(ifproto
, NULL
, 
1894                                 proto_details
->demux_list
, proto_details
->demux_count
); 
1897         if (retval 
&& ifproto
) 
1898                 FREE(ifproto
, M_IFADDR
); 
1903 dlil_attach_protocol(struct dlil_proto_reg_str   
*proto
) 
1905         struct ifnet     
*ifp 
= NULL
; 
1906         struct if_proto  
*ifproto 
= NULL
; 
1910          * Do everything we can before taking the write lock 
1913         if ((proto
->protocol_family 
== 0) || (proto
->interface_family 
== 0)) 
1917          * Allocate and init a new if_proto structure 
1919         ifproto 
= _MALLOC(sizeof(struct if_proto
), M_IFADDR
, M_WAITOK
); 
1921                 DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n"); 
1927         /* ifbyfamily returns us an ifp with an incremented if_usecnt */ 
1928         ifp 
= ifbyfamily(proto
->interface_family
, proto
->unit_number
); 
1930                 DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",  
1931                                 proto
->interface_family
, proto
->unit_number
); 
1936     bzero(ifproto
, sizeof(struct if_proto
)); 
1939         ifproto
->protocol_family 
= proto
->protocol_family
; 
1940         ifproto
->proto_kpi 
= kProtoKPI_DLIL
; 
1941         ifproto
->kpi
.dlil
.dl_input              
= proto
->input
; 
1942         ifproto
->kpi
.dlil
.dl_pre_output 
= proto
->pre_output
; 
1943         ifproto
->kpi
.dlil
.dl_event              
= proto
->event
; 
1944         ifproto
->kpi
.dlil
.dl_offer              
= proto
->offer
; 
1945         ifproto
->kpi
.dlil
.dl_ioctl              
= proto
->ioctl
; 
1946         ifproto
->kpi
.dlil
.dl_detached   
= proto
->detached
; 
1948         retval 
= dlil_attach_protocol_internal(ifproto
, &proto
->demux_desc_head
, NULL
, 0); 
1951         if (retval 
&& ifproto
) 
1952                 FREE(ifproto
, M_IFADDR
); 
1956 extern void if_rtproto_del(struct ifnet 
*ifp
, int protocol
); 
1959 dlil_detach_protocol_internal( 
1960         struct if_proto 
*proto
) 
1962         struct ifnet 
*ifp 
= proto
->ifp
; 
1963         u_long proto_family 
= proto
->protocol_family
; 
1964         struct kev_dl_proto_data        ev_pr_data
; 
1966         if (proto
->proto_kpi 
== kProtoKPI_DLIL
) { 
1967                 if (proto
->kpi
.dlil
.dl_detached
) 
1968                         proto
->kpi
.dlil
.dl_detached(proto
->protocol_family
, ifp
); 
1971                 if (proto
->kpi
.v1
.detached
) 
1972                         proto
->kpi
.v1
.detached(ifp
, proto
->protocol_family
); 
1974         if_proto_free(proto
); 
1977          * Cleanup routes that may still be in the routing table for that interface/protocol pair. 
1980         if_rtproto_del(ifp
, proto_family
); 
1982         /* the reserved field carries the number of protocol still attached (subject to change) */ 
1983         ev_pr_data
.proto_family   
= proto_family
; 
1984         ev_pr_data
.proto_remaining_count 
= dlil_ifp_proto_count(ifp
); 
1985         dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_PROTO_DETACHED
,  
1986                                   (struct net_event_data 
*)&ev_pr_data
,  
1987                                   sizeof(struct kev_dl_proto_data
)); 
1992 dlil_detach_protocol(struct ifnet 
*ifp
, u_long proto_family
) 
1994         struct if_proto 
*proto 
= NULL
; 
1996         int use_reached_zero 
= 0; 
1999         if ((retval 
= dlil_write_begin()) != 0) { 
2000                 if (retval 
== EDEADLK
) { 
2003                         proto 
= find_attached_proto(ifp
, proto_family
); 
2008                                 proto
->detaching 
= 1; 
2009                                 dlil_detach_waiting 
= 1; 
2010                                 wakeup(&dlil_detach_waiting
); 
2017         proto 
= find_attached_proto(ifp
, proto_family
); 
2019         if (proto 
== NULL
) { 
2026          * Call family module del_proto 
2029         if (ifp
->if_del_proto
) 
2030                 ifp
->if_del_proto(ifp
, proto
->protocol_family
); 
2032         if (proto
->proto_kpi 
== kProtoKPI_DLIL 
&& proto
->kpi
.dlil
.dl_offer
) 
2035         SLIST_REMOVE(&ifp
->if_proto_hash
[proto_hash_value(proto_family
)], proto
, if_proto
, next_hash
); 
2038          * We can do the rest of the work outside of the write lock. 
2040         use_reached_zero 
= ifp_unuse(ifp
); 
2043         dlil_detach_protocol_internal(proto
); 
2046          * Only handle the case where the interface will go away after 
2047          * we've sent the message. This way post message can send the 
2048          * message to the interface safely. 
2051         if (use_reached_zero
) 
2052                 ifp_use_reached_zero(ifp
); 
2059  * dlil_delayed_detach_thread is responsible for detaching 
2060  * protocols, protocol filters, and interface filters after 
2061  * an attempt was made to detach one of those items while 
2062  * it was not safe to do so (i.e. called dlil_read_begin). 
2064  * This function will take the dlil write lock and walk 
2065  * through each of the interfaces looking for items with 
2066  * the detaching flag set. When an item is found, it is 
2067  * detached from the interface and placed on a local list. 
2068  * After all of the items have been collected, we drop the 
2069  * write lock and performed the post detach. This is done 
2070  * so we only have to take the write lock once. 
2072  * When detaching a protocol filter, if we find that we 
2073  * have detached the very last protocol and we need to call 
2074  * ifp_use_reached_zero, we have to break out of our work 
2075  * to drop the write lock so we can call ifp_use_reached_zero. 
2079 dlil_delayed_detach_thread(__unused 
void* foo
, __unused wait_result_t wait
) 
2081         thread_t self 
= current_thread(); 
2084         ml_thread_policy(self
, MACHINE_GROUP
, 
2085                                          (MACHINE_NETWORK_GROUP
|MACHINE_NETWORK_NETISR
)); 
2089                 if (dlil_detach_waiting 
!= 0 && dlil_write_begin() == 0) { 
2091                         struct proto_hash_entry detached_protos
; 
2092                         struct ifnet_filter_head detached_filters
; 
2093                         struct if_proto 
*proto
; 
2094                         struct if_proto 
*next_proto
; 
2095                         struct ifnet_filter 
*filt
; 
2096                         struct ifnet_filter 
*next_filt
; 
2101                         /* Clear the detach waiting flag */ 
2102                         dlil_detach_waiting 
= 0; 
2103                         TAILQ_INIT(&detached_filters
); 
2104                         SLIST_INIT(&detached_protos
); 
2106                         ifnet_head_lock_shared(); 
2107                         TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) { 
2110                                 // Look for protocols and protocol filters 
2111                                 for (i 
= 0; i 
< PROTO_HASH_SLOTS 
&& !reached_zero
; i
++) { 
2112                                         struct if_proto 
**prev_nextptr 
= &SLIST_FIRST(&ifp
->if_proto_hash
[i
]); 
2113                                         for (proto 
= *prev_nextptr
; proto
; proto 
= *prev_nextptr
) { 
2115                                                 // Detach this protocol 
2116                                                 if (proto
->detaching
) { 
2117                                                         if (ifp
->if_del_proto
) 
2118                                                                 ifp
->if_del_proto(ifp
, proto
->protocol_family
); 
2119                                                         if (proto
->proto_kpi 
== kProtoKPI_DLIL 
&& proto
->kpi
.dlil
.dl_offer
) 
2121                                                         *prev_nextptr 
= SLIST_NEXT(proto
, next_hash
); 
2122                                                         SLIST_INSERT_HEAD(&detached_protos
, proto
, next_hash
); 
2123                                                         reached_zero 
= ifp_unuse(ifp
); 
2129                                                         // Update prev_nextptr to point to our next ptr 
2130                                                         prev_nextptr 
= &SLIST_NEXT(proto
, next_hash
); 
2135                                 // look for interface filters that need to be detached 
2136                                 for (filt 
= TAILQ_FIRST(&ifp
->if_flt_head
); filt
; filt 
= next_filt
) { 
2137                                         next_filt 
= TAILQ_NEXT(filt
, filt_next
); 
2138                                         if (filt
->filt_detaching 
!= 0) { 
2139                                                 // take this interface filter off the interface filter list 
2140                                                 TAILQ_REMOVE(&ifp
->if_flt_head
, filt
, filt_next
); 
2142                                                 // put this interface filter on the detached filters list 
2143                                                 TAILQ_INSERT_TAIL(&detached_filters
, filt
, filt_next
); 
2147                                 if (ifp
->if_delayed_detach
) { 
2148                                         ifp
->if_delayed_detach 
= 0; 
2149                                         reached_zero 
= ifp_unuse(ifp
); 
2158                         for (filt 
= TAILQ_FIRST(&detached_filters
); filt
; filt 
= next_filt
) { 
2159                                 next_filt 
= TAILQ_NEXT(filt
, filt_next
); 
2161                                  * dlil_detach_filter_internal won't remove an item from 
2162                                  * the list if it is already detached (second parameter). 
2163                                  * The item will be freed though. 
2165                                 dlil_detach_filter_internal(filt
, 1); 
2168                         for (proto 
= SLIST_FIRST(&detached_protos
); proto
; proto 
= next_proto
) { 
2169                                 next_proto 
= SLIST_NEXT(proto
, next_hash
); 
2170                                 dlil_detach_protocol_internal(proto
); 
2174                                 ifp_use_reached_zero(ifp
); 
2175                                 dlil_detach_waiting 
= 1; // we may have missed something 
2179                 if (!asserted 
&& dlil_detach_waiting 
== 0) { 
2181                         assert_wait(&dlil_detach_waiting
, THREAD_UNINT
); 
2184                 if (dlil_detach_waiting 
== 0) { 
2186                         thread_block(dlil_delayed_detach_thread
); 
2192 dlil_call_delayed_detach_thread(void) { 
2193         dlil_delayed_detach_thread(NULL
, THREAD_RESTART
); 
2196 extern int if_next_index(void); 
/*
 * dlil_if_attach_with_address -- attach an ifnet to the global interface
 * list, optionally seeding its link-layer ifaddr from ll_addr.
 *
 * Visible steps: duplicate-attach check, per-ifnet lock allocation,
 * family-module lookup, protocol hash allocation, family add_if/init_if
 * callbacks, construction of the AF_LINK sockaddr_dl address/netmask
 * pair, insertion into ifnet_head / ifindex2ifnet, and a
 * KEV_DL_IF_ATTACHED kernel event.
 *
 * NOTE(review): this chunk is an extraction with many original lines
 * missing (the ifp parameter line, braces, returns, error paths,
 * preprocessor conditionals). Comments below describe only what the
 * surviving lines show.
 */
2198 __private_extern__ 
int 
2199 dlil_if_attach_with_address( 
/* first parameter (struct ifnet *ifp) is missing from this extraction */
2201         const struct sockaddr_dl        
*ll_addr
) 
2203         u_long              interface_family 
= ifp
->if_family
; 
2204         struct if_family_str    
*if_family 
= NULL
; 
2206         struct ifnet 
*tmp_if
; 
2207         struct proto_hash_entry 
*new_proto_list 
= NULL
; 
2211         ifnet_head_lock_shared(); 
2213         /* Verify we aren't already on the list */ 
2214         TAILQ_FOREACH(tmp_if
, &ifnet_head
, if_link
) { 
2215                 if (tmp_if 
== ifp
) { 
/*
 * Allocate the per-ifnet lock unless an IFEF_REUSE interface already has
 * one.  Two alternative allocations are visible (rwlock vs. mutex) —
 * presumably selected by a preprocessor conditional that the extraction
 * dropped; TODO confirm against the full source.
 */
2223         if ((ifp
->if_eflags 
& IFEF_REUSE
) == 0 || ifp
->if_lock 
== 0) 
2225                 ifp
->if_lock 
= lck_rw_alloc_init(ifnet_lock_group
, ifnet_lock_attr
); 
2227                 ifp
->if_lock 
= lck_mtx_alloc_init(ifnet_lock_group
, ifnet_lock_attr
); 
/* Lock allocation failure path (body lines missing from extraction). */
2230         if (ifp
->if_lock 
== 0) { 
2234         // Only use family if this is not a KPI interface 
2235         if ((ifp
->if_eflags 
& IFEF_USEKPI
) == 0) { 
2236                 if_family 
= find_family_module(interface_family
); 
2240          * Allow interfaces withouth protocol families to attach 
2241          * only if they have the necessary fields filled out. 
2244         if ((if_family 
== 0) && 
2245                 (ifp
->if_add_proto 
== 0 || ifp
->if_del_proto 
== 0)) { 
2246                 DLIL_PRINTF("Attempt to attach interface without family module - %d\n",  
/*
 * Allocate a fresh protocol hash table unless a reused interface
 * already carries one; it is zeroed below before installation.
 */
2251         if ((ifp
->if_eflags 
& IFEF_REUSE
) == 0 || ifp
->if_proto_hash 
== NULL
) { 
2252                 MALLOC(new_proto_list
, struct proto_hash_entry
*, sizeof(struct proto_hash_entry
) * PROTO_HASH_SLOTS
, 
2255                 if (new_proto_list 
== 0) { 
2264          * Call the family module to fill in the appropriate fields in the 
/* Family add_if callback; its error path prints the failure code. */
2269                 stat 
= if_family
->add_if(ifp
); 
2271                         DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat
); 
/* Wire the family's proto add/del handlers into the ifnet and hold a ref. */
2275                 ifp
->if_add_proto_u
.original 
= if_family
->add_proto
; 
2276                 ifp
->if_del_proto 
= if_family
->del_proto
; 
2277                 if_family
->refcnt
++; 
2281         TAILQ_INIT(&ifp
->if_flt_head
); 
2284         if (new_proto_list
) { 
2285                 bzero(new_proto_list
, (PROTO_HASH_SLOTS 
* sizeof(struct proto_hash_entry
))); 
2286                 ifp
->if_proto_hash 
= new_proto_list
; 
/* Initialize queue limits, per-ifnet lists, and the usecount, then take
 * both the global list lock and the ifnet lock for list insertion. */
2292                 struct ifaddr 
*ifa 
= 0; 
2294                 if (ifp
->if_snd
.ifq_maxlen 
== 0) 
2295                         ifp
->if_snd
.ifq_maxlen 
= ifqmaxlen
; 
2296                 TAILQ_INIT(&ifp
->if_prefixhead
); 
2297                 LIST_INIT(&ifp
->if_multiaddrs
); 
2298                 ifnet_touch_lastchange(ifp
); 
2300                 /* usecount to track attachment to the ifnet list */ 
2301                 ifp_use(ifp
, kIfNetUseCount_MayBeZero
); 
2303                 /* Lock the list of interfaces */ 
2304                 ifnet_head_lock_exclusive(); 
2305                 ifnet_lock_exclusive(ifp
); 
/*
 * Fresh (non-reused) interface: assign a new if_index and build the
 * AF_LINK link-level address.  The single _MALLOC holds one ifaddr
 * followed by two sockaddr_dl slots: the address, then the netmask.
 */
2307                 if ((ifp
->if_eflags 
& IFEF_REUSE
) == 0 || ifp
->if_index 
== 0) { 
2309                         int namelen
, masklen
, socksize
, ifasize
; 
2311                         ifp
->if_index 
= if_next_index(); 
2313                         namelen 
= snprintf(workbuf
, sizeof(workbuf
), "%s%d", ifp
->if_name
, ifp
->if_unit
); 
2314 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m)) 
2315                         masklen 
= _offsetof(struct sockaddr_dl
, sdl_data
[0]) + namelen
; 
2316                         socksize 
= masklen 
+ ifp
->if_addrlen
; 
2317 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1))) 
2318                         if ((u_long
)socksize 
< sizeof(struct sockaddr_dl
)) 
2319                                 socksize 
= sizeof(struct sockaddr_dl
); 
/* Round the sockaddr size up to a long-word boundary. */
2320                         socksize 
= ROUNDUP(socksize
); 
2321                         ifasize 
= sizeof(struct ifaddr
) + 2 * socksize
; 
2322                         ifa 
= (struct ifaddr
*)_MALLOC(ifasize
, M_IFADDR
, M_WAITOK
); 
/* Fill in the link-level sockaddr_dl immediately after the ifaddr. */
2324                                 struct sockaddr_dl 
*sdl 
= (struct sockaddr_dl 
*)(ifa 
+ 1); 
2325                                 ifnet_addrs
[ifp
->if_index 
- 1] = ifa
; 
2326                                 bzero(ifa
, ifasize
); 
2327                                 sdl
->sdl_len 
= socksize
; 
2328                                 sdl
->sdl_family 
= AF_LINK
; 
2329                                 bcopy(workbuf
, sdl
->sdl_data
, namelen
); 
2330                                 sdl
->sdl_nlen 
= namelen
; 
2331                                 sdl
->sdl_index 
= ifp
->if_index
; 
2332                                 sdl
->sdl_type 
= ifp
->if_type
; 
/* Copy the caller-supplied hardware address; its length must match
 * ifp->if_addrlen or the kernel panics. */
2334                                         sdl
->sdl_alen 
= ll_addr
->sdl_alen
; 
2335                                         if (ll_addr
->sdl_alen 
!= ifp
->if_addrlen
) 
2336                                                 panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen"); 
2337                                         bcopy(CONST_LLADDR(ll_addr
), LLADDR(sdl
), sdl
->sdl_alen
); 
/* Second sockaddr_dl slot becomes the netmask: name bytes set to 0xff. */
2340                                 ifa
->ifa_rtrequest 
= link_rtrequest
; 
2341                                 ifa
->ifa_addr 
= (struct sockaddr
*)sdl
; 
2342                                 sdl 
= (struct sockaddr_dl
*)(socksize 
+ (caddr_t
)sdl
); 
2343                                 ifa
->ifa_netmask 
= (struct sockaddr
*)sdl
; 
2344                                 sdl
->sdl_len 
= masklen
; 
2345                                 while (namelen 
!= 0) 
2346                                         sdl
->sdl_data
[--namelen
] = 0xff; 
2350                         /* preserve the first ifaddr */ 
2351                         ifnet_addrs
[ifp
->if_index 
- 1] = TAILQ_FIRST(&ifp
->if_addrhead
); 
/* (Re)initialize the address list and re-insert the link-level ifaddr
 * at the head so it is always first. */
2355                 TAILQ_INIT(&ifp
->if_addrhead
); 
2356                 ifa 
= ifnet_addrs
[ifp
->if_index 
- 1]; 
2360                          * We don't use if_attach_ifa because we want 
2361                          * this address to be first on the list. 
2364                         ifa
->ifa_debug 
|= IFA_ATTACHED
; 
2365                         TAILQ_INSERT_HEAD(&ifp
->if_addrhead
, ifa
, ifa_link
); 
/* Publish the interface on the global list and index table. */
2368                 TAILQ_INSERT_TAIL(&ifnet_head
, ifp
, if_link
); 
2369                 ifindex2ifnet
[ifp
->if_index
] = ifp
; 
/* Optional family init hook, run after the interface is on the list. */
2375         if (if_family 
&& if_family
->init_if
) { 
2376                 stat 
= if_family
->init_if(ifp
); 
2378                         DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat
); 
2382         ifnet_lock_done(ifp
); 
/* Announce the attach to kernel-event listeners. */
2384     dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_IF_ATTACHED
, 0, 0); 
/*
 * Attach an interface with no caller-supplied link-layer address;
 * simply delegates to dlil_if_attach_with_address with ll_addr == NULL.
 * NOTE(review): the original almost certainly returned this call's
 * result (`return` appears lost to extraction, as is the return-type
 * line and closing brace) — confirm against the full source.
 */
2390 dlil_if_attach(struct ifnet     
*ifp
) 
2392         dlil_if_attach_with_address(ifp
, NULL
); 
/*
 * dlil_if_detach -- begin detaching ifp from the DLIL layer.
 *
 * Marks the interface IFEF_DETACHING (so no new protocols attach),
 * posts KEV_DL_IF_DETACHING, then tears down the interface filters and
 * drops the list usecount.  If the DLIL write lock cannot be taken
 * (EDEADLK), the work is deferred to the delayed-detach thread and
 * DLIL_WAIT_FOR_FREE is returned to the caller.
 *
 * NOTE(review): extraction dropped the return-type line, several
 * braces, and the declarations of retval/zeroed; comments reflect only
 * the visible lines.
 */
2397 dlil_if_detach(struct ifnet 
*ifp
) 
2399         struct ifnet_filter 
*filter
; 
2400         struct ifnet_filter     
*filter_next
; 
2403         struct ifnet_filter_head fhead
; 
2406         ifnet_lock_exclusive(ifp
); 
2408         if ((ifp
->if_eflags 
& IFEF_DETACHING
) != 0) { 
2409                 /* Interface has already been detached */ 
2410                 ifnet_lock_done(ifp
); 
2415          * Indicate this interface is being detached. 
2417          * This should prevent protocols from attaching 
2418          * from this point on. Interface will remain on 
2419          * the list until all of the protocols are detached. 
2421         ifp
->if_eflags 
|= IFEF_DETACHING
; 
2422         ifnet_lock_done(ifp
); 
2424         dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_IF_DETACHING
, 0, 0); 
/* Could not take the write lock: on EDEADLK hand the detach off to the
 * delayed-detach thread and report DLIL_WAIT_FOR_FREE. */
2426         if ((retval 
= dlil_write_begin()) != 0) { 
2427                 if (retval 
== EDEADLK
) { 
2428                         retval 
= DLIL_WAIT_FOR_FREE
; 
2430                         /* We need to perform a delayed detach */ 
2431                         ifp
->if_delayed_detach 
= 1; 
2432                         dlil_detach_waiting 
= 1; 
2433                         wakeup(&dlil_detach_waiting
); 
2438         /* Steal the list of interface filters */ 
2439         fhead 
= ifp
->if_flt_head
; 
2440         TAILQ_INIT(&ifp
->if_flt_head
); 
2442         /* unuse the interface */ 
2443         zeroed 
= ifp_unuse(ifp
); 
/* Detach each stolen filter; the '1' flag marks it already off-list. */
2447         for (filter 
= TAILQ_FIRST(&fhead
); filter
; filter 
= filter_next
) { 
2448                 filter_next 
= TAILQ_NEXT(filter
, filt_next
); 
2449                 dlil_detach_filter_internal(filter
, 1); 
2453                 retval 
= DLIL_WAIT_FOR_FREE
; 
/* Usecount hit zero during this call: finish the teardown now. */
2457                 ifp_use_reached_zero(ifp
); 
/*
 * dlil_reg_if_modules -- register an interface-family module (its
 * add/del interface and add/del protocol handlers) under
 * interface_family.
 *
 * Rejects duplicate registrations and NULL mandatory handlers, then
 * allocates an if_family_str, copies the handler pointers out of the
 * caller's dlil_ifmod_reg_str, and appends it to if_family_head with an
 * initial refcnt of 1.
 *
 * NOTE(review): return statements, braces, and the lock acquisition
 * around the list insert are missing from this extraction.
 */
2465 dlil_reg_if_modules(u_long  interface_family
,  
2466                     struct dlil_ifmod_reg_str  
*ifmod
) 
2468     struct if_family_str 
*if_family
; 
2471     if (find_family_module(interface_family
))  { 
2472         DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",  
/* All four interface/protocol handlers are mandatory. */
2477     if ((!ifmod
->add_if
) || (!ifmod
->del_if
) || 
2478         (!ifmod
->add_proto
) || (!ifmod
->del_proto
)) { 
2479         DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n"); 
2484      * The following is a gross hack to keep from breaking 
2485      * Vicomsoft's internet gateway on Jaguar. Vicomsoft 
2486      * does not zero the reserved fields in dlil_ifmod_reg_str. 
2487      * As a result, we have to zero any function that used to 
2488      * be reserved fields at the time Vicomsoft built their 
2489      * kext. Radar #2974305 
2491     if (ifmod
->reserved
[0] != 0 || ifmod
->reserved
[1] != 0 || ifmod
->reserved
[2]) { 
2492         if (interface_family 
== 123) {  /* Vicom */ 
/* Allocate and populate the registration record. */
2499     if_family 
= (struct if_family_str 
*) _MALLOC(sizeof(struct if_family_str
), M_IFADDR
, M_WAITOK
); 
2501         DLIL_PRINTF("dlil_reg_if_modules failed allocation\n"); 
2505     bzero(if_family
, sizeof(struct if_family_str
)); 
/* Family id is truncated to 16 bits here — same masking as
 * dlil_reg_proto_module below. */
2507     if_family
->if_family        
= interface_family 
& 0xffff; 
2508     if_family
->shutdown         
= ifmod
->shutdown
; 
2509     if_family
->add_if           
= ifmod
->add_if
; 
2510     if_family
->del_if           
= ifmod
->del_if
; 
2511     if_family
->init_if          
= ifmod
->init_if
; 
2512     if_family
->add_proto        
= ifmod
->add_proto
; 
2513     if_family
->del_proto        
= ifmod
->del_proto
; 
2514     if_family
->ifmod_ioctl      
= ifmod
->ifmod_ioctl
; 
2515     if_family
->refcnt           
= 1; 
2516     if_family
->flags            
= 0; 
2518     TAILQ_INSERT_TAIL(&if_family_head
, if_family
, if_fam_next
); 
/*
 * dlil_dereg_if_modules -- unregister the family module registered
 * under interface_family.
 *
 * Drops one reference; at zero it runs the module's optional shutdown
 * hook, unlinks the record, and frees it.  If references remain, the
 * record is flagged DLIL_SHUTDOWN and DLIL_WAIT_FOR_FREE is returned so
 * the caller knows teardown is deferred.
 *
 * NOTE(review): returns, braces, and the not-found error value are
 * missing from this extraction.
 */
2522 int dlil_dereg_if_modules(u_long interface_family
) 
2524     struct if_family_str  
*if_family
; 
2528     if_family 
= find_family_module(interface_family
); 
2529     if (if_family 
== 0) { 
2533     if (--if_family
->refcnt 
== 0) { 
2534         if (if_family
->shutdown
) 
2535             (*if_family
->shutdown
)(); 
2537         TAILQ_REMOVE(&if_family_head
, if_family
, if_fam_next
); 
2538         FREE(if_family
, M_IFADDR
); 
/* Still referenced: mark for shutdown and defer the free. */
2541         if_family
->flags 
|= DLIL_SHUTDOWN
; 
2542         ret 
= DLIL_WAIT_FOR_FREE
; 
/*
 * dlil_reg_proto_module -- register attach/detach plumbing callbacks
 * for a (protocol_family, interface_family) pair.
 *
 * attach is mandatory (EINVAL otherwise); duplicates for the same pair
 * are rejected.  The record is appended to proto_family_head under
 * proto_family_mutex.
 *
 * NOTE(review): the return-type line, returns, and some braces are
 * missing from this extraction.
 */
2551 dlil_reg_proto_module( 
2552         u_long protocol_family
, 
2553         u_long  interface_family
,  
2554         int (*attach
)(struct ifnet 
*ifp
, u_long protocol_family
), 
2555         int (*detach
)(struct ifnet 
*ifp
, u_long protocol_family
)) 
2557         struct proto_family_str 
*proto_family
; 
2559         if (attach 
== NULL
) return EINVAL
; 
2561         lck_mtx_lock(proto_family_mutex
); 
/* Refuse a second registration for the same (proto, if) pair. */
2563         TAILQ_FOREACH(proto_family
, &proto_family_head
, proto_fam_next
) { 
2564                 if (proto_family
->proto_family 
== protocol_family 
&& 
2565                         proto_family
->if_family 
== interface_family
) { 
2566                         lck_mtx_unlock(proto_family_mutex
); 
2571         proto_family 
= (struct proto_family_str 
*) _MALLOC(sizeof(struct proto_family_str
), M_IFADDR
, M_WAITOK
); 
2572         if (!proto_family
) { 
2573                 lck_mtx_unlock(proto_family_mutex
); 
2577         bzero(proto_family
, sizeof(struct proto_family_str
)); 
2578         proto_family
->proto_family      
= protocol_family
; 
/* Interface family truncated to 16 bits, matching dlil_reg_if_modules. */
2579         proto_family
->if_family         
= interface_family 
& 0xffff; 
2580         proto_family
->attach_proto      
= attach
; 
2581         proto_family
->detach_proto      
= detach
; 
2583         TAILQ_INSERT_TAIL(&proto_family_head
, proto_family
, proto_fam_next
); 
2584         lck_mtx_unlock(proto_family_mutex
); 
/*
 * dlil_dereg_proto_module -- remove the plumbing registration for a
 * (protocol_family, interface_family) pair, freeing the record.
 * All list manipulation happens under proto_family_mutex.
 * NOTE(review): return statements are missing from this extraction.
 */
2588 int dlil_dereg_proto_module(u_long protocol_family
, u_long interface_family
) 
2590         struct proto_family_str  
*proto_family
; 
2593         lck_mtx_lock(proto_family_mutex
); 
2595         proto_family 
= find_proto_module(protocol_family
, interface_family
); 
2596         if (proto_family 
== 0) { 
2597                 lck_mtx_unlock(proto_family_mutex
); 
2601         TAILQ_REMOVE(&proto_family_head
, proto_family
, proto_fam_next
); 
2602         FREE(proto_family
, M_IFADDR
); 
2604         lck_mtx_unlock(proto_family_mutex
); 
/*
 * dlil_plumb_protocol -- attach protocol_family to ifp by looking up
 * the registered plumbing module for (protocol_family, ifp->if_family)
 * and invoking its attach_proto callback while holding
 * proto_family_mutex.
 * NOTE(review): returns and the not-found error value are missing from
 * this extraction.
 */
2608 int dlil_plumb_protocol(u_long protocol_family
, struct ifnet 
*ifp
) 
2610         struct proto_family_str  
*proto_family
; 
2613         lck_mtx_lock(proto_family_mutex
); 
2614         proto_family 
= find_proto_module(protocol_family
, ifp
->if_family
); 
2615         if (proto_family 
== 0) { 
2616                 lck_mtx_unlock(proto_family_mutex
); 
2620         ret 
= proto_family
->attach_proto(ifp
, protocol_family
); 
2622         lck_mtx_unlock(proto_family_mutex
); 
/*
 * dlil_unplumb_protocol -- detach protocol_family from ifp.  Uses the
 * registered detach_proto callback when the plumbing module provides
 * one; otherwise falls back to a direct dlil_detach_protocol (the
 * `else` keyword appears lost to extraction — TODO confirm).
 */
2627 int dlil_unplumb_protocol(u_long protocol_family
, struct ifnet 
*ifp
) 
2629         struct proto_family_str  
*proto_family
; 
2632         lck_mtx_lock(proto_family_mutex
); 
2634         proto_family 
= find_proto_module(protocol_family
, ifp
->if_family
); 
2635         if (proto_family 
&& proto_family
->detach_proto
) 
2636                 ret 
= proto_family
->detach_proto(ifp
, protocol_family
); 
2638                 ret 
= dlil_detach_protocol(ifp
, protocol_family
); 
2640         lck_mtx_unlock(proto_family_mutex
); 
/*
 * Stub handlers installed on a recycled ifnet (see dlil_if_release
 * below): ioctl, output, free, and set_bpf_tap are replaced with these
 * no-ops so stale callers hit harmless functions instead of the old
 * driver's entry points.  All parameters are __unused.
 * NOTE(review): the function-name lines for the first stub
 * (dlil_recycle_ioctl) and most bodies/returns are missing from this
 * extraction; only parameter lists survive.
 */
2646         __unused ifnet_t ifnet_ptr
, 
2647         __unused u_int32_t ioctl_code
, 
2648         __unused 
void *ioctl_arg
) 
2654 dlil_recycle_output( 
2655         __unused 
struct ifnet 
*ifnet_ptr
, 
2664         __unused ifnet_t ifnet_ptr
) 
2669 dlil_recycle_set_bpf_tap( 
2670         __unused ifnet_t ifp
, 
2671         __unused bpf_tap_mode mode
, 
2672         __unused bpf_packet_func callback
) 
2674     /* XXX not sure what to do here */ 
/*
 * dlil_if_acquire -- obtain an ifnet for the given family, preferring a
 * recycled one.  Under dlil_ifnet_mutex it scans dlil_ifnet_head for a
 * not-in-use entry matching family and (uniqueid, uniqueid_len); a
 * match is re-marked IFEF_INUSE|IFEF_REUSE and returned.  Otherwise a
 * fresh dlil_ifnet is allocated, the uniqueid copied in, and the record
 * appended to the list.
 *
 * NOTE(review): extraction dropped the family/ifp out-parameter lines,
 * the in-use (EBUSY?) path, allocation-failure returns, and most
 * braces; comments reflect only the visible lines.
 */
2678 int dlil_if_acquire( 
2680         const void *uniqueid
, 
2681         size_t uniqueid_len
,  
2684     struct ifnet        
*ifp1 
= NULL
; 
2685     struct dlil_ifnet   
*dlifp1 
= NULL
; 
2688     lck_mtx_lock(dlil_ifnet_mutex
); 
2689     TAILQ_FOREACH(dlifp1
, &dlil_ifnet_head
, dl_if_link
) { 
/* dlil_ifnet begins with the embedded ifnet, so the cast is the
 * conventional container access. */
2691         ifp1 
= (struct ifnet 
*)dlifp1
; 
2693                 if (ifp1
->if_family 
== family
)  { 
2695             /* same uniqueid and same len or no unique id specified */ 
2696             if ((uniqueid_len 
== dlifp1
->if_uniqueid_len
) 
2697                 && !bcmp(uniqueid
, dlifp1
->if_uniqueid
, uniqueid_len
)) { 
2699                                 /* check for matching interface in use */ 
2700                                 if (ifp1
->if_eflags 
& IFEF_INUSE
) { 
/* Recycled interface must still carry its lock from a prior attach. */
2708                                                 panic("ifp's lock is gone\n"); 
2709                                         ifnet_lock_exclusive(ifp1
); 
2710                                         ifp1
->if_eflags 
|= (IFEF_INUSE 
| IFEF_REUSE
); 
2711                                         ifnet_lock_done(ifp1
); 
2719     /* no interface found, allocate a new one */ 
2720     MALLOC(dlifp1
, struct dlil_ifnet 
*, sizeof(*dlifp1
), M_NKE
, M_WAITOK
); 
2726     bzero(dlifp1
, sizeof(*dlifp1
)); 
/* Caller supplied a uniqueid: copy it into the record; on allocation
 * failure the dlil_ifnet itself is released. */
2729         MALLOC(dlifp1
->if_uniqueid
, void *, uniqueid_len
, M_NKE
, M_WAITOK
); 
2730         if (dlifp1
->if_uniqueid 
== 0) { 
2731             FREE(dlifp1
, M_NKE
); 
2735         bcopy(uniqueid
, dlifp1
->if_uniqueid
, uniqueid_len
); 
2736         dlifp1
->if_uniqueid_len 
= uniqueid_len
; 
/* Mark the new ifnet in use and point if_name at the embedded storage. */
2739     ifp1 
= (struct ifnet 
*)dlifp1
; 
2740     ifp1
->if_eflags 
|= IFEF_INUSE
; 
2741     ifp1
->if_name 
= dlifp1
->if_namestorage
; 
2743     TAILQ_INSERT_TAIL(&dlil_ifnet_head
, dlifp1
, dl_if_link
); 
2748         lck_mtx_unlock(dlil_ifnet_mutex
); 
/*
 * dlil_if_release -- return an ifnet to the recycle pool.
 *
 * Clears IFEF_INUSE and swaps the driver's ioctl/output/free/bpf-tap
 * handlers for the dlil_recycle_* no-op stubs so late callers are
 * harmless; the interface name is copied into the dlil_ifnet's own
 * if_namestorage so if_name no longer points into driver memory.
 *
 * NOTE(review): strncpy does not guarantee NUL-termination when
 * ifp->if_name is IFNAMSIZ bytes or longer — presumably if_name is
 * always short enough in practice, but worth confirming (snprintf or
 * strlcpy would be safer).  The lock take/release lines here are
 * indented as if conditional on the lock existing (per the radar
 * 3713951 comment); the `if` lines themselves are missing from this
 * extraction.
 */
2753 void dlil_if_release(struct ifnet 
*ifp
) 
2755     struct dlil_ifnet   
*dlifp 
= (struct dlil_ifnet 
*)ifp
; 
2758     /* Interface does not have a lock until it is attached - radar 3713951 */ 
2760                 ifnet_lock_exclusive(ifp
); 
2761     ifp
->if_eflags 
&= ~IFEF_INUSE
; 
2762     ifp
->if_ioctl 
= dlil_recycle_ioctl
; 
2763     ifp
->if_output 
= dlil_recycle_output
; 
2764     ifp
->if_free 
= dlil_recycle_free
; 
2765     ifp
->if_set_bpf_tap 
= dlil_recycle_set_bpf_tap
; 
2767     strncpy(dlifp
->if_namestorage
, ifp
->if_name
, IFNAMSIZ
); 
2768     ifp
->if_name 
= dlifp
->if_namestorage
; 
2770                 ifnet_lock_done(ifp
);