+ struct radix_node_head *rnh;
+ if (use_routegenid)
+ route_generation++;
+ if ((protocol <= AF_MAX) && (protocol >= 0) &&
+ ((rnh = rt_tables[protocol]) != NULL) && (ifp != NULL)) {
+ lck_mtx_lock(rt_mtx);
+ (void) rnh->rnh_walktree(rnh, if_rtdel, ifp);
+ lck_mtx_unlock(rt_mtx);
+ }
+}
+
+ /*
+  * Radix-tree walker callback: refresh the cached route MTU metric of a
+  * single routing entry when that entry is bound to the interface passed
+  * in `arg'.  Always returns 0 so the tree walk continues.
+  */
+ static int
+ if_rtmtu(struct radix_node *rn, void *arg)
+ {
+ 	struct rtentry *entry = (struct rtentry *)rn;
+ 	struct ifnet *ifp = arg;
+ 
+ 	if (entry->rt_ifp != ifp)
+ 		return (0);
+ 
+ 	/*
+ 	 * Leave the metric alone when the MTU has been locked down
+ 	 * (RTV_MTU set) or was zero (never initialized) to begin with.
+ 	 */
+ 	if (!(entry->rt_rmx.rmx_locks & RTV_MTU) && entry->rt_rmx.rmx_mtu != 0)
+ 		entry->rt_rmx.rmx_mtu = ifp->if_mtu;
+ 
+ 	return (0);
+ }
+
+/*
+ * Update the MTU metric of all route entries in all protocol tables
+ * associated with a particular interface; this is called when the
+ * MTU of that interface has changed.
+ */
+static
+void if_rtmtu_update(struct ifnet *ifp)
+{
+ struct radix_node_head *rnh;
+ int p;
+
+ for (p = 0; p < AF_MAX + 1; p++) {
+ if ((rnh = rt_tables[p]) == NULL)
+ continue;
+
+ lck_mtx_lock(rt_mtx);
+ (void) rnh->rnh_walktree(rnh, if_rtmtu, ifp);
+ lck_mtx_unlock(rt_mtx);
+ }
+
+ if (use_routegenid)
+ route_generation++;
+}
+
+ /*
+  * Export the kernel-internal per-interface statistics (if_data_internal)
+  * into the user-visible 32-bit if_data layout, truncating 64-bit
+  * counters to 32 bits and saturating the baudrate field.
+  */
+ __private_extern__ void
+ if_data_internal_to_if_data(
+ struct ifnet *ifp,
+ const struct if_data_internal *if_data_int,
+ struct if_data *if_data)
+ {
+ struct dlil_threading_info *thread;
+ /*
+  * Pick the thread whose lock guards this interface's counters: the
+  * interface's own input thread when present and multithreaded input
+  * is enabled, otherwise the loopback thread.
+  */
+ if ((thread = ifp->if_input_thread) == NULL || (dlil_multithreaded_input == 0))
+ thread = dlil_lo_thread_ptr;
+ 
+ /* COPYFIELD copies verbatim; COPYFIELD32 narrows a counter to 32 bits. */
+ #define COPYFIELD(fld) if_data->fld = if_data_int->fld
+ #define COPYFIELD32(fld) if_data->fld = (u_int32_t)(if_data_int->fld)
+ COPYFIELD(ifi_type);
+ COPYFIELD(ifi_typelen);
+ COPYFIELD(ifi_physical);
+ COPYFIELD(ifi_addrlen);
+ COPYFIELD(ifi_hdrlen);
+ COPYFIELD(ifi_recvquota);
+ COPYFIELD(ifi_xmitquota);
+ if_data->ifi_unused1 = 0;
+ COPYFIELD(ifi_mtu);
+ COPYFIELD(ifi_metric);
+ /*
+  * ifi_baudrate is only 32 bits wide in the exported struct; clamp to
+  * UINT32_MAX when the internal 64-bit value does not fit.
+  */
+ if (if_data_int->ifi_baudrate & 0xFFFFFFFF00000000LL) {
+ if_data->ifi_baudrate = 0xFFFFFFFF;
+ }
+ else {
+ COPYFIELD32(ifi_baudrate);
+ }
+ 
+ /*
+  * Snapshot the traffic counters under the input thread's lock —
+  * presumably so the copied set is consistent with the thread that
+  * updates them (NOTE(review): confirm against the DLIL input path).
+  */
+ lck_mtx_lock(thread->input_lck);
+ COPYFIELD32(ifi_ipackets);
+ COPYFIELD32(ifi_ierrors);
+ COPYFIELD32(ifi_opackets);
+ COPYFIELD32(ifi_oerrors);
+ COPYFIELD32(ifi_collisions);
+ COPYFIELD32(ifi_ibytes);
+ COPYFIELD32(ifi_obytes);
+ COPYFIELD32(ifi_imcasts);
+ COPYFIELD32(ifi_omcasts);
+ COPYFIELD32(ifi_iqdrops);
+ COPYFIELD32(ifi_noproto);
+ COPYFIELD32(ifi_recvtiming);
+ COPYFIELD32(ifi_xmittiming);
+ COPYFIELD(ifi_lastchange);
+ lck_mtx_unlock(thread->input_lck);
+ 
+ /* Internally ifi_lastchange is uptime-based; adding the boot time
+  * converts it to calendar time for userland consumers. */
+ #if IF_LASTCHANGEUPTIME
+ if_data->ifi_lastchange.tv_sec += boottime_sec();
+ #endif
+ 
+ /* Zero the fields with no internal counterpart. */
+ if_data->ifi_unused2 = 0;
+ COPYFIELD(ifi_hwassist);
+ if_data->ifi_reserved1 = 0;
+ if_data->ifi_reserved2 = 0;
+ #undef COPYFIELD32
+ #undef COPYFIELD
+ }
+
+__private_extern__ void
+if_data_internal_to_if_data64(
+ struct ifnet *ifp,
+ const struct if_data_internal *if_data_int,
+ struct if_data64 *if_data64)
+{
+ struct dlil_threading_info *thread;
+ if ((thread = ifp->if_input_thread) == NULL || (dlil_multithreaded_input == 0))
+ thread = dlil_lo_thread_ptr;
+
+#define COPYFIELD(fld) if_data64->fld = if_data_int->fld
+ COPYFIELD(ifi_type);
+ COPYFIELD(ifi_typelen);
+ COPYFIELD(ifi_physical);
+ COPYFIELD(ifi_addrlen);
+ COPYFIELD(ifi_hdrlen);
+ COPYFIELD(ifi_recvquota);
+ COPYFIELD(ifi_xmitquota);
+ if_data64->ifi_unused1 = 0;
+ COPYFIELD(ifi_mtu);
+ COPYFIELD(ifi_metric);
+ COPYFIELD(ifi_baudrate);
+
+ lck_mtx_lock(thread->input_lck);
+ COPYFIELD(ifi_ipackets);
+ COPYFIELD(ifi_ierrors);
+ COPYFIELD(ifi_opackets);
+ COPYFIELD(ifi_oerrors);
+ COPYFIELD(ifi_collisions);
+ COPYFIELD(ifi_ibytes);
+ COPYFIELD(ifi_obytes);
+ COPYFIELD(ifi_imcasts);
+ COPYFIELD(ifi_omcasts);
+ COPYFIELD(ifi_iqdrops);
+ COPYFIELD(ifi_noproto);
+ COPYFIELD(ifi_recvtiming);
+ COPYFIELD(ifi_xmittiming);
+ COPYFIELD(ifi_lastchange);
+ lck_mtx_unlock(thread->input_lck);