+ return (error);
+}
+
+/**************************************************************************/
+/* misc */
+/**************************************************************************/
+
+/*
+ * Build a bitmap of local port numbers in use on the given interface
+ * (or on all interfaces when ifp is NULL) for the requested protocol
+ * family, honoring the IFNET_GET_LOCAL_PORTS_* selection flags.
+ */
+errno_t
+ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
+    u_int32_t flags, u_int8_t *bitfield)
+{
+	u_int32_t ifindex;
+	u_int32_t inp_flags = 0;
+
+	/*
+	 * Translate the caller-visible IFNET_GET_LOCAL_PORTS_* flags into
+	 * the INPCB_GET_PORTS_USED_* flags consumed by the PCB walkers.
+	 */
+	if (flags & IFNET_GET_LOCAL_PORTS_WILDCARDOK)
+		inp_flags |= INPCB_GET_PORTS_USED_WILDCARDOK;
+	if (flags & IFNET_GET_LOCAL_PORTS_NOWAKEUPOK)
+		inp_flags |= INPCB_GET_PORTS_USED_NOWAKEUPOK;
+	if (flags & IFNET_GET_LOCAL_PORTS_RECVANYIFONLY)
+		inp_flags |= INPCB_GET_PORTS_USED_RECVANYIFONLY;
+	if (flags & IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY)
+		inp_flags |= INPCB_GET_PORTS_USED_EXTBGIDLEONLY;
+	if (flags & IFNET_GET_LOCAL_PORTS_ACTIVEONLY)
+		inp_flags |= INPCB_GET_PORTS_USED_ACTIVEONLY;
+
+	if (bitfield == NULL)
+		return (EINVAL);
+
+	/* Only unspecified, IPv4 or IPv6 families are meaningful here */
+	if (protocol != PF_UNSPEC && protocol != PF_INET &&
+	    protocol != PF_INET6)
+		return (EINVAL);
+
+	/* bit string is long enough to hold 16-bit port values */
+	bzero(bitfield, bitstr_size(65536));
+
+	/* ifindex 0 asks the PCB walkers to scan every interface */
+	ifindex = (ifp != NULL) ? ifp->if_index : 0;
+
+	if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY))
+		udp_get_ports_used(ifindex, protocol, inp_flags, bitfield);
+
+	if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY))
+		tcp_get_ports_used(ifindex, protocol, inp_flags, bitfield);
+
+	return (0);
+}
+
+/*
+ * Legacy entry point: same as ifnet_get_local_ports_extended() with
+ * wildcard matching enabled and no protocol restriction.
+ */
+errno_t
+ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
+{
+	return (ifnet_get_local_ports_extended(ifp, PF_UNSPEC,
+	    IFNET_GET_LOCAL_PORTS_WILDCARDOK, bitfield));
+}
+
+/*
+ * Report that a neighboring node has appeared on the interface.
+ * The node address must be AF_LINK or AF_INET6 and fit within a
+ * sockaddr_storage; srvinfo carries 48 bytes of service information.
+ */
+errno_t
+ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
+    int lqm, int npm, u_int8_t srvinfo[48])
+{
+	if (ifp == NULL || sa == NULL || srvinfo == NULL)
+		return (EINVAL);
+	if (sa->sa_len > sizeof (struct sockaddr_storage) ||
+	    (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6))
+		return (EINVAL);
+
+	dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
+	return (0);
+}
+
+/*
+ * Report that a previously-present neighboring node has disappeared.
+ * Same address-family and length restrictions as the presence notice.
+ */
+errno_t
+ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
+{
+	if (ifp == NULL || sa == NULL)
+		return (EINVAL);
+	if (sa->sa_len > sizeof (struct sockaddr_storage) ||
+	    (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6))
+		return (EINVAL);
+
+	dlil_node_absent(ifp, sa);
+	return (0);
+}
+
+/*
+ * Announce, via a KEV_DL_SUBCLASS kernel event, that a master has been
+ * elected on the interface.
+ */
+errno_t
+ifnet_notice_master_elected(ifnet_t ifp)
+{
+	if (ifp != NULL) {
+		dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED,
+		    NULL, 0);
+		return (0);
+	}
+	return (EINVAL);
+}
+
+/*
+ * Run the transmit-completion callback for a packet.  The completion
+ * status value is accepted for API compatibility but currently unused.
+ */
+errno_t
+ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
+{
+	(void) val;
+
+	m_do_tx_compl_callback(m, ifp);
+
+	return (0);
+}
+
+/*
+ * Run the transmit-completion callback for a packet; equivalent to
+ * ifnet_tx_compl_status() without a status value.
+ */
+errno_t
+ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
+{
+ m_do_tx_compl_callback(m, ifp);
+
+ return (0);
+}
+
+/*
+ * Forward a module-reported issue (module id plus argument blob) to
+ * dlil for logging/event delivery.  Note: 'info' is handed through
+ * without a NULL check here — presumably dlil_report_issues() copes
+ * with a NULL argument blob.
+ */
+errno_t
+ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
+    u_int8_t info[IFNET_MODARGLEN])
+{
+	if (ifp == NULL || modid == NULL)
+		return (EINVAL);
+
+	dlil_report_issues(ifp, modid, info);
+	return (0);
+}
+
+/*
+ * Set (or clear) the delegate interface of 'ifp'.  Passing NULL, or
+ * 'ifp' itself, leaves the delegation cleared.  An I/O reference is
+ * taken on 'ifp' (via ifnet_is_attached(ifp, 1)) and dropped at 'done'.
+ */
+errno_t
+ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
+{
+ ifnet_t odifp = NULL;
+
+ if (ifp == NULL)
+ return (EINVAL);
+ else if (!ifnet_is_attached(ifp, 1))
+ return (ENXIO);
+
+ ifnet_lock_exclusive(ifp);
+ /* Remember the previous delegate so its reference can be released */
+ odifp = ifp->if_delegated.ifp;
+ if (odifp != NULL && odifp == delegated_ifp) {
+ /* delegate info is unchanged; nothing more to do */
+ ifnet_lock_done(ifp);
+ goto done;
+ }
+ // Test if this delegate interface would cause a loop
+ ifnet_t delegate_check_ifp = delegated_ifp;
+ while (delegate_check_ifp != NULL) {
+ if (delegate_check_ifp == ifp) {
+ /*
+ * NOTE(review): a detected loop falls through to 'done' and
+ * returns 0 (success) with the delegation left cleared by the
+ * bzero below never happening — i.e. the old delegate remains
+ * in place but odifp is not released here; confirm intended.
+ */
+ printf("%s: delegating to %s would cause a loop\n",
+ ifp->if_xname, delegated_ifp->if_xname);
+ ifnet_lock_done(ifp);
+ goto done;
+ }
+ delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
+ }
+ /* Clear the old delegate record before installing the new one */
+ bzero(&ifp->if_delegated, sizeof (ifp->if_delegated));
+ if (delegated_ifp != NULL && ifp != delegated_ifp) {
+ /* Cache the delegate and a snapshot of its type/family info */
+ ifp->if_delegated.ifp = delegated_ifp;
+ ifnet_reference(delegated_ifp);
+ ifp->if_delegated.type = delegated_ifp->if_type;
+ ifp->if_delegated.family = delegated_ifp->if_family;
+ ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
+ ifp->if_delegated.expensive =
+ delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
+
+ /*
+ * Propagate flags related to ECN from delegated interface
+ */
+ ifp->if_eflags &= ~(IFEF_ECN_ENABLE|IFEF_ECN_DISABLE);
+ ifp->if_eflags |= (delegated_ifp->if_eflags &
+ (IFEF_ECN_ENABLE|IFEF_ECN_DISABLE));
+
+ printf("%s: is now delegating %s (type 0x%x, family %u, "
+ "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
+ delegated_ifp->if_type, delegated_ifp->if_family,
+ delegated_ifp->if_subfamily);
+ }
+
+ ifnet_lock_done(ifp);
+
+ /* Release the reference held on the previous delegate, if any */
+ if (odifp != NULL) {
+ if (odifp != delegated_ifp) {
+ printf("%s: is no longer delegating %s\n",
+ ifp->if_xname, odifp->if_xname);
+ }
+ ifnet_release(odifp);
+ }
+
+ /* Generate a kernel event */
+ dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0);
+
+done:
+ /* Release the io ref count */
+ ifnet_decr_iorefcnt(ifp);
+
+ return (0);
+}
+
+/*
+ * Return the interface that 'ifp' currently delegates to, or NULL if
+ * there is none.  A reference is taken on the returned interface under
+ * the shared ifnet lock; the caller owns that reference and is expected
+ * to drop it with ifnet_release().
+ */
+errno_t
+ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
+{
+ if (ifp == NULL || pdelegated_ifp == NULL)
+ return (EINVAL);
+ else if (!ifnet_is_attached(ifp, 1))
+ return (ENXIO);
+
+ ifnet_lock_shared(ifp);
+ /* Take the reference while the lock pins if_delegated.ifp stable */
+ if (ifp->if_delegated.ifp != NULL)
+ ifnet_reference(ifp->if_delegated.ifp);
+ *pdelegated_ifp = ifp->if_delegated.ifp;
+ ifnet_lock_done(ifp);
+
+ /* Release the io ref count */
+ ifnet_decr_iorefcnt(ifp);
+
+ return (0);
+}
+
+/*
+ * Fill the caller-supplied array with keep-alive offload frames for
+ * this interface.  Frames are collected in order — IPsec SA-related
+ * frames first, then UDP, then TCP — each stage running only while
+ * room remains in the array.  On success *used_frames_count holds the
+ * number of populated entries; remaining entries are zeroed.
+ */
+errno_t
+ifnet_get_keepalive_offload_frames(ifnet_t ifp,
+ struct ifnet_keepalive_offload_frame *frames_array,
+ u_int32_t frames_array_count, size_t frame_data_offset,
+ u_int32_t *used_frames_count)
+{
+ u_int32_t i;
+
+ if (frames_array == NULL || used_frames_count == NULL ||
+ frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE)
+ return (EINVAL);
+
+ /* frame_data_offset should be 32-bit aligned */
+ if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
+ frame_data_offset)
+ return (EINVAL);
+
+ *used_frames_count = 0;
+ if (frames_array_count == 0)
+ return (0);
+
+ /* Start from a clean slate so unused entries read as all-zero */
+ for (i = 0; i < frames_array_count; i++) {
+ struct ifnet_keepalive_offload_frame *frame = frames_array + i;
+
+ bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
+ }
+
+ /* First collect IPSec related keep-alive frames */
+ *used_frames_count = key_fill_offload_frames_for_savs(ifp,
+ frames_array, frames_array_count, frame_data_offset);
+
+ /* If there is more room, collect other UDP keep-alive frames */
+ if (*used_frames_count < frames_array_count)
+ udp_fill_keepalive_offload_frames(ifp, frames_array,
+ frames_array_count, frame_data_offset,
+ used_frames_count);
+
+ /* If there is more room, collect other TCP keep-alive frames */
+ if (*used_frames_count < frames_array_count)
+ tcp_fill_keepalive_offload_frames(ifp, frames_array,
+ frames_array_count, frame_data_offset,
+ used_frames_count);
+
+ VERIFY(*used_frames_count <= frames_array_count);
+
+ return (0);