+ if (pcb->utun_kpipe_enabled &&
+ (((rc != 0) && (rc != EAGAIN)) || pcb->utun_output_disabled)) {
+ kern_channel_ring_t rx_ring = pcb->utun_kpipe_rxring;
+
+ // Unlock while calling notify
+ lck_rw_unlock_shared(&pcb->utun_pcb_lock);
+ // Signal the kernel pipe ring to read
+ if (rx_ring != NULL) {
+ kern_channel_notify(rx_ring, 0);
+ }
+ } else {
+ lck_rw_unlock_shared(&pcb->utun_pcb_lock);
+ }
+
+ return (0);
+}
+
+static errno_t
+utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
+ kern_channel_ring_t rx_ring, uint32_t flags)
+{
+#pragma unused(nxprov)
+#pragma unused(flags)
+ struct utun_pcb *pcb = kern_nexus_get_context(nexus);
+ struct kern_channel_ring_stat_increment rx_ring_stats;
+
+ struct netif_stats *nifs = &NX_NETIF_PRIVATE(nexus)->nif_stats;
+
+ lck_rw_lock_shared(&pcb->utun_pcb_lock);
+
+ // Reclaim user-released slots
+ (void) kern_channel_reclaim(rx_ring);
+
+ STATS_INC(nifs, NETIF_STATS_RXSYNC);
+
+ uint32_t avail = kern_channel_available_slot_count(rx_ring);
+ if (avail == 0) {
+ lck_rw_unlock_shared(&pcb->utun_pcb_lock);
+ return 0;
+ }
+
+ struct kern_pbufpool *rx_pp = rx_ring->ckr_pp;
+ VERIFY(rx_pp != NULL);
+ bzero(&rx_ring_stats, sizeof(rx_ring_stats));
+ kern_channel_slot_t rx_pslot = NULL;
+ kern_channel_slot_t rx_slot = kern_channel_get_next_slot(rx_ring, NULL, NULL);
+
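+	// First, drain mbufs queued on utun_input_chain into the netif RX ring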
+ while (rx_slot != NULL) {
+ // Check for a waiting packet
+ lck_mtx_lock(&pcb->utun_input_chain_lock);
+ mbuf_t data = pcb->utun_input_chain;
+ if (data == NULL) {
+ lck_mtx_unlock(&pcb->utun_input_chain_lock);
+ break;
+ }
+
+ // Allocate rx packet
+ kern_packet_t rx_ph = 0;
+ errno_t error = kern_pbufpool_alloc_nosleep(rx_pp, 1, &rx_ph);
+ if (unlikely(error != 0)) {
+ STATS_INC(nifs, NETIF_STATS_NOMEM_PKT);
+ STATS_INC(nifs, NETIF_STATS_DROPPED);
+ printf("utun_netif_sync_rx %s: failed to allocate packet\n",
+ pcb->utun_ifp->if_xname);
+ lck_mtx_unlock(&pcb->utun_input_chain_lock);
+ break;
+ }
+
+ // Advance waiting packets
+ pcb->utun_input_chain = data->m_nextpkt;
+ data->m_nextpkt = NULL;
+ if (pcb->utun_input_chain == NULL) {
+ pcb->utun_input_chain_last = NULL;
+ }
+ lck_mtx_unlock(&pcb->utun_input_chain_lock);
+
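+		// The utun header (UTUN_HEADER_SIZE bytes) is not copied; only the
+		// payload beyond it is placed into the RX buflet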
+ size_t header_offset = UTUN_HEADER_SIZE(pcb);
+ size_t length = mbuf_pkthdr_len(data);
+
+ if (length < header_offset) {
+ // mbuf is too small
+ mbuf_freem(data);
+ kern_pbufpool_free(rx_pp, rx_ph);
+ STATS_INC(nifs, NETIF_STATS_BADLEN);
+ STATS_INC(nifs, NETIF_STATS_DROPPED);
+ printf("utun_netif_sync_rx %s: legacy packet length too short for header %zu < %zu\n",
+ pcb->utun_ifp->if_xname, length, header_offset);
+ continue;
+ }
+
+ length -= header_offset;
+ if (length > rx_pp->pp_buflet_size) {
+			// Packet is too long to fit in a single buflet, drop it
+ mbuf_freem(data);
+ kern_pbufpool_free(rx_pp, rx_ph);
+ STATS_INC(nifs, NETIF_STATS_BADLEN);
+ STATS_INC(nifs, NETIF_STATS_DROPPED);
+ printf("utun_netif_sync_rx %s: legacy packet length %zu > %u\n",
+ pcb->utun_ifp->if_xname, length, rx_pp->pp_buflet_size);
+ continue;
+ }
+
+ mbuf_pkthdr_setrcvif(data, pcb->utun_ifp);
+
+		// Fill out rx packet
+ kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL);
+ VERIFY(rx_buf != NULL);
+ void *rx_baddr = kern_buflet_get_object_address(rx_buf);
+ VERIFY(rx_baddr != NULL);
+
+ // Copy-in data from mbuf to buflet
+ mbuf_copydata(data, header_offset, length, (void *)rx_baddr);
+ kern_packet_clear_flow_uuid(rx_ph); // Zero flow id
+
+ // Finalize and attach the packet
+ error = kern_buflet_set_data_offset(rx_buf, 0);
+ VERIFY(error == 0);
+ error = kern_buflet_set_data_length(rx_buf, length);
+ VERIFY(error == 0);
+ error = kern_packet_set_link_header_offset(rx_ph, 0);
+ VERIFY(error == 0);
+ error = kern_packet_set_network_header_offset(rx_ph, 0);
+ VERIFY(error == 0);
+ error = kern_packet_finalize(rx_ph);
+ VERIFY(error == 0);
+ error = kern_channel_slot_attach_packet(rx_ring, rx_slot, rx_ph);
+ VERIFY(error == 0);
+
+ STATS_INC(nifs, NETIF_STATS_RXPKTS);
+ STATS_INC(nifs, NETIF_STATS_RXCOPY_MBUF);
+ bpf_tap_packet_in(pcb->utun_ifp, DLT_RAW, rx_ph, NULL, 0);
+
+ rx_ring_stats.kcrsi_slots_transferred++;
+ rx_ring_stats.kcrsi_bytes_transferred += length;
+
+ mbuf_freem(data);
+
+ // Advance ring
+ rx_pslot = rx_slot;
+ rx_slot = kern_channel_get_next_slot(rx_ring, rx_slot, NULL);
+ }
+
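+	// Next, if a kernel pipe channel is active, copy packets that the channel
+	// client placed on the kpipe TX ring into the remaining netif RX slots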
+ struct kern_channel_ring_stat_increment tx_ring_stats;
+ bzero(&tx_ring_stats, sizeof(tx_ring_stats));
+ kern_channel_ring_t tx_ring = pcb->utun_kpipe_txring;
+ kern_channel_slot_t tx_pslot = NULL;
+ kern_channel_slot_t tx_slot = NULL;
+ if (tx_ring == NULL) {
+		// Kernel pipe TX ring not set up yet, nothing to read
+ goto done;
+ }
+
+ // Unlock utun before entering ring
+ lck_rw_unlock_shared(&pcb->utun_pcb_lock);
+
+ (void)kr_enter(tx_ring, TRUE);
+
+ // Lock again after entering and validate
+ lck_rw_lock_shared(&pcb->utun_pcb_lock);
+ if (tx_ring != pcb->utun_kpipe_txring) {
+ goto done;
+ }
+
+ tx_slot = kern_channel_get_next_slot(tx_ring, NULL, NULL);
+ if (tx_slot == NULL) {
+ // Nothing to read, don't bother signalling
+ goto done;
+ }
+
+ while (rx_slot != NULL && tx_slot != NULL) {
+ // Allocate rx packet
+ kern_packet_t rx_ph = 0;
+ kern_packet_t tx_ph = kern_channel_slot_get_packet(tx_ring, tx_slot);
+
+ // Advance TX ring
+ tx_pslot = tx_slot;
+ tx_slot = kern_channel_get_next_slot(tx_ring, tx_slot, NULL);
+
+ /* Skip slot if packet is zero-length or marked as dropped (QUMF_DROPPED) */
+ if (tx_ph == 0) {
+ continue;
+ }
+
+ errno_t error = kern_pbufpool_alloc_nosleep(rx_pp, 1, &rx_ph);
+ if (unlikely(error != 0)) {
+ STATS_INC(nifs, NETIF_STATS_NOMEM_PKT);
+ STATS_INC(nifs, NETIF_STATS_DROPPED);
+ printf("utun_netif_sync_rx %s: failed to allocate packet\n",
+ pcb->utun_ifp->if_xname);
+ break;
+ }
+
+ kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL);
+ VERIFY(tx_buf != NULL);
+ uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf);
+		VERIFY(tx_baddr != NULL);
+ tx_baddr += kern_buflet_get_data_offset(tx_buf);
+
+ // Check packet length
+ size_t header_offset = UTUN_HEADER_SIZE(pcb);
+ uint32_t tx_length = kern_packet_get_data_length(tx_ph);
+ if (tx_length < header_offset) {
+ // Packet is too small
+ kern_pbufpool_free(rx_pp, rx_ph);
+ STATS_INC(nifs, NETIF_STATS_BADLEN);
+ STATS_INC(nifs, NETIF_STATS_DROPPED);
+ printf("utun_netif_sync_rx %s: packet length too short for header %u < %zu\n",
+ pcb->utun_ifp->if_xname, tx_length, header_offset);
+ continue;
+ }
+
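+		// Clamp the copy to the default slot size; any excess payload is truncated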
+ size_t length = MIN(tx_length - header_offset,
+ UTUN_IF_DEFAULT_SLOT_SIZE);
+
+ tx_ring_stats.kcrsi_slots_transferred++;
+ tx_ring_stats.kcrsi_bytes_transferred += length;
+
+		// Fill out rx packet
+ kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL);
+ VERIFY(rx_buf != NULL);
+ void *rx_baddr = kern_buflet_get_object_address(rx_buf);
+ VERIFY(rx_baddr != NULL);
+
+ // Copy-in data from tx to rx
+ memcpy((void *)rx_baddr, (void *)(tx_baddr + header_offset), length);
+ kern_packet_clear_flow_uuid(rx_ph); // Zero flow id
+
+ // Finalize and attach the packet
+ error = kern_buflet_set_data_offset(rx_buf, 0);
+ VERIFY(error == 0);
+ error = kern_buflet_set_data_length(rx_buf, length);
+ VERIFY(error == 0);
+ error = kern_packet_set_link_header_offset(rx_ph, 0);
+ VERIFY(error == 0);
+ error = kern_packet_set_network_header_offset(rx_ph, 0);
+ VERIFY(error == 0);
+ error = kern_packet_finalize(rx_ph);
+ VERIFY(error == 0);
+ error = kern_channel_slot_attach_packet(rx_ring, rx_slot, rx_ph);
+ VERIFY(error == 0);
+
+ STATS_INC(nifs, NETIF_STATS_RXPKTS);
+ STATS_INC(nifs, NETIF_STATS_RXCOPY_DIRECT);
+ bpf_tap_packet_in(pcb->utun_ifp, DLT_RAW, rx_ph, NULL, 0);
+
+ rx_ring_stats.kcrsi_slots_transferred++;
+ rx_ring_stats.kcrsi_bytes_transferred += length;
+
+ rx_pslot = rx_slot;
+ rx_slot = kern_channel_get_next_slot(rx_ring, rx_slot, NULL);
+ }
+
+done:
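+	// Publish any attached RX packets and update interface statistics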
+ if (rx_pslot) {
+ kern_channel_advance_slot(rx_ring, rx_pslot);
+ kern_channel_increment_ring_net_stats(rx_ring, pcb->utun_ifp, &rx_ring_stats);
+ }
+
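+	// Advance past the consumed kpipe TX slots and reclaim them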
+ if (tx_pslot) {
+ kern_channel_advance_slot(tx_ring, tx_pslot);
+ kern_channel_increment_ring_net_stats(tx_ring, pcb->utun_ifp, &tx_ring_stats);
+ (void)kern_channel_reclaim(tx_ring);
+ }
+
+ // Unlock first, then exit ring
+ lck_rw_unlock_shared(&pcb->utun_pcb_lock);
+ if (tx_ring != NULL) {
+ if (tx_pslot != NULL) {
+ kern_channel_notify(tx_ring, 0);
+ }
+ kr_exit(tx_ring);
+ }
+
+ return 0;
+}
+
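+// Register a netif nexus provider for this utun instance and allocate a
+// provider instance for it, creating the interface (ifp) in the process.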
+static errno_t
+utun_nexus_ifattach(struct utun_pcb *pcb,
+ struct ifnet_init_eparams *init_params,
+ struct ifnet **ifp)
+{
+ errno_t err;
+ nexus_controller_t controller = kern_nexus_shared_controller();
+ struct kern_nexus_net_init net_init;
+
+ nexus_name_t provider_name;
+ snprintf((char *)provider_name, sizeof(provider_name),
+ "com.apple.netif.utun%d", pcb->utun_unit);
+
+ struct kern_nexus_provider_init prov_init = {
+ .nxpi_version = KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION,
+ .nxpi_flags = NXPIF_VIRTUAL_DEVICE,
+ .nxpi_pre_connect = utun_nexus_pre_connect,
+ .nxpi_connected = utun_nexus_connected,
+ .nxpi_pre_disconnect = utun_netif_pre_disconnect,
+ .nxpi_disconnected = utun_nexus_disconnected,
+ .nxpi_ring_init = utun_netif_ring_init,
+ .nxpi_ring_fini = utun_netif_ring_fini,
+ .nxpi_slot_init = NULL,
+ .nxpi_slot_fini = NULL,
+ .nxpi_sync_tx = utun_netif_sync_tx,
+ .nxpi_sync_rx = utun_netif_sync_rx,
+ .nxpi_tx_doorbell = utun_netif_tx_doorbell,
+ };
+
+ nexus_attr_t nxa = NULL;
+ err = kern_nexus_attr_create(&nxa);
+ if (err != 0) {
+ printf("%s: kern_nexus_attr_create failed: %d\n",
+ __func__, err);
+ goto failed;
+ }
+
+ uint64_t slot_buffer_size = UTUN_IF_DEFAULT_SLOT_SIZE;
+ err = kern_nexus_attr_set(nxa, NEXUS_ATTR_SLOT_BUF_SIZE, slot_buffer_size);
+ VERIFY(err == 0);
+
+ // Reset ring size for netif nexus to limit memory usage
+ uint64_t ring_size = if_utun_ring_size;
+ err = kern_nexus_attr_set(nxa, NEXUS_ATTR_TX_SLOTS, ring_size);
+ VERIFY(err == 0);
+ err = kern_nexus_attr_set(nxa, NEXUS_ATTR_RX_SLOTS, ring_size);
+ VERIFY(err == 0);
+
+ pcb->utun_netif_txring_size = ring_size;
+
+ err = kern_nexus_controller_register_provider(controller,
+ utun_nx_dom_prov,
+ provider_name,
+ &prov_init,
+ sizeof(prov_init),
+ nxa,
+ &pcb->utun_nx.if_provider);
+ if (err != 0) {
+ printf("%s register provider failed, error %d\n",
+ __func__, err);
+ goto failed;
+ }
+
+ bzero(&net_init, sizeof(net_init));
+ net_init.nxneti_version = KERN_NEXUS_NET_CURRENT_VERSION;
+ net_init.nxneti_flags = 0;
+ net_init.nxneti_eparams = init_params;
+ net_init.nxneti_lladdr = NULL;
+ net_init.nxneti_prepare = utun_netif_prepare;
+ err = kern_nexus_controller_alloc_net_provider_instance(controller,
+ pcb->utun_nx.if_provider,
+ pcb,
+ &pcb->utun_nx.if_instance,
+ &net_init,
+ ifp);
+ if (err != 0) {
+ printf("%s alloc_net_provider_instance failed, %d\n",
+ __func__, err);
+ kern_nexus_controller_deregister_provider(controller,
+ pcb->utun_nx.if_provider);
+ uuid_clear(pcb->utun_nx.if_provider);
+ goto failed;
+ }
+
+failed:
+ if (nxa) {
+ kern_nexus_attr_destroy(nxa);
+ }
+ return (err);
+}
+
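+// Free a nexus provider instance and deregister its provider, clearing both UUIDs.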
+static void
+utun_detach_provider_and_instance(uuid_t provider, uuid_t instance)
+{
+ nexus_controller_t controller = kern_nexus_shared_controller();
+ errno_t err;
+
+ if (!uuid_is_null(instance)) {
+ err = kern_nexus_controller_free_provider_instance(controller,
+ instance);
+ if (err != 0) {
+ printf("%s free_provider_instance failed %d\n",
+ __func__, err);
+ }
+ uuid_clear(instance);
+ }
+ if (!uuid_is_null(provider)) {
+ err = kern_nexus_controller_deregister_provider(controller,
+ provider);
+ if (err != 0) {
+ printf("%s deregister_provider %d\n", __func__, err);
+ }
+ uuid_clear(provider);
+ }
+ return;
+}
+
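+// Detach the multistack host and device ports, then tear down both the netif
+// and multistack providers and instances.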
+static void
+utun_nexus_detach(utun_nx_t nx)
+{
+ nexus_controller_t controller = kern_nexus_shared_controller();
+ errno_t err;
+
+ if (!uuid_is_null(nx->ms_host)) {
+ err = kern_nexus_ifdetach(controller,
+ nx->ms_instance,
+ nx->ms_host);
+ if (err != 0) {
+ printf("%s: kern_nexus_ifdetach ms host failed %d\n",
+ __func__, err);
+ }
+ }
+
+ if (!uuid_is_null(nx->ms_device)) {
+ err = kern_nexus_ifdetach(controller,
+ nx->ms_instance,
+ nx->ms_device);
+ if (err != 0) {
+ printf("%s: kern_nexus_ifdetach ms device failed %d\n",
+ __func__, err);
+ }
+ }
+
+ utun_detach_provider_and_instance(nx->if_provider,
+ nx->if_instance);
+ utun_detach_provider_and_instance(nx->ms_provider,
+ nx->ms_instance);
+
+ memset(nx, 0, sizeof(*nx));
+}
+
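+// Register a flowswitch nexus provider of the given subtype and allocate an
+// instance of it, returning the provider and instance UUIDs.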
+static errno_t
+utun_create_fs_provider_and_instance(uint32_t subtype, const char *type_name,
+ const char *ifname,
+ uuid_t *provider, uuid_t *instance)
+{
+ nexus_attr_t attr = NULL;
+ nexus_controller_t controller = kern_nexus_shared_controller();
+ uuid_t dom_prov;
+ errno_t err;
+ struct kern_nexus_init init;
+ nexus_name_t provider_name;
+
+ err = kern_nexus_get_builtin_domain_provider(NEXUS_TYPE_FLOW_SWITCH,
+ &dom_prov);
+ if (err != 0) {
+ printf("%s can't get %s provider, error %d\n",
+ __func__, type_name, err);
+ goto failed;
+ }
+
+ err = kern_nexus_attr_create(&attr);
+ if (err != 0) {
+ printf("%s: kern_nexus_attr_create failed: %d\n",
+ __func__, err);
+ goto failed;
+ }
+
+ err = kern_nexus_attr_set(attr, NEXUS_ATTR_EXTENSIONS, subtype);
+ VERIFY(err == 0);
+
+ uint64_t slot_buffer_size = UTUN_IF_DEFAULT_SLOT_SIZE;
+ err = kern_nexus_attr_set(attr, NEXUS_ATTR_SLOT_BUF_SIZE, slot_buffer_size);
+ VERIFY(err == 0);
+
+ // Reset ring size for flowswitch nexus to limit memory usage. Larger RX than netif.
+ uint64_t tx_ring_size = if_utun_tx_fsw_ring_size;
+ err = kern_nexus_attr_set(attr, NEXUS_ATTR_TX_SLOTS, tx_ring_size);
+ VERIFY(err == 0);
+ uint64_t rx_ring_size = if_utun_rx_fsw_ring_size;
+ err = kern_nexus_attr_set(attr, NEXUS_ATTR_RX_SLOTS, rx_ring_size);
+ VERIFY(err == 0);
+
+ snprintf((char *)provider_name, sizeof(provider_name),
+ "com.apple.%s.%s", type_name, ifname);
+ err = kern_nexus_controller_register_provider(controller,
+ dom_prov,
+ provider_name,
+ NULL,
+ 0,
+ attr,
+ provider);
+ kern_nexus_attr_destroy(attr);
+ attr = NULL;
+ if (err != 0) {
+ printf("%s register %s provider failed, error %d\n",
+ __func__, type_name, err);
+ goto failed;
+ }
+ bzero(&init, sizeof (init));
+ init.nxi_version = KERN_NEXUS_CURRENT_VERSION;
+ err = kern_nexus_controller_alloc_provider_instance(controller,
+ *provider,
+ NULL,
+ instance, &init);
+ if (err != 0) {
+ printf("%s alloc_provider_instance %s failed, %d\n",
+ __func__, type_name, err);
+ kern_nexus_controller_deregister_provider(controller,
+ *provider);
+ uuid_clear(*provider);
+ }
+failed:
+ return (err);
+}
+
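+// Create a multistack flowswitch, attach it to the utun netif instance's device
+// and host ports, and record the flowswitch agent UUID for later use.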
+static errno_t
+utun_multistack_attach(struct utun_pcb *pcb)
+{
+ nexus_controller_t controller = kern_nexus_shared_controller();
+ errno_t err = 0;
+ utun_nx_t nx = &pcb->utun_nx;
+
+ // Allocate multistack flowswitch
+ err = utun_create_fs_provider_and_instance(NEXUS_EXTENSION_FSW_TYPE_MULTISTACK,
+ "multistack",
+ pcb->utun_ifp->if_xname,
+ &nx->ms_provider,
+ &nx->ms_instance);
+ if (err != 0) {
+		printf("%s: failed to create multistack provider and instance\n",
+ __func__);
+ goto failed;
+ }
+
+ // Attach multistack to device port
+ err = kern_nexus_ifattach(controller, nx->ms_instance,
+ NULL, nx->if_instance,
+ FALSE, &nx->ms_device);
+ if (err != 0) {
+ printf("%s kern_nexus_ifattach ms device %d\n", __func__, err);
+ goto failed;
+ }
+
+ // Attach multistack to host port
+ err = kern_nexus_ifattach(controller, nx->ms_instance,
+ NULL, nx->if_instance,
+ TRUE, &nx->ms_host);
+ if (err != 0) {
+ printf("%s kern_nexus_ifattach ms host %d\n", __func__, err);
+ goto failed;
+ }
+
+ // Extract the agent UUID and save for later
+ struct kern_nexus *multistack_nx = nx_find(nx->ms_instance, false);
+ if (multistack_nx != NULL) {
+ struct nx_flowswitch *flowswitch = NX_FSW_PRIVATE(multistack_nx);
+ if (flowswitch != NULL) {
+ FSW_RLOCK(flowswitch);
+ struct fsw_ms_context *ms_context = (struct fsw_ms_context *)flowswitch->fsw_ops_private;
+ if (ms_context != NULL) {
+ uuid_copy(nx->ms_agent, ms_context->mc_agent_uuid);
+ } else {
+ printf("utun_multistack_attach - fsw_ms_context is NULL\n");
+ }
+ FSW_UNLOCK(flowswitch);
+ } else {
+ printf("utun_multistack_attach - flowswitch is NULL\n");
+ }
+ nx_release(multistack_nx);
+ } else {
+ printf("utun_multistack_attach - unable to find multistack nexus\n");
+ }
+
+ return (0);
+
+failed:
+ utun_nexus_detach(nx);
+
+ errno_t detach_error = 0;
+ if ((detach_error = ifnet_detach(pcb->utun_ifp)) != 0) {
+ panic("utun_multistack_attach - ifnet_detach failed: %d\n", detach_error);
+ /* NOT REACHED */
+ }
+
+ return (err);
+}
+
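+// Create the shared utun kernel pipe nexus controller on first use and register
+// the kpipe provider; subsequent callers just take a reference.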
+static errno_t
+utun_register_kernel_pipe_nexus(void)
+{
+ nexus_attr_t nxa = NULL;
+ errno_t result;
+
+ lck_mtx_lock(&utun_lock);
+ if (utun_ncd_refcount++) {
+ lck_mtx_unlock(&utun_lock);
+ return 0;
+ }
+
+ result = kern_nexus_controller_create(&utun_ncd);
+ if (result) {
+ printf("%s: kern_nexus_controller_create failed: %d\n",
+ __FUNCTION__, result);
+ goto done;
+ }
+
+ uuid_t dom_prov;
+ result = kern_nexus_get_builtin_domain_provider(
+ NEXUS_TYPE_KERNEL_PIPE, &dom_prov);
+ if (result) {
+ printf("%s: kern_nexus_get_builtin_domain_provider failed: %d\n",
+ __FUNCTION__, result);
+ goto done;
+ }
+
+ struct kern_nexus_provider_init prov_init = {
+ .nxpi_version = KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION,
+ .nxpi_flags = NXPIF_VIRTUAL_DEVICE,
+ .nxpi_pre_connect = utun_nexus_pre_connect,
+ .nxpi_connected = utun_nexus_connected,
+ .nxpi_pre_disconnect = utun_nexus_pre_disconnect,
+ .nxpi_disconnected = utun_nexus_disconnected,
+ .nxpi_ring_init = utun_kpipe_ring_init,
+ .nxpi_ring_fini = utun_kpipe_ring_fini,
+ .nxpi_slot_init = NULL,
+ .nxpi_slot_fini = NULL,
+ .nxpi_sync_tx = utun_kpipe_sync_tx,
+ .nxpi_sync_rx = utun_kpipe_sync_rx,
+ .nxpi_tx_doorbell = NULL,
+ };
+
+ result = kern_nexus_attr_create(&nxa);
+ if (result) {
+ printf("%s: kern_nexus_attr_create failed: %d\n",
+ __FUNCTION__, result);
+ goto done;
+ }
+
+ uint64_t slot_buffer_size = UTUN_IF_DEFAULT_SLOT_SIZE;
+ result = kern_nexus_attr_set(nxa, NEXUS_ATTR_SLOT_BUF_SIZE, slot_buffer_size);
+ VERIFY(result == 0);
+
+ // Reset ring size for kernel pipe nexus to limit memory usage
+ uint64_t ring_size = if_utun_ring_size;
+ result = kern_nexus_attr_set(nxa, NEXUS_ATTR_TX_SLOTS, ring_size);
+ VERIFY(result == 0);
+ result = kern_nexus_attr_set(nxa, NEXUS_ATTR_RX_SLOTS, ring_size);
+ VERIFY(result == 0);
+
+ result = kern_nexus_controller_register_provider(utun_ncd,
+ dom_prov,
+ (const uint8_t *)"com.apple.nexus.utun.kpipe",
+ &prov_init,
+ sizeof(prov_init),
+ nxa,
+ &utun_kpipe_uuid);
+ if (result) {
+ printf("%s: kern_nexus_controller_register_provider failed: %d\n",
+ __FUNCTION__, result);
+ goto done;
+ }
+
+done:
+ if (nxa) {
+ kern_nexus_attr_destroy(nxa);
+ }
+
+ if (result) {
+ if (utun_ncd) {
+ kern_nexus_controller_destroy(utun_ncd);
+ utun_ncd = NULL;
+ }
+ utun_ncd_refcount = 0;
+ }
+
+ lck_mtx_unlock(&utun_lock);
+
+ return result;
+}
+
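+// Drop a reference on the shared kernel pipe nexus controller and destroy it
+// when the last reference is released.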
+static void
+utun_unregister_kernel_pipe_nexus(void)
+{
+ lck_mtx_lock(&utun_lock);
+
+ VERIFY(utun_ncd_refcount > 0);
+
+ if (--utun_ncd_refcount == 0) {
+ kern_nexus_controller_destroy(utun_ncd);
+ utun_ncd = NULL;
+ }
+
+ lck_mtx_unlock(&utun_lock);
+}
+
+// For use by socket option, not internally
+static errno_t
+utun_disable_channel(struct utun_pcb *pcb)
+{
+ errno_t result;
+ int enabled;
+ uuid_t uuid;
+
+ lck_rw_lock_exclusive(&pcb->utun_pcb_lock);
+
+ enabled = pcb->utun_kpipe_enabled;
+ uuid_copy(uuid, pcb->utun_kpipe_uuid);
+
+ VERIFY(uuid_is_null(pcb->utun_kpipe_uuid) == !enabled);
+
+ pcb->utun_kpipe_enabled = 0;
+ uuid_clear(pcb->utun_kpipe_uuid);
+
+ lck_rw_unlock_exclusive(&pcb->utun_pcb_lock);
+
+ if (enabled) {
+ result = kern_nexus_controller_free_provider_instance(utun_ncd, uuid);
+ } else {
+ result = ENXIO;
+ }
+
+ if (!result) {
+ utun_unregister_kernel_pipe_nexus();
+ }
+
+ return result;
+}
+
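+// Allocate a kernel pipe nexus instance for this pcb and bind it to the
+// requesting process so it can open a channel to the utun device.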
+static errno_t
+utun_enable_channel(struct utun_pcb *pcb, struct proc *proc)
+{
+ struct kern_nexus_init init;
+ errno_t result;
+
+ result = utun_register_kernel_pipe_nexus();
+ if (result) {
+ return result;
+ }
+
+ VERIFY(utun_ncd);
+
+ lck_rw_lock_exclusive(&pcb->utun_pcb_lock);
+
+ if (pcb->utun_kpipe_enabled) {
+ result = EEXIST; // return success instead?
+ goto done;
+ }
+
+	/*
+	 * Make sure packets fit in the channel buffers, allowing an extra
+	 * 4 bytes for the protocol number header in the channel.
+	 */
+ if (pcb->utun_ifp->if_mtu + UTUN_HEADER_SIZE(pcb) > UTUN_IF_DEFAULT_SLOT_SIZE) {
+ result = EOPNOTSUPP;
+ goto done;
+ }
+
+ VERIFY(uuid_is_null(pcb->utun_kpipe_uuid));
+ bzero(&init, sizeof (init));
+ init.nxi_version = KERN_NEXUS_CURRENT_VERSION;
+ result = kern_nexus_controller_alloc_provider_instance(utun_ncd,
+ utun_kpipe_uuid, pcb, &pcb->utun_kpipe_uuid, &init);
+ if (result) {
+ goto done;
+ }
+
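+	// Bind the kernel pipe client port to the requesting process by PID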
+ nexus_port_t port = NEXUS_PORT_KERNEL_PIPE_CLIENT;
+ result = kern_nexus_controller_bind_provider_instance(utun_ncd,
+ pcb->utun_kpipe_uuid, &port,
+ proc_pid(proc), NULL, NULL, 0, NEXUS_BIND_PID);
+ if (result) {
+ kern_nexus_controller_free_provider_instance(utun_ncd,
+ pcb->utun_kpipe_uuid);
+ uuid_clear(pcb->utun_kpipe_uuid);
+ goto done;
+ }
+
+ pcb->utun_kpipe_enabled = 1;
+
+done:
+ lck_rw_unlock_exclusive(&pcb->utun_pcb_lock);
+
+ if (result) {
+ utun_unregister_kernel_pipe_nexus();
+ }
+
+ return result;
+}
+
+#endif // UTUN_NEXUS