X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/39236c6e673c41db228275375ab7fdb0f837b292..d9a64523371fa019c4575bb400cbbc3a50ac9903:/osfmk/ipc/ipc_port.c

diff --git a/osfmk/ipc/ipc_port.c b/osfmk/ipc/ipc_port.c
index f8f96739a..823abe3a7 100644
--- a/osfmk/ipc/ipc_port.c
+++ b/osfmk/ipc/ipc_port.c
@@ -74,11 +74,13 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
-#include 
+#include 
+#include 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -88,13 +90,15 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
 #include 
 #include 
 
-decl_lck_mtx_data(, ipc_port_multiple_lock_data)
-lck_mtx_ext_t ipc_port_multiple_lock_data_ext;
+decl_lck_spin_data(, ipc_port_multiple_lock_data)
 
 ipc_port_timestamp_t ipc_port_timestamp_data;
 int ipc_portbt;
@@ -202,10 +206,10 @@ ipc_port_request_alloc(
     if (port->ip_sprequests == 0) {
         port->ip_sprequests = 1;
 #if IMPORTANCE_INHERITANCE
+        /* TODO: Live importance support in send-possible */
         if (port->ip_impdonation != 0 &&
             port->ip_spimportant == 0 &&
             (task_is_importance_donor(current_task()))) {
-            port->ip_spimportant = 1;
             *importantp = TRUE;
         }
 #endif /* IMPORTANCE_INHERITANCE */
@@ -348,20 +352,13 @@ ipc_port_request_grow(
  *		(or armed with importance in that version).
  */
 
-#if IMPORTANCE_INHERITANCE
 boolean_t
 ipc_port_request_sparm(
     ipc_port_t                     port,
     __assert_only mach_port_name_t name,
     ipc_port_request_index_t       index,
-    mach_msg_option_t              option)
-#else
-boolean_t
-ipc_port_request_sparm(
-    ipc_port_t                     port,
-    __assert_only mach_port_name_t name,
-    ipc_port_request_index_t       index)
-#endif /* IMPORTANCE_INHERITANCE */
+    mach_msg_option_t              option,
+    mach_msg_priority_t            override)
 {
     if (index != IE_REQ_NONE) {
         ipc_port_request_t ipr, table;
@@ -374,16 +371,22 @@ ipc_port_request_sparm(
         ipr = &table[index];
         assert(ipr->ipr_name == name);
 
+        /* Is there a valid destination? */
         if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
             ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
             port->ip_sprequests = 1;
+
+            if (option & MACH_SEND_OVERRIDE) {
+                /* apply override to message queue */
+                ipc_mqueue_override_send(&port->ip_messages, override);
+            }
+
 #if IMPORTANCE_INHERITANCE
             if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
                 (port->ip_impdonation != 0) &&
                 (port->ip_spimportant == 0) &&
                 (((option & MACH_SEND_IMPORTANCE) != 0) ||
                  (task_is_importance_donor(current_task())))) {
-                port->ip_spimportant = 1;
                 return TRUE;
             }
 #else
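Context for the two hunks above: a send-possible request is what a sender arms when a full queue forces it to back off, and userspace reaches ipc_port_request_sparm() by registering for MACH_NOTIFY_SEND_POSSIBLE and retrying the send with MACH_SEND_NOTIFY. A minimal, hedged userspace sketch (error handling elided; not part of this diff):

    #include <mach/mach.h>

    /* Hedged sketch: arming the send-possible notification that
     * ipc_port_request_sparm() services on the kernel side. */
    static void
    arm_send_possible(mach_port_t dest, mach_port_t notify)
    {
        mach_port_t previous = MACH_PORT_NULL;

        kern_return_t kr = mach_port_request_notification(mach_task_self(),
            dest, MACH_NOTIFY_SEND_POSSIBLE, 0 /* sync */,
            notify, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
        if (kr == KERN_SUCCESS && MACH_PORT_VALID(previous))
            mach_port_deallocate(mach_task_self(), previous);
    }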
@@ -540,40 +543,61 @@ ipc_port_nsrequest(
 /*
  *	Routine:	ipc_port_clear_receiver
  *	Purpose:
- *		Prepares a receive right for transmission/destruction.
+ *		Prepares a receive right for transmission/destruction,
+ *		optionally performs mqueue destruction (with port lock held)
+ *
  *	Conditions:
  *		The port is locked and active.
+ *	Returns:
+ *		If should_destroy is TRUE, then the return value indicates
+ *		whether the caller needs to reap kmsg structures that should
+ *		be destroyed (by calling ipc_kmsg_reap_delayed)
+ *
+ *		If should_destroy is FALSE, this always returns FALSE
  */
 
-void
+boolean_t
 ipc_port_clear_receiver(
     ipc_port_t  port,
-    queue_t     links)
+    boolean_t   should_destroy)
 {
-    spl_t       s;
-
-    assert(ip_active(port));
+    ipc_mqueue_t mqueue = &port->ip_messages;
+    boolean_t    reap_messages = FALSE;
 
     /*
-     * pull ourselves from any sets.
+     * Pull ourselves out of any sets to which we belong.
+     * We hold the port locked, so even though this acquires and releases
+     * the mqueue lock, we know we won't be added to any other sets.
      */
-    if (port->ip_pset_count != 0) {
-        ipc_pset_remove_from_all(port, links);
-        assert(port->ip_pset_count == 0);
+    if (port->ip_in_pset != 0) {
+        ipc_pset_remove_from_all(port);
+        assert(port->ip_in_pset == 0);
     }
 
     /*
      * Send anyone waiting on the port's queue directly away.
      * Also clear the mscount and seqno.
      */
-    s = splsched();
-    imq_lock(&port->ip_messages);
-    ipc_mqueue_changed(&port->ip_messages);
-    ipc_port_set_mscount(port, 0);
-    port->ip_messages.imq_seqno = 0;
+    imq_lock(mqueue);
+    ipc_mqueue_changed(mqueue);
+    port->ip_mscount = 0;
+    mqueue->imq_seqno = 0;
     port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
+
+    if (should_destroy) {
+        /*
+         * Mark the mqueue invalid, preventing further send/receive
+         * operations from succeeding. It's important for this to be
+         * done under the same lock hold as the ipc_mqueue_changed
+         * call to avoid additional threads blocking on an mqueue
+         * that's being destroyed.
+         */
+        reap_messages = ipc_mqueue_destroy_locked(mqueue);
+    }
+
     imq_unlock(&port->ip_messages);
-    splx(s);
+
+    return reap_messages;
 }
 
 /*
@@ -602,7 +626,6 @@ ipc_port_init(
     port->ip_pdrequest = IP_NULL;
     port->ip_requests = IPR_NULL;
 
-    port->ip_pset_count = 0;
     port->ip_premsg = IKM_NULL;
     port->ip_context = 0;
 
@@ -610,15 +633,20 @@ ipc_port_init(
     port->ip_spimportant = 0;
     port->ip_impdonation = 0;
     port->ip_tempowner = 0;
-    port->ip_taskptr = 0;
     port->ip_guarded = 0;
     port->ip_strict_guard = 0;
     port->ip_impcount = 0;
 
-    port->ip_reserved = 0;
+    port->ip_specialreply = 0;
+    port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
 
-    ipc_mqueue_init(&port->ip_messages, FALSE /* set */);
+    reset_ip_srp_bits(port);
+
+    port->ip_send_turnstile = TURNSTILE_NULL;
+
+    ipc_mqueue_init(&port->ip_messages,
+            FALSE /* !set */);
 }
 
 /*
@@ -666,14 +694,6 @@ ipc_port_alloc(
     /* unlock space after init */
     is_write_unlock(space);
 
-#if CONFIG_MACF_MACH
-    task_t issuer = current_task();
-    tasklabel_lock2 (issuer, space->is_task);
-    mac_port_label_associate(&issuer->maclabel, &space->is_task->maclabel,
-            &port->ip_label);
-    tasklabel_unlock2 (issuer, space->is_task);
-#endif
-
     *namep = name;
     *portp = port;
 
@@ -722,14 +742,6 @@ ipc_port_alloc_name(
     ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
 #endif	/* MACH_ASSERT */
 
-#if CONFIG_MACF_MACH
-    task_t issuer = current_task();
-    tasklabel_lock2 (issuer, space->is_task);
-    mac_port_label_associate(&issuer->maclabel, &space->is_task->maclabel,
-            &port->ip_label);
-    tasklabel_unlock2 (issuer, space->is_task);
-#endif
-
     *portp = port;
 
     return KERN_SUCCESS;
@@ -748,9 +760,6 @@ ipc_port_spnotify(
 {
     ipc_port_request_index_t index = 0;
     ipc_table_elems_t size = 0;
-#if IMPORTANCE_INHERITANCE
-    boolean_t dropassert = FALSE;
-#endif /* IMPORTANCE_INHERITANCE */
 
     /*
      * If the port has no send-possible request
@@ -764,14 +773,15 @@ ipc_port_spnotify(
 #if IMPORTANCE_INHERITANCE
     if (port->ip_spimportant != 0) {
         port->ip_spimportant = 0;
-        port->ip_impcount--;
-        dropassert = TRUE;
+        if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
+            ip_lock(port);
+        }
     }
 #endif /* IMPORTANCE_INHERITANCE */
 
     if (port->ip_sprequests == 0) {
         ip_unlock(port);
-        goto out;
+        return;
     }
     port->ip_sprequests = 0;
 
@@ -810,13 +820,6 @@ revalidate:
         }
     }
     ip_unlock(port);
-out:
-#if IMPORTANCE_INHERITANCE
-    if ((dropassert == TRUE) && (current_task()->imp_receiver != 0)) {
-        /* drop internal assertion and no task lock held */
-        task_importance_drop_internal_assertion(current_task(), 1);
-    }
-#endif /* IMPORTANCE_INHERITANCE */
     return;
 }
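The new should_destroy contract above moves the irreversible part of mqueue teardown under the port lock and defers the expensive kmsg destruction to the caller. A hedged sketch of the intended calling pattern (condensed from ipc_port_destroy() later in this diff):

    /* Hedged sketch of the should_destroy contract (kernel context assumed). */
    static void
    destroy_mqueue_example(ipc_port_t port)
    {
        boolean_t reap;

        ip_lock(port);                               /* port locked, active */
        reap = ipc_port_clear_receiver(port, TRUE);  /* marks the mqueue invalid */
        ip_unlock(port);

        if (reap)
            ipc_kmsg_reap_delayed();   /* destroy deferred kmsgs, no locks held */
    }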
 
@@ -869,15 +872,15 @@ ipc_port_dnnotify(
  */
 
 void
-ipc_port_destroy(
-    ipc_port_t port)
+ipc_port_destroy(ipc_port_t port)
 {
     ipc_port_t pdrequest, nsrequest;
     ipc_mqueue_t mqueue;
     ipc_kmsg_t kmsg;
+    boolean_t special_reply = port->ip_specialreply;
 
 #if IMPORTANCE_INHERITANCE
-    task_t release_imp_task = TASK_NULL;
+    ipc_importance_task_t release_imp_task = IIT_NULL;
     thread_t self = current_thread();
     boolean_t top = (self->ith_assertions == 0);
     natural_t assertcnt = 0;
@@ -886,25 +889,21 @@ ipc_port_destroy(
     assert(ip_active(port));
     /* port->ip_receiver_name is garbage */
     /* port->ip_receiver/port->ip_destination is garbage */
-    assert(port->ip_pset_count == 0);
-    assert(port->ip_mscount == 0);
 
     /* check for a backup port */
     pdrequest = port->ip_pdrequest;
 
 #if IMPORTANCE_INHERITANCE
-    /* determine how may assertions to drop and from whom */
+    /* determine how many assertions to drop and from whom */
     if (port->ip_tempowner != 0) {
         assert(top);
-        if (port->ip_taskptr != 0) {
-            release_imp_task = port->ip_imp_task;
-            port->ip_imp_task = TASK_NULL;
-            port->ip_taskptr = 0;
+        release_imp_task = port->ip_imp_task;
+        if (IIT_NULL != release_imp_task) {
+            port->ip_imp_task = IIT_NULL;
             assertcnt = port->ip_impcount;
         }
         /* Otherwise, nothing to drop */
     } else {
-        assert(port->ip_taskptr == 0);
         assertcnt = port->ip_impcount;
         if (pdrequest != IP_NULL)
             /* mark in limbo for the journey */
@@ -916,30 +915,60 @@ ipc_port_destroy(
 #endif /* IMPORTANCE_INHERITANCE */
 
     if (pdrequest != IP_NULL) {
+        /* clear receiver, don't destroy the port */
+        (void)ipc_port_clear_receiver(port, FALSE);
+        assert(port->ip_in_pset == 0);
+        assert(port->ip_mscount == 0);
+
         /* we assume the ref for pdrequest */
         port->ip_pdrequest = IP_NULL;
 
         /* make port be in limbo */
+        imq_lock(&port->ip_messages);
         port->ip_receiver_name = MACH_PORT_NULL;
         port->ip_destination = IP_NULL;
+        imq_unlock(&port->ip_messages);
         ip_unlock(port);
 
+        if (special_reply) {
+            ipc_port_adjust_special_reply_port(port,
+                IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE);
+        }
         /* consumes our refs for port and pdrequest */
         ipc_notify_port_destroyed(pdrequest, port);
 
         goto drop_assertions;
     }
 
-    /* once port is dead, we don't need to keep it locked */
-
+    /* port active bit needs to be guarded under mqueue lock for turnstiles */
+    imq_lock(&port->ip_messages);
     port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
     port->ip_timestamp = ipc_port_timestamp();
+    imq_unlock(&port->ip_messages);
+    nsrequest = port->ip_nsrequest;
+
+    /*
+     * The mach_msg_* paths don't hold a port lock, they only hold a
+     * reference to the port object. If a thread raced us and is now
+     * blocked waiting for message reception on this mqueue (or waiting
+     * for ipc_mqueue_full), it will never be woken up. We call
+     * ipc_port_clear_receiver() here, _after_ the port has been marked
+     * inactive, to wake up any threads which may be blocked and ensure
+     * that no other thread can get lost waiting for a wake up on a
+     * port/mqueue that's been destroyed.
+     */
+    boolean_t reap_msgs = FALSE;
+    reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks mqueue inactive */
+    assert(port->ip_in_pset == 0);
+    assert(port->ip_mscount == 0);
 
     /*
      * If the port has a preallocated message buffer and that buffer
     * is not inuse, free it.  If it has an inuse one, then the kmsg
     * free will detect that we freed the association and it can free it
     * like a normal buffer.
+     *
+     * Once the port is marked inactive we don't need to keep it locked.
      */
     if (IP_PREALLOC(port)) {
         ipc_port_t inuse_port;
 
@@ -947,7 +976,7 @@ ipc_port_destroy(
         kmsg = port->ip_premsg;
         assert(kmsg != IKM_NULL);
         inuse_port = ikm_prealloc_inuse_port(kmsg);
-        IP_CLEAR_PREALLOC(port, kmsg);
+        ipc_kmsg_clear_prealloc(kmsg, port);
         ip_unlock(port);
         if (inuse_port != IP_NULL) {
             assert(inuse_port == port);
@@ -958,14 +987,27 @@ ipc_port_destroy(
         ip_unlock(port);
     }
 
+    /* unlink the kmsg from special reply port */
+    if (special_reply) {
+        ipc_port_adjust_special_reply_port(port,
+            IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE);
+    }
+
     /* throw away no-senders request */
-    nsrequest = port->ip_nsrequest;
     if (nsrequest != IP_NULL)
         ipc_notify_send_once(nsrequest); /* consumes ref */
 
-    /* destroy any queued messages */
+    /*
+     * Reap any kmsg objects waiting to be destroyed.
+     * This must be done after we've released the port lock.
+     */
+    if (reap_msgs)
+        ipc_kmsg_reap_delayed();
+
     mqueue = &port->ip_messages;
-    ipc_mqueue_destroy(mqueue);
+
+    /* cleanup waitq related resources */
+    ipc_mqueue_deinit(mqueue);
 
     /* generate dead-name notifications */
     ipc_port_dnnotify(port);
@@ -976,25 +1018,22 @@ ipc_port_destroy(
 
  drop_assertions:
 #if IMPORTANCE_INHERITANCE
-    if (release_imp_task != TASK_NULL) {
+    if (release_imp_task != IIT_NULL) {
         if (assertcnt > 0) {
             assert(top);
             self->ith_assertions = 0;
-            assert(release_imp_task->imp_receiver != 0);
-            task_importance_drop_internal_assertion(release_imp_task, assertcnt);
+            assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
+            ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
         }
-        task_deallocate(release_imp_task);
+        ipc_importance_task_release(release_imp_task);
 
     } else if (assertcnt > 0) {
         if (top) {
             self->ith_assertions = 0;
-            release_imp_task = current_task();
-            if (release_imp_task->imp_receiver != 0) {
-                task_importance_drop_internal_assertion(release_imp_task, assertcnt);
+            release_imp_task = current_task()->task_imp_base;
+            if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
+                ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
             }
-        } else {
-            /* the port chain we are enqueued on should cover our assertions */
-            assert(assertcnt <= self->ith_assertions);
         }
     }
 #endif /* IMPORTANCE_INHERITANCE */
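The pdrequest branch kept above is the "backup port" path: rather than dying, the receive right is shipped to whoever registered a port-destroyed notification. Registration uses the standard notification API; a hedged userspace sketch (not part of this diff):

    /* Hedged sketch: nominating `backup` to inherit `victim` on destruction. */
    static kern_return_t
    register_backup_port(mach_port_t victim, mach_port_t backup)
    {
        mach_port_t previous = MACH_PORT_NULL;

        return mach_port_request_notification(mach_task_self(), victim,
            MACH_NOTIFY_PORT_DESTROYED, 0, backup,
            MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    }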
 
 /*
@@ -1013,11 +1052,6 @@ ipc_port_destroy(
  *		but guaranteeing that this doesn't create a circle
  *		port->ip_destination->ip_destination->... == port
  *
- *		Additionally, if port was successfully changed to "in transit",
- *		propagate boost assertions from the "in limbo" port to all
- *		the ports in the chain, and, if the destination task accepts
- *		boosts, to the destination task.
- *
  *	Conditions:
  *		No ports locked.  References held for "port" and "dest".
  */
@@ -1027,13 +1061,11 @@ ipc_port_check_circularity(
     ipc_port_t port,
     ipc_port_t dest)
 {
-    ipc_port_t base;
-
 #if IMPORTANCE_INHERITANCE
-    task_t task = TASK_NULL;
-    task_t release_task = TASK_NULL;
-    int assertcnt = 0;
-#endif /* IMPORTANCE_INHERITANCE */
+    /* adjust importance counts at the same time */
+    return ipc_importance_check_circularity(port, dest);
+#else
+    ipc_port_t base;
 
     assert(port != IP_NULL);
     assert(dest != IP_NULL);
@@ -1042,11 +1074,13 @@ ipc_port_check_circularity(
         return TRUE;
     base = dest;
 
+    /* Check if destination needs a turnstile */
+    ipc_port_send_turnstile_prepare(dest);
+
     /*
      * First try a quick check that can run in parallel.
      * No circularity if dest is not in transit.
      */
-
     ip_lock(port);
     if (ip_lock_try(dest)) {
         if (!ip_active(dest) ||
@@ -1091,19 +1125,21 @@ ipc_port_check_circularity(
     assert(port->ip_receiver_name == MACH_PORT_NULL);
     assert(port->ip_destination == IP_NULL);
 
-    while (dest != IP_NULL) {
+    base = dest;
+    while (base != IP_NULL) {
         ipc_port_t next;
 
         /* dest is in transit or in limbo */
 
-        assert(ip_active(dest));
-        assert(dest->ip_receiver_name == MACH_PORT_NULL);
+        assert(ip_active(base));
+        assert(base->ip_receiver_name == MACH_PORT_NULL);
 
-        next = dest->ip_destination;
-        ip_unlock(dest);
-        dest = next;
+        next = base->ip_destination;
+        ip_unlock(base);
+        base = next;
     }
 
+    ipc_port_send_turnstile_complete(dest);
     return TRUE;
 }
 
@@ -1116,7 +1152,8 @@ ipc_port_check_circularity(
     ip_lock(port);
     ipc_port_multiple_unlock();
 
-    not_circular:
+not_circular:
+    imq_lock(&port->ip_messages);
 
     /* port is in limbo */
 
@@ -1127,37 +1164,26 @@ ipc_port_check_circularity(
     ip_reference(dest);
     port->ip_destination = dest;
 
-#if IMPORTANCE_INHERITANCE
-    /* must have been in limbo or still bound to a task */
-    assert(port->ip_tempowner != 0);
+    /* Setup linkage for source port if it has sync ipc push */
+    struct turnstile *send_turnstile = TURNSTILE_NULL;
+    if (port_send_turnstile(port)) {
+        send_turnstile = turnstile_prepare((uintptr_t)port,
+            port_send_turnstile_address(port),
+            TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
 
-    if (port->ip_taskptr != 0) {
-        /*
-         * We delayed dropping assertions from a specific task.
-         * Cache that info now (we'll drop assertions and the
-         * task reference below).
-         */
-        release_task = port->ip_imp_task;
-        port->ip_imp_task = TASK_NULL;
-        port->ip_taskptr = 0;
-    }
-    assertcnt = port->ip_impcount;
-
-    /* take the port out of limbo w.r.t. assertions */
-    port->ip_tempowner = 0;
+        turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
+            (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
 
-#endif /* IMPORTANCE_INHERITANCE */
+        /* update complete and turnstile complete called after dropping all locks */
+    }
+    imq_unlock(&port->ip_messages);
 
     /* now unlock chain */
 
     ip_unlock(port);
 
     for (;;) {
-
-#if IMPORTANCE_INHERITANCE
-        /* every port along chain track assertions behind it */
-        dest->ip_impcount += assertcnt;
-#endif /* IMPORTANCE_INHERITANCE */
+        ipc_port_t next;
 
         if (dest == base)
             break;
 
@@ -1168,13 +1194,9 @@ ipc_port_check_circularity(
         assert(dest->ip_receiver_name == MACH_PORT_NULL);
         assert(dest->ip_destination != IP_NULL);
 
-#if IMPORTANCE_INHERITANCE
-        assert(dest->ip_tempowner == 0);
-#endif /* IMPORTANCE_INHERITANCE */
-
-        port = dest->ip_destination;
+        next = dest->ip_destination;
         ip_unlock(dest);
-        dest = port;
+        dest = next;
     }
 
     /* base is not in transit */
     assert(!ip_active(base) ||
@@ -1182,66 +1204,567 @@ ipc_port_check_circularity(
            (base->ip_receiver_name != MACH_PORT_NULL) ||
            (base->ip_destination == IP_NULL));
 
-#if IMPORTANCE_INHERITANCE
-    /*
-     * Find the task to boost (if any).
-     * We will boost "through" ports that don't know
-     * about inheritance to deliver receive rights that
-     * do.
-     */
-    if (ip_active(base) && (assertcnt > 0)) {
-        if (base->ip_tempowner != 0) {
-            if (base->ip_taskptr != 0)
-                /* specified tempowner task */
-                task = base->ip_imp_task;
-            /* otherwise don't boost current task */
+    ip_unlock(base);
 
-        } else if (base->ip_receiver_name != MACH_PORT_NULL) {
-            ipc_space_t space = base->ip_receiver;
+    /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
+    if (send_turnstile) {
+        turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
 
-            /* only spaces with boost-accepting tasks */
-            if (space->is_task != TASK_NULL &&
-                space->is_task->imp_receiver != 0)
-                task = space->is_task;
+        /* Take the mq lock to call turnstile complete */
+        imq_lock(&port->ip_messages);
+        turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL);
+        send_turnstile = TURNSTILE_NULL;
+        imq_unlock(&port->ip_messages);
+        turnstile_cleanup();
+    }
+
+    return FALSE;
+#endif /* !IMPORTANCE_INHERITANCE */
+}
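Stripped of locking, the invariant ipc_port_check_circularity() enforces is simple: port must not already be reachable from dest through ip_destination links. A hedged, lock-free restatement of the test (illustrative only; the real routine must hold ipc_port_multiple_lock while walking):

    /* Hedged sketch of the cycle test at the heart of the slow path. */
    static boolean_t
    chain_contains(ipc_port_t port, ipc_port_t dest)
    {
        /* TRUE means queuing port on dest would create port -> ... -> port */
        for (ipc_port_t p = dest; p != IP_NULL; p = p->ip_destination) {
            if (p == port)
                return TRUE;
        }
        return FALSE;
    }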
 
+struct turnstile *
+ipc_port_get_inheritor(ipc_port_t port)
+{
+    ipc_mqueue_t mqueue = &port->ip_messages;
+    struct knote *kn;
+
+    assert(imq_held(mqueue));
+
+    if (!IMQ_KLIST_VALID(mqueue)) {
+        return IMQ_INHERITOR(mqueue);
+    }
+
+    SLIST_FOREACH(kn, &port->ip_messages.imq_klist, kn_selnext) {
+        if ((kn->kn_sfflags & MACH_RCV_MSG) && (kn->kn_status & KN_DISPATCH)) {
+            return filt_machport_kqueue_turnstile(kn);
+        }
+    }
+
+    return TURNSTILE_NULL;
+}
 
+/*
+ * Routine:	ipc_port_send_turnstile_prepare
+ * Purpose:
+ *		Get a reference on port's send turnstile, if
+ *		port does not have a send turnstile then allocate one.
+ *
+ * Conditions:
+ *		Nothing is locked.
+ */
+void
+ipc_port_send_turnstile_prepare(ipc_port_t port)
+{
+    struct turnstile *turnstile = TURNSTILE_NULL;
+    struct turnstile *inheritor = TURNSTILE_NULL;
+    struct turnstile *send_turnstile = TURNSTILE_NULL;
+
+retry_alloc:
+    imq_lock(&port->ip_messages);
+
+    if (port_send_turnstile(port) == NULL ||
+        port_send_turnstile(port)->ts_port_ref == 0) {
+
+        if (turnstile == TURNSTILE_NULL) {
+            imq_unlock(&port->ip_messages);
+            turnstile = turnstile_alloc();
+            goto retry_alloc;
+        }
 
-        /* take reference before unlocking base */
-        if (task != TASK_NULL) {
-            assert(task->imp_receiver != 0);
-            task_reference(task);
+        send_turnstile = turnstile_prepare((uintptr_t)port,
+            port_send_turnstile_address(port),
+            turnstile, TURNSTILE_SYNC_IPC);
+        turnstile = TURNSTILE_NULL;
+
+        /*
+         * if port is in transit, set up linkage for its turnstile,
+         * otherwise link it to the WL turnstile.
+         */
+        if (ip_active(port) &&
+            port->ip_receiver_name == MACH_PORT_NULL &&
+            port->ip_destination != IP_NULL) {
+            assert(port->ip_receiver_name == MACH_PORT_NULL);
+            assert(port->ip_destination != IP_NULL);
+
+            inheritor = port_send_turnstile(port->ip_destination);
+        } else {
+            inheritor = ipc_port_get_inheritor(port);
         }
+        turnstile_update_inheritor(send_turnstile, inheritor,
+            TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE);
+        /* turnstile complete will be called in ipc_port_send_turnstile_complete */
     }
-#endif /* IMPORTANCE_INHERITANCE */
 
-    ip_unlock(base);
+    /* Increment turnstile counter */
+    port_send_turnstile(port)->ts_port_ref++;
+    imq_unlock(&port->ip_messages);
 
-#if IMPORTANCE_INHERITANCE
-    /*
-     * Transfer assertions now that the ports are unlocked.
-     * Avoid extra overhead if transferring to/from the same task.
-     */
-    boolean_t transfer_assertions = (task != release_task) ? TRUE : FALSE;
+    if (send_turnstile) {
+        turnstile_update_inheritor_complete(send_turnstile,
+            TURNSTILE_INTERLOCK_NOT_HELD);
+    }
+    if (turnstile != TURNSTILE_NULL) {
+        turnstile_deallocate(turnstile);
+    }
+}
 
-    if (task != TASK_NULL) {
-        if (transfer_assertions)
-            task_importance_hold_internal_assertion(task, assertcnt);
-        task_deallocate(task);
-        task = TASK_NULL;
+
+/*
+ * Routine:	ipc_port_send_turnstile_complete
+ * Purpose:
+ *		Drop a ref on the port's send turnstile, if the
+ *		ref becomes zero, deallocate the turnstile.
+ *
+ * Conditions:
+ *		The space might be locked, use safe deallocate.
+ */
+void
+ipc_port_send_turnstile_complete(ipc_port_t port)
+{
+    struct turnstile *turnstile = TURNSTILE_NULL;
+
+    /* Drop turnstile count on dest port */
+    imq_lock(&port->ip_messages);
+
+    port_send_turnstile(port)->ts_port_ref--;
+    if (port_send_turnstile(port)->ts_port_ref == 0) {
+        turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
+            &turnstile);
+        assert(turnstile != TURNSTILE_NULL);
     }
+    imq_unlock(&port->ip_messages);
+    turnstile_cleanup();
 
-    if (release_task != TASK_NULL) {
-        if (transfer_assertions)
-            task_importance_drop_internal_assertion(release_task, assertcnt);
-        task_deallocate(release_task);
-        release_task = TASK_NULL;
+    if (turnstile != TURNSTILE_NULL) {
+        turnstile_deallocate_safe(turnstile);
+        turnstile = TURNSTILE_NULL;
     }
-#endif /* IMPORTANCE_INHERITANCE */
 
-    return FALSE;
+}
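The two routines above form a reference-counted pair around a sender's block: prepare takes (and possibly allocates) the send turnstile, complete drops it and frees it on the last reference. A hedged sketch of the pairing as a blocking send would use it:

    /* Hedged sketch: the pairing a blocking send relies on. */
    static void
    blocking_send_example(ipc_port_t port)
    {
        ipc_port_send_turnstile_prepare(port);   /* ts_port_ref++, may allocate */

        /* ... block for queue space; the turnstile donates the sender's
         *     priority to the current inheritor while we wait ... */

        ipc_port_send_turnstile_complete(port);  /* ts_port_ref--, free at zero */
    }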
 
+/*
+ * Routine:	ipc_port_rcv_turnstile_waitq
+ * Purpose:
+ *		Given the mqueue's waitq, find the port's
+ *		rcv turnstile and return its waitq.
+ *
+ * Conditions:
+ *		mqueue locked or thread waiting on turnstile is locked.
+ */
+struct waitq *
+ipc_port_rcv_turnstile_waitq(struct waitq *waitq)
+{
+    struct waitq *safeq;
+
+    ipc_mqueue_t mqueue = imq_from_waitq(waitq);
+    ipc_port_t port = ip_from_mq(mqueue);
+    struct turnstile *rcv_turnstile = ipc_port_rcv_turnstile(port);
+
+    /* Check if the port has a rcv turnstile */
+    if (rcv_turnstile != TURNSTILE_NULL) {
+        safeq = &rcv_turnstile->ts_waitq;
+    } else {
+        safeq = global_eventq(waitq);
+    }
+    return safeq;
+}
+
 /*
- * Routine:	ipc_port_importance_delta
+ * Routine:	ipc_port_rcv_turnstile
+ * Purpose:
+ *		Get the port's receive turnstile
+ *
+ * Conditions:
+ *		mqueue locked or thread waiting on turnstile is locked.
+ */
+struct turnstile *
+ipc_port_rcv_turnstile(ipc_port_t port)
+{
+    return turnstile_lookup_by_proprietor((uintptr_t)port);
+}
+
+
+/*
+ * Routine:	ipc_port_link_special_reply_port
+ * Purpose:
+ *		Link the special reply port with the destination port.
+ *		Allocates turnstile to dest port.
+ *
+ * Conditions:
+ *		Nothing is locked.
+ */
+void
+ipc_port_link_special_reply_port(
+    ipc_port_t special_reply_port,
+    ipc_port_t dest_port)
+{
+    boolean_t drop_turnstile_ref = FALSE;
+
+    /* Check if dest_port needs a turnstile */
+    ipc_port_send_turnstile_prepare(dest_port);
+
+    /* Lock the special reply port and establish the linkage */
+    ip_lock(special_reply_port);
+    imq_lock(&special_reply_port->ip_messages);
+
+    /* Check if we need to drop the acquired turnstile ref on dest port */
+    if (!special_reply_port->ip_specialreply ||
+        special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
+        special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
+        drop_turnstile_ref = TRUE;
+    } else {
+        /* take a reference on dest_port */
+        ip_reference(dest_port);
+        special_reply_port->ip_sync_inheritor_port = dest_port;
+        special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
+    }
+
+    imq_unlock(&special_reply_port->ip_messages);
+    ip_unlock(special_reply_port);
+
+    if (drop_turnstile_ref) {
+        ipc_port_send_turnstile_complete(dest_port);
+    }
+
+    return;
+}
+
+#if DEVELOPMENT || DEBUG
+inline void
+reset_ip_srp_bits(ipc_port_t special_reply_port)
+{
+    special_reply_port->ip_srp_lost_link = 0;
+    special_reply_port->ip_srp_msg_sent = 0;
+}
+
+inline void
+reset_ip_srp_msg_sent(ipc_port_t special_reply_port)
+{
+    if (special_reply_port->ip_specialreply == 1) {
+        special_reply_port->ip_srp_msg_sent = 0;
+    }
+}
+
+inline void
+set_ip_srp_msg_sent(ipc_port_t special_reply_port)
+{
+    if (special_reply_port->ip_specialreply == 1) {
+        special_reply_port->ip_srp_msg_sent = 1;
+    }
+}
+
+inline void
+set_ip_srp_lost_link(ipc_port_t special_reply_port)
+{
+    if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
+        special_reply_port->ip_srp_lost_link = 1;
+    }
+}
+
+#else /* DEVELOPMENT || DEBUG */
+inline void
+reset_ip_srp_bits(__unused ipc_port_t special_reply_port)
+{
+    return;
+}
+
+inline void
+reset_ip_srp_msg_sent(__unused ipc_port_t special_reply_port)
+{
+    return;
+}
+
+inline void
+set_ip_srp_msg_sent(__unused ipc_port_t special_reply_port)
+{
+    return;
+}
+
+inline void
+set_ip_srp_lost_link(__unused ipc_port_t special_reply_port)
+{
+    return;
+}
+#endif /* DEVELOPMENT || DEBUG */
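ipc_port_link_special_reply_port() only succeeds for a pristine special reply port (ip_specialreply set, link state still ANY, no inheritor port yet); otherwise it hands back the pre-acquired turnstile ref. The port it operates on is the per-thread reply port used for synchronous IPC; from userspace, roughly (hedged sketch; I believe these are the public names, but treat them as assumptions):

    /* Hedged sketch: the reply right this linkage machinery tracks. */
    mach_port_t reply = thread_get_special_reply_port();

    /* A send-and-receive that names `reply` as the reply port (typically
     * with MACH_SEND_SYNC_OVERRIDE) is what drives the kernel to link
     * reply -> destination and chain the sender's priority push. */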
 
+/*
+ * Routine:	ipc_port_adjust_special_reply_port_locked
+ * Purpose:
+ *		If the special port has a turnstile, update its inheritor.
+ * Condition:
+ *		Special reply port locked on entry.
+ *		Special reply port unlocked on return.
+ * Returns:
+ *		None.
+ */
+void
+ipc_port_adjust_special_reply_port_locked(
+    ipc_port_t special_reply_port,
+    struct knote *kn,
+    uint8_t flags,
+    boolean_t get_turnstile)
+{
+    ipc_port_t dest_port = IPC_PORT_NULL;
+    int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
+    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+    struct turnstile *dest_ts = TURNSTILE_NULL, *ts = TURNSTILE_NULL;
+
+    imq_lock(&special_reply_port->ip_messages);
+
+    if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
+        reset_ip_srp_msg_sent(special_reply_port);
+    }
+
+    /* Check if the special reply port is marked non-special */
+    if (special_reply_port->ip_specialreply == 0 ||
+        special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
+        if (get_turnstile) {
+            turnstile_complete((uintptr_t)special_reply_port,
+                port_rcv_turnstile_address(special_reply_port),
+                NULL);
+        }
+        imq_unlock(&special_reply_port->ip_messages);
+        ip_unlock(special_reply_port);
+        if (get_turnstile) {
+            turnstile_cleanup();
+        }
+        return;
+    }
+
+    /* Clear thread's special reply port and clear linkage */
+    if (flags & IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY) {
+        /* This option should only be specified by a non blocking thread */
+        assert(get_turnstile == FALSE);
+        special_reply_port->ip_specialreply = 0;
+
+        reset_ip_srp_bits(special_reply_port);
+
+        /* Check if need to break linkage */
+        if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
+            imq_unlock(&special_reply_port->ip_messages);
+            ip_unlock(special_reply_port);
+            return;
+        }
+    } else if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
+        if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY ||
+            special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT) {
+            if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
+                inheritor = filt_machport_stash_port(kn, special_reply_port,
+                    &sync_link_state);
+            }
+        }
+    } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
+        sync_link_state = PORT_SYNC_LINK_ANY;
+    }
+
+    switch (special_reply_port->ip_sync_link_state) {
+    case PORT_SYNC_LINK_PORT:
+        dest_port = special_reply_port->ip_sync_inheritor_port;
+        special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
+        break;
+    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+        special_reply_port->ip_sync_inheritor_knote = NULL;
+        break;
+    case PORT_SYNC_LINK_WORKLOOP_STASH:
+        dest_ts = special_reply_port->ip_sync_inheritor_ts;
+        special_reply_port->ip_sync_inheritor_ts = NULL;
+        break;
+    }
+
+    special_reply_port->ip_sync_link_state = sync_link_state;
+
+    switch (sync_link_state) {
+    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+        special_reply_port->ip_sync_inheritor_knote = kn;
+        break;
+    case PORT_SYNC_LINK_WORKLOOP_STASH:
+        turnstile_reference(inheritor);
+        special_reply_port->ip_sync_inheritor_ts = inheritor;
+        break;
+    case PORT_SYNC_LINK_NO_LINKAGE:
+        if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
+            set_ip_srp_lost_link(special_reply_port);
+        }
+        break;
+    }
+
+    /* Get thread's turnstile donated to special reply port */
+    if (get_turnstile) {
+        turnstile_complete((uintptr_t)special_reply_port,
+            port_rcv_turnstile_address(special_reply_port),
+            NULL);
+    } else {
+        ts = ipc_port_rcv_turnstile(special_reply_port);
+        if (ts) {
+            turnstile_reference(ts);
+            turnstile_update_inheritor(ts, inheritor,
+                (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
+        }
+    }
+
+    imq_unlock(&special_reply_port->ip_messages);
+    ip_unlock(special_reply_port);
+
+    if (get_turnstile) {
+        turnstile_cleanup();
+    } else if (ts) {
+        /* Call turnstile cleanup after dropping the interlock */
+        turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
+        turnstile_deallocate_safe(ts);
+    }
+
+    /* Release the ref on the dest port and its turnstile */
+    if (dest_port) {
+        ipc_port_send_turnstile_complete(dest_port);
+        /* release the reference on the dest port */
+        ip_release(dest_port);
+    }
+
+    if (dest_ts) {
+        turnstile_deallocate_safe(dest_ts);
+    }
+}
 
+/*
+ * Routine:	ipc_port_adjust_special_reply_port
+ * Purpose:
+ *		If the special port has a turnstile, update its inheritor.
+ * Condition:
+ *		Nothing locked.
+ * Returns:
+ *		None.
+ */
+void
+ipc_port_adjust_special_reply_port(
+    ipc_port_t special_reply_port,
+    uint8_t flags,
+    boolean_t get_turnstile)
+{
+    ip_lock(special_reply_port);
+    ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL, flags, get_turnstile);
+    /* special_reply_port unlocked */
+}
+
+/*
+ * Routine:	ipc_port_get_special_reply_port_inheritor
+ * Purpose:
+ *		Returns the current inheritor of the special reply port
+ * Condition:
+ *		mqueue is locked, port is a special reply port
+ * Returns:
+ *		the current inheritor
+ */
+turnstile_inheritor_t
+ipc_port_get_special_reply_port_inheritor(
+    ipc_port_t port)
+{
+    assert(port->ip_specialreply);
+    imq_held(&port->ip_messages);
+
+    switch (port->ip_sync_link_state) {
+    case PORT_SYNC_LINK_PORT:
+        if (port->ip_sync_inheritor_port != NULL) {
+            return port_send_turnstile(port->ip_sync_inheritor_port);
+        }
+        break;
+    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+        return filt_machport_stashed_special_reply_port_turnstile(port);
+    case PORT_SYNC_LINK_WORKLOOP_STASH:
+        return port->ip_sync_inheritor_ts;
+    }
+    return TURNSTILE_INHERITOR_NULL;
+}
 
+/*
+ * Routine:	ipc_port_impcount_delta
+ * Purpose:
+ *		Adjust only the importance count associated with a port.
+ *		If there are any adjustments to be made to receiver task,
+ *		those are handled elsewhere.
+ *
+ *		For now, be defensive during deductions to make sure the
+ *		impcount for the port doesn't underflow zero.  This will
+ *		go away when the port boost addition is made atomic (see
+ *		note in ipc_port_importance_delta()).
+ * Conditions:
+ *		The port is referenced and locked.
+ *		Nothing else is locked.
+ */
+mach_port_delta_t
+ipc_port_impcount_delta(
+    ipc_port_t        port,
+    mach_port_delta_t delta,
+    ipc_port_t        __unused base)
+{
+    mach_port_delta_t absdelta;
+
+    if (!ip_active(port)) {
+        return 0;
+    }
+
+    /* adding/doing nothing is easy */
+    if (delta >= 0) {
+        port->ip_impcount += delta;
+        return delta;
+    }
+
+    absdelta = 0 - delta;
+    if (port->ip_impcount >= absdelta) {
+        port->ip_impcount -= absdelta;
+        return delta;
+    }
+
+#if (DEVELOPMENT || DEBUG)
+    if (port->ip_receiver_name != MACH_PORT_NULL) {
+        task_t target_task = port->ip_receiver->is_task;
+        ipc_importance_task_t target_imp = target_task->task_imp_base;
+        const char *target_procname;
+        int target_pid;
+
+        if (target_imp != IIT_NULL) {
+            target_procname = target_imp->iit_procname;
+            target_pid = target_imp->iit_bsd_pid;
+        } else {
+            target_procname = "unknown";
+            target_pid = -1;
+        }
+        printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
+               "dropping %d assertion(s) but port only has %d remaining.\n",
+               port->ip_receiver_name,
+               target_pid, target_procname,
+               absdelta, port->ip_impcount);
+
+    } else if (base != IP_NULL) {
+        task_t target_task = base->ip_receiver->is_task;
+        ipc_importance_task_t target_imp = target_task->task_imp_base;
+        const char *target_procname;
+        int target_pid;
+
+        if (target_imp != IIT_NULL) {
+            target_procname = target_imp->iit_procname;
+            target_pid = target_imp->iit_bsd_pid;
+        } else {
+            target_procname = "unknown";
+            target_pid = -1;
+        }
+        printf("Over-release of importance assertions for port 0x%lx "
+               "enqueued on port 0x%x with receiver pid %d (%s), "
+               "dropping %d assertion(s) but port only has %d remaining.\n",
+               (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
+               base->ip_receiver_name,
+               target_pid, target_procname,
+               absdelta, port->ip_impcount);
+    }
+#endif
+
+    delta = 0 - port->ip_impcount;
+    port->ip_impcount = 0;
+    return delta;
+}
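The return value of ipc_port_impcount_delta() is the delta actually applied, which matters on the underflow path. A worked example of the contract (illustrative values):

    /* Hedged example: suppose port->ip_impcount == 3 and the caller asks for -5. */
    mach_port_delta_t applied = ipc_port_impcount_delta(port, -5, IP_NULL);
    /* ip_impcount is clamped 3 -> 0 and applied == -3 (not -5);
     * DEVELOPMENT/DEBUG kernels additionally log the over-release. */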
 
+/*
+ * Routine:	ipc_port_importance_delta_internal
  * Purpose:
  *		Adjust the importance count through the given port.
  *		If the port is in transit, apply the delta throughout
@@ -1250,6 +1773,7 @@ ipc_port_check_circularity(
  *		and if so, apply the delta.
  *	Conditions:
  *		The port is referenced and locked on entry.
+ *		Importance may be locked.
  *		Nothing else is locked.
  *		The lock may be dropped on exit.
  *		Returns TRUE if lock was dropped.
  */
@@ -1257,17 +1781,22 @@
 #if IMPORTANCE_INHERITANCE
 
 boolean_t
-ipc_port_importance_delta(
+ipc_port_importance_delta_internal(
     ipc_port_t            port,
-    mach_port_delta_t     delta)
+    natural_t             options,
+    mach_port_delta_t     *deltap,
+    ipc_importance_task_t *imp_task)
 {
     ipc_port_t next, base;
-    task_t task = TASK_NULL;
     boolean_t dropped = FALSE;
 
-    if (delta == 0)
+    *imp_task = IIT_NULL;
+
+    if (*deltap == 0)
         return FALSE;
 
+    assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
+
     base = port;
 
     /* if port is in transit, have to search for end of chain */
@@ -1291,12 +1820,32 @@ ipc_port_importance_delta(
         ipc_port_multiple_unlock();
     }
 
-    /* unlock down to the base, adding a boost at each level */
+    /*
+     * If the port lock is dropped b/c the port is in transit, there is a
+     * race window where another thread can drain messages and/or fire a
+     * send possible notification before we get here.
+     *
+     * We solve this race by checking to see if our caller armed the send
+     * possible notification, whether or not it's been fired yet, and
+     * whether or not we've already set the port's ip_spimportant bit. If
+     * we don't need a send-possible boost, then we'll just apply a
+     * harmless 0-boost to the port.
+     */
+    if (options & IPID_OPTION_SENDPOSSIBLE) {
+        assert(*deltap == 1);
+        if (port->ip_sprequests && port->ip_spimportant == 0)
+            port->ip_spimportant = 1;
+        else
+            *deltap = 0;
+    }
+
+    /* unlock down to the base, adjusting boost(s) at each level */
     for (;;) {
-        port->ip_impcount += delta;
+        *deltap = ipc_port_impcount_delta(port, *deltap, base);
 
-        if (port == base)
+        if (port == base) {
             break;
+        }
 
         /* port is in transit */
         assert(port->ip_tempowner == 0);
@@ -1308,8 +1857,8 @@ ipc_port_importance_delta(
     /* find the task (if any) to boost according to the base */
     if (ip_active(base)) {
         if (base->ip_tempowner != 0) {
-            if (base->ip_taskptr != 0)
-                task = base->ip_imp_task;
+            if (IIT_NULL != base->ip_imp_task)
+                *imp_task = base->ip_imp_task;
             /* otherwise don't boost */
 
         } else if (base->ip_receiver_name != MACH_PORT_NULL) {
@@ -1317,8 +1866,9 @@ ipc_port_importance_delta(
 
             /* only spaces with boost-accepting tasks */
             if (space->is_task != TASK_NULL &&
-                space->is_task->imp_receiver != 0)
-                task = space->is_task;
+                ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
+                *imp_task = space->is_task->task_imp_base;
+            }
         }
     }
 
@@ -1326,21 +1876,12 @@ ipc_port_importance_delta(
      * Only the base is locked.  If we have to hold or drop task
      * importance assertions, we'll have to drop that lock as well.
      */
-    if (task != TASK_NULL) {
+    if (*imp_task != IIT_NULL) {
         /* take a reference before unlocking base */
-        assert(task->imp_receiver != 0);
-        task_reference(task);
-
-        ip_unlock(base);
-        dropped = TRUE;
-
-        if (delta > 0)
-            task_importance_hold_internal_assertion(task, delta);
-        else
-            task_importance_drop_internal_assertion(task, -delta);
+        ipc_importance_task_reference(*imp_task);
+    }
 
-        task_deallocate(task);
-    } else if (dropped == TRUE) {
+    if (dropped == TRUE) {
         ip_unlock(base);
     }
 
@@ -1348,6 +1889,52 @@ ipc_port_importance_delta(
 }
 #endif /* IMPORTANCE_INHERITANCE */
 
+/*
+ * Routine:	ipc_port_importance_delta
+ * Purpose:
+ *		Adjust the importance count through the given port.
+ *		If the port is in transit, apply the delta throughout
+ *		the chain.
+ *
+ *		If there is a task at the base of the chain that wants/needs
+ *		to be adjusted, apply the delta.
+ * Conditions:
+ *		The port is referenced and locked on entry.
+ *		Nothing else is locked.
+ *		The lock may be dropped on exit.
+ *		Returns TRUE if lock was dropped.
+ */
+#if IMPORTANCE_INHERITANCE
+
+boolean_t
+ipc_port_importance_delta(
+    ipc_port_t        port,
+    natural_t         options,
+    mach_port_delta_t delta)
+{
+    ipc_importance_task_t imp_task = IIT_NULL;
+    boolean_t dropped;
+
+    dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
+
+    if (IIT_NULL == imp_task || delta == 0)
+        return dropped;
+
+    if (!dropped)
+        ip_unlock(port);
+
+    assert(ipc_importance_task_is_any_receiver_type(imp_task));
+
+    if (delta > 0)
+        ipc_importance_task_hold_internal_assertion(imp_task, delta);
+    else
+        ipc_importance_task_drop_internal_assertion(imp_task, -delta);
+
+    ipc_importance_task_release(imp_task);
+    return TRUE;
+}
+#endif /* IMPORTANCE_INHERITANCE */
+
 /*
  *	Routine:	ipc_port_lookup_notify
  *	Purpose:
@@ -1376,7 +1963,7 @@ ipc_port_lookup_notify(
     if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
         return IP_NULL;
 
-    port = (ipc_port_t) entry->ie_object;
+    __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
     assert(port != IP_NULL);
 
     ip_lock(port);
@@ -1506,6 +2093,40 @@ ipc_port_copyout_send(
     return name;
 }
 
+/*
+ *	Routine:	ipc_port_copyout_name_send
+ *	Purpose:
+ *		Copyout a naked send right (possibly null/dead) to given name,
+ *		or if that fails, destroy the right.
+ *	Conditions:
+ *		Nothing locked.
+ */
+
+mach_port_name_t
+ipc_port_copyout_name_send(
+    ipc_port_t       sright,
+    ipc_space_t      space,
+    mach_port_name_t name)
+{
+    if (IP_VALID(sright)) {
+        kern_return_t kr;
+
+        kr = ipc_object_copyout_name(space, (ipc_object_t) sright,
+                                     MACH_MSG_TYPE_PORT_SEND, TRUE, name);
+        if (kr != KERN_SUCCESS) {
+            ipc_port_release_send(sright);
+
+            if (kr == KERN_INVALID_CAPABILITY)
+                name = MACH_PORT_DEAD;
+            else
+                name = MACH_PORT_NULL;
+        }
+    } else
+        name = CAST_MACH_PORT_TO_NAME(sright);
+
+    return name;
+}
+
 /*
  *	Routine:	ipc_port_release_send
  *	Purpose:
@@ -1527,15 +2148,20 @@ ipc_port_release_send(
 
     ip_lock(port);
 
+    assert(port->ip_srights > 0);
+    if (port->ip_srights == 0) {
+        panic("Over-release of port %p send right!", port);
+    }
+
+    port->ip_srights--;
+
     if (!ip_active(port)) {
         ip_unlock(port);
         ip_release(port);
         return;
     }
 
-    assert(port->ip_srights > 0);
-
-    if (--port->ip_srights == 0 &&
+    if (port->ip_srights == 0 &&
         port->ip_nsrequest != IP_NULL) {
         nsrequest = port->ip_nsrequest;
         port->ip_nsrequest = IP_NULL;
@@ -1614,9 +2240,14 @@ ipc_port_release_sonce(
     if (!IP_VALID(port))
         return;
 
+    ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_SR_NONE, FALSE);
+
     ip_lock(port);
 
     assert(port->ip_sorights > 0);
+    if (port->ip_sorights == 0) {
+        panic("Over-release of port %p send-once right!", port);
+    }
 
     port->ip_sorights--;
 
@@ -1649,8 +2280,10 @@ ipc_port_release_receive(
 
     ipc_port_destroy(port); /* consumes ref, unlocks */
 
-    if (dest != IP_NULL)
+    if (dest != IP_NULL) {
+        ipc_port_send_turnstile_complete(dest);
         ip_release(dest);
+    }
 }
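Note the asymmetric locking contract of ipc_port_importance_delta(): whenever an assertion actually moves, the port lock is surrendered and TRUE comes back. The ipc_port_spnotify() hunk earlier in this diff shows the canonical caller shape; hedged sketch:

    ip_lock(port);
    /* ... */
    if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
        ip_lock(port);   /* TRUE => lock was dropped; retake before continuing */
    }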
 
 /*
@@ -1669,7 +2302,7 @@ ipc_port_alloc_special(
 {
     ipc_port_t port;
 
-    port = (ipc_port_t) io_alloc(IOT_PORT);
+    __IGNORE_WCASTALIGN(port = (ipc_port_t) io_alloc(IOT_PORT));
     if (port == IP_NULL)
         return IP_NULL;
 
@@ -1689,18 +2322,6 @@ ipc_port_alloc_special(
     ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
 #endif	/* MACH_ASSERT */
 
-#if CONFIG_MACF_MACH
-    /* Currently, ipc_port_alloc_special is used for two things:
-     * - Reply ports for messages from the kernel
-     * - Ports for communication with the kernel (e.g. task ports)
-     * Since both of these would typically be labelled as kernel objects,
-     * we will use a new entry point for this purpose, as current_task()
-     * is often wrong (i.e. not kernel_task) or null.
-     */
-    mac_port_label_init(&port->ip_label);
-    mac_port_label_associate_kernel(&port->ip_label, space == ipc_space_reply);
-#endif
-
     return port;
 }
 
@@ -1728,8 +2349,10 @@ ipc_port_dealloc_special(
      *	the ipc_space_kernel check in ipc_mqueue_send.
      */
 
+    imq_lock(&port->ip_messages);
     port->ip_receiver_name = MACH_PORT_NULL;
     port->ip_receiver = IS_NULL;
+    imq_unlock(&port->ip_messages);
 
     /* relevant part of ipc_port_clear_receiver */
     ipc_port_set_mscount(port, 0);
@@ -1753,21 +2376,140 @@ ipc_port_finalize(
 {
     ipc_port_request_t requests = port->ip_requests;
 
-    assert(!ip_active(port));
+    assert(port_send_turnstile(port) == TURNSTILE_NULL);
+    assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
+
+    if (ip_active(port)) {
+        panic("Trying to free an active port. port %p", port);
+    }
+
     if (requests != IPR_NULL) {
         ipc_table_size_t its = requests->ipr_size;
         it_requests_free(its, requests);
         port->ip_requests = IPR_NULL;
    }
+
+    ipc_mqueue_deinit(&port->ip_messages);
 
 #if	MACH_ASSERT
     ipc_port_track_dealloc(port);
 #endif	/* MACH_ASSERT */
+}
+
+/*
+ * Routine:	kdp_mqueue_send_find_owner
+ * Purpose:
+ *		Discover the owner of the ipc_mqueue that contains the input
+ *		waitq object. The thread blocked on the waitq should be
+ *		waiting for an IPC_MQUEUE_FULL event.
+ * Conditions:
+ *		The 'waitinfo->wait_type' value should already be set to
+ *		kThreadWaitPortSend.
+ * Note:
+ *		If we find out that the containing port is actually in
+ *		transit, we reset the wait_type field to reflect this.
+ */
+void
+kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+    struct turnstile *turnstile;
+    assert(waitinfo->wait_type == kThreadWaitPortSend);
+    assert(event == IPC_MQUEUE_FULL);
+    assert(waitq_is_turnstile_queue(waitq));
+
+    turnstile = waitq_to_turnstile(waitq);
+    ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
+    assert(kdp_is_in_zone(port, "ipc ports"));
+
+    waitinfo->owner = 0;
+    waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
+    if (ip_lock_held_kdp(port)) {
+        /*
+         * someone has the port locked: it may be in an
+         * inconsistent state: bail
+         */
+        waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
+        return;
+    }
 
-#if CONFIG_MACF_MACH
-    /* Port label should have been initialized after creation. */
-    mac_port_label_destroy(&port->ip_label);
-#endif
+    if (ip_active(port)) {
+        if (port->ip_tempowner) {
+            if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) {
+                /* port is held by a tempowner */
+                waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
+            } else {
+                waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
+            }
+        } else if (port->ip_receiver_name) {
+            /* port in a space */
+            if (port->ip_receiver == ipc_space_kernel) {
+                /*
+                 * The kernel pid is 0, make this
+                 * distinguishable from no-owner and
+                 * inconsistent port state.
+                 */
+                waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
+            } else {
+                waitinfo->owner = pid_from_task(port->ip_receiver->is_task);
+            }
+        } else if (port->ip_destination != IP_NULL) {
+            /* port in transit */
+            waitinfo->wait_type = kThreadWaitPortSendInTransit;
+            waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
+        }
+    }
+}
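kdp_mqueue_send_find_owner() runs in the debugger/stackshot context, so it only peeks at lock state (ip_lock_held_kdp) and never blocks. What it produces is consumed as a thread_waitinfo_t record; a hedged sketch of the field meanings as I read them from this routine:

    /* Hedged sketch: interpreting the record this routine fills in. */
    thread_waitinfo_t wi;   /* populated during a stackshot */

    switch (wi.wait_type) {
    case kThreadWaitPortSend:           /* wi.owner: receiver pid, or a
                                         * STACKSHOT_WAITOWNER_* marker */
    case kThreadWaitPortSendInTransit:  /* wi.owner: destination port address */
        break;
    default:
        break;
    }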
 
+/*
+ * Routine:	kdp_mqueue_recv_find_owner
+ * Purpose:
+ *		Discover the "owner" of the ipc_mqueue that contains the input
+ *		waitq object. The thread blocked on the waitq is trying to
+ *		receive on the mqueue.
+ * Conditions:
+ *		The 'waitinfo->wait_type' value should already be set to
+ *		kThreadWaitPortReceive.
+ * Note:
+ *		If we find that we are actually waiting on a port set, we reset
+ *		the wait_type field to reflect this.
+ */
+void
+kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+    assert(waitinfo->wait_type == kThreadWaitPortReceive);
+    assert(event == IPC_MQUEUE_RECEIVE);
+
+    ipc_mqueue_t mqueue = imq_from_waitq(waitq);
+    waitinfo->owner = 0;
+    if (imq_is_set(mqueue)) { /* we are waiting on a port set */
+        ipc_pset_t set = ips_from_mq(mqueue);
+        assert(kdp_is_in_zone(set, "ipc port sets"));
+
+        /* Reset wait type to specify waiting on port set receive */
+        waitinfo->wait_type = kThreadWaitPortSetReceive;
+        waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
+        if (ips_lock_held_kdp(set)) {
+            waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
+        }
+        /* There is no specific owner "at the other end" of a port set, so leave unset. */
+    } else {
+        ipc_port_t port = ip_from_mq(mqueue);
+        assert(kdp_is_in_zone(port, "ipc ports"));
+
+        waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
+        if (ip_lock_held_kdp(port)) {
+            waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
+            return;
+        }
+
+        if (ip_active(port)) {
+            if (port->ip_receiver_name != MACH_PORT_NULL) {
+                waitinfo->owner = port->ip_receiver_name;
+            } else {
+                waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
+            }
+        }
+    }
 }
 
 #if	MACH_ASSERT
@@ -1778,8 +2520,10 @@ ipc_port_finalize(
  *	Allocation is intercepted via ipc_port_init;
  *	deallocation is intercepted via io_free.
  */
+#if 0
 queue_head_t	port_alloc_queue;
 lck_spin_t	port_alloc_queue_lock;
+#endif
 
 unsigned long	port_count = 0;
 unsigned long	port_count_warning = 20000;
@@ -1802,9 +2546,10 @@ int	db_port_walk(
 void
 ipc_port_debug_init(void)
 {
+#if 0
     queue_init(&port_alloc_queue);
-
     lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
+#endif
 
     if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof (ipc_portbt)))
         ipc_portbt = 0;
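The surviving tail of the debug section exists mainly for the ipc_portbt boot-arg: on MACH_ASSERT kernels it decides whether ipc_port_init_debug() (called from the allocation paths earlier in this diff) records a per-port allocation callstack for leak triage. A hedged sketch of the gate (the capture itself lives in ipc_port_init_debug, which this diff does not show):

    /* Hedged sketch: ipc_portbt defaults to off unless set in boot-args. */
    if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof(ipc_portbt)))
        ipc_portbt = 0;
    /* With ipc_portbt=1, each allocation stores up to IP_CALLSTACK_MAX
     * return addresses alongside the port (MACH_ASSERT kernels only). */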