X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3903760236c30e3b5ace7a4eefac3a269d68957c..b226f5e54a60dc81db17b1260381d7dbfea3cdf1:/osfmk/ipc/ipc_port.c diff --git a/osfmk/ipc/ipc_port.c b/osfmk/ipc/ipc_port.c index 25010f1fc..823abe3a7 100644 --- a/osfmk/ipc/ipc_port.c +++ b/osfmk/ipc/ipc_port.c @@ -79,6 +79,8 @@ #include #include #include +#include +#include #include #include #include @@ -89,6 +91,8 @@ #include #include #include +#include +#include #include @@ -634,10 +638,15 @@ ipc_port_init( port->ip_strict_guard = 0; port->ip_impcount = 0; - port->ip_reserved = 0; + port->ip_specialreply = 0; + port->ip_sync_link_state = PORT_SYNC_LINK_ANY; + + reset_ip_srp_bits(port); + + port->ip_send_turnstile = TURNSTILE_NULL; ipc_mqueue_init(&port->ip_messages, - FALSE /* !set */, NULL /* no reserved link */); + FALSE /* !set */); } /* @@ -868,6 +877,7 @@ ipc_port_destroy(ipc_port_t port) ipc_port_t pdrequest, nsrequest; ipc_mqueue_t mqueue; ipc_kmsg_t kmsg; + boolean_t special_reply = port->ip_specialreply; #if IMPORTANCE_INHERITANCE ipc_importance_task_t release_imp_task = IIT_NULL; @@ -914,18 +924,27 @@ ipc_port_destroy(ipc_port_t port) port->ip_pdrequest = IP_NULL; /* make port be in limbo */ + imq_lock(&port->ip_messages); port->ip_receiver_name = MACH_PORT_NULL; port->ip_destination = IP_NULL; + imq_unlock(&port->ip_messages); ip_unlock(port); + if (special_reply) { + ipc_port_adjust_special_reply_port(port, + IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE); + } /* consumes our refs for port and pdrequest */ ipc_notify_port_destroyed(pdrequest, port); goto drop_assertions; } + /* port active bit needs to be guarded under mqueue lock for turnstiles */ + imq_lock(&port->ip_messages); port->ip_object.io_bits &= ~IO_BITS_ACTIVE; port->ip_timestamp = ipc_port_timestamp(); + imq_unlock(&port->ip_messages); nsrequest = port->ip_nsrequest; /* @@ -957,7 +976,7 @@ ipc_port_destroy(ipc_port_t port) kmsg = port->ip_premsg; assert(kmsg != IKM_NULL); inuse_port = ikm_prealloc_inuse_port(kmsg); - IP_CLEAR_PREALLOC(port, kmsg); + ipc_kmsg_clear_prealloc(kmsg, port); ip_unlock(port); if (inuse_port != IP_NULL) { assert(inuse_port == port); @@ -968,6 +987,12 @@ ipc_port_destroy(ipc_port_t port) ip_unlock(port); } + /* unlink the kmsg from special reply port */ + if (special_reply) { + ipc_port_adjust_special_reply_port(port, + IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE); + } + /* throw away no-senders request */ if (nsrequest != IP_NULL) ipc_notify_send_once(nsrequest); /* consumes ref */ @@ -1049,6 +1074,9 @@ ipc_port_check_circularity( return TRUE; base = dest; + /* Check if destination needs a turnstile */ + ipc_port_send_turnstile_prepare(dest); + /* * First try a quick check that can run in parallel. * No circularity if dest is not in transit. 
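The hunk above takes a reference on the destination port's send turnstile (ipc_port_send_turnstile_prepare(dest)) before walking the circularity chain; the matching ipc_port_send_turnstile_complete() calls appear in the hunks that follow and in ipc_port_release_receive. As defined later in this diff, the send turnstile is allocated lazily and reference-counted under the port's message-queue lock, with the allocation itself performed with that lock dropped. The sketch below is a minimal user-space model of that prepare/complete pattern, assuming nothing beyond what the diff shows; the toy_* names and pthread locking are illustrative stand-ins, not the XNU API.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct toy_turnstile {
	int ts_port_ref;                /* models the ts_port_ref count used below */
};

struct toy_port {
	pthread_mutex_t lock;           /* stands in for the port's mqueue lock */
	struct toy_turnstile *send_ts;  /* stands in for port_send_turnstile(port) */
};

void
toy_send_turnstile_prepare(struct toy_port *port)
{
	struct toy_turnstile *fresh = NULL;

retry_alloc:
	pthread_mutex_lock(&port->lock);
	if (port->send_ts == NULL) {
		if (fresh == NULL) {
			/* allocate with the lock dropped, then re-check */
			pthread_mutex_unlock(&port->lock);
			fresh = calloc(1, sizeof(*fresh));
			if (fresh == NULL)
				abort();
			goto retry_alloc;
		}
		port->send_ts = fresh;          /* attach under the lock */
		fresh = NULL;
	}
	port->send_ts->ts_port_ref++;           /* take the caller's reference */
	pthread_mutex_unlock(&port->lock);

	if (fresh != NULL)
		free(fresh);                    /* lost the race; discard the spare */
}

void
toy_send_turnstile_complete(struct toy_port *port)
{
	struct toy_turnstile *doomed = NULL;

	pthread_mutex_lock(&port->lock);
	assert(port->send_ts != NULL && port->send_ts->ts_port_ref > 0);
	if (--port->send_ts->ts_port_ref == 0) {
		doomed = port->send_ts;         /* last reference went away */
		port->send_ts = NULL;
	}
	pthread_mutex_unlock(&port->lock);

	if (doomed != NULL)
		free(doomed);                   /* deallocate outside the lock */
}

int
main(void)
{
	struct toy_port p = { .lock = PTHREAD_MUTEX_INITIALIZER, .send_ts = NULL };

	toy_send_turnstile_prepare(&p);         /* e.g. a sender is about to block */
	toy_send_turnstile_complete(&p);        /* message consumed; drop the push */
	return 0;
}

The allocate-then-retry shape mirrors the retry_alloc loop in ipc_port_send_turnstile_prepare() further down: the allocation can block, so it never happens with the lock held, and a thread that loses the race simply frees its spare object after unlocking.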
@@ -1097,19 +1125,21 @@ ipc_port_check_circularity( assert(port->ip_receiver_name == MACH_PORT_NULL); assert(port->ip_destination == IP_NULL); - while (dest != IP_NULL) { + base = dest; + while (base != IP_NULL) { ipc_port_t next; /* dest is in transit or in limbo */ - assert(ip_active(dest)); - assert(dest->ip_receiver_name == MACH_PORT_NULL); + assert(ip_active(base)); + assert(base->ip_receiver_name == MACH_PORT_NULL); - next = dest->ip_destination; - ip_unlock(dest); - dest = next; + next = base->ip_destination; + ip_unlock(base); + base = next; } + ipc_port_send_turnstile_complete(dest); return TRUE; } @@ -1122,7 +1152,8 @@ ipc_port_check_circularity( ip_lock(port); ipc_port_multiple_unlock(); - not_circular: +not_circular: + imq_lock(&port->ip_messages); /* port is in limbo */ @@ -1133,11 +1164,27 @@ ipc_port_check_circularity( ip_reference(dest); port->ip_destination = dest; + /* Setup linkage for source port if it has sync ipc push */ + struct turnstile *send_turnstile = TURNSTILE_NULL; + if (port_send_turnstile(port)) { + send_turnstile = turnstile_prepare((uintptr_t)port, + port_send_turnstile_address(port), + TURNSTILE_NULL, TURNSTILE_SYNC_IPC); + + turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest), + (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); + + /* update complete and turnstile complete called after dropping all locks */ + } + imq_unlock(&port->ip_messages); + /* now unlock chain */ ip_unlock(port); for (;;) { + ipc_port_t next; + if (dest == base) break; @@ -1147,9 +1194,9 @@ ipc_port_check_circularity( assert(dest->ip_receiver_name == MACH_PORT_NULL); assert(dest->ip_destination != IP_NULL); - port = dest->ip_destination; + next = dest->ip_destination; ip_unlock(dest); - dest = port; + dest = next; } /* base is not in transit */ @@ -1159,10 +1206,476 @@ ipc_port_check_circularity( ip_unlock(base); + /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */ + if (send_turnstile) { + turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD); + + /* Take the mq lock to call turnstile complete */ + imq_lock(&port->ip_messages); + turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL); + send_turnstile = TURNSTILE_NULL; + imq_unlock(&port->ip_messages); + turnstile_cleanup(); + } + return FALSE; #endif /* !IMPORTANCE_INHERITANCE */ } +struct turnstile * +ipc_port_get_inheritor(ipc_port_t port) +{ + ipc_mqueue_t mqueue = &port->ip_messages; + struct knote *kn; + + assert(imq_held(mqueue)); + + if (!IMQ_KLIST_VALID(mqueue)) { + return IMQ_INHERITOR(mqueue); + } + + SLIST_FOREACH(kn, &port->ip_messages.imq_klist, kn_selnext) { + if ((kn->kn_sfflags & MACH_RCV_MSG) && (kn->kn_status & KN_DISPATCH)) { + return filt_machport_kqueue_turnstile(kn); + } + } + + return TURNSTILE_NULL; +} + +/* + * Routine: ipc_port_send_turnstile_prepare + * Purpose: + * Get a reference on port's send turnstile, if + * port does not have a send turnstile then allocate one. + * + * Conditions: + * Nothing is locked. 
+ */ +void +ipc_port_send_turnstile_prepare(ipc_port_t port) +{ + struct turnstile *turnstile = TURNSTILE_NULL; + struct turnstile *inheritor = TURNSTILE_NULL; + struct turnstile *send_turnstile = TURNSTILE_NULL; + +retry_alloc: + imq_lock(&port->ip_messages); + + if (port_send_turnstile(port) == NULL || + port_send_turnstile(port)->ts_port_ref == 0) { + + if (turnstile == TURNSTILE_NULL) { + imq_unlock(&port->ip_messages); + turnstile = turnstile_alloc(); + goto retry_alloc; + } + + send_turnstile = turnstile_prepare((uintptr_t)port, + port_send_turnstile_address(port), + turnstile, TURNSTILE_SYNC_IPC); + turnstile = TURNSTILE_NULL; + + /* + * if port in transit, setup linkage for its turnstile, + * otherwise the link it to WL turnstile. + */ + if (ip_active(port) && + port->ip_receiver_name == MACH_PORT_NULL && + port->ip_destination != IP_NULL) { + assert(port->ip_receiver_name == MACH_PORT_NULL); + assert(port->ip_destination != IP_NULL); + + inheritor = port_send_turnstile(port->ip_destination); + } else { + inheritor = ipc_port_get_inheritor(port); + } + turnstile_update_inheritor(send_turnstile, inheritor, + TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE); + /* turnstile complete will be called in ipc_port_send_turnstile_complete */ + } + + /* Increment turnstile counter */ + port_send_turnstile(port)->ts_port_ref++; + imq_unlock(&port->ip_messages); + + if (send_turnstile) { + turnstile_update_inheritor_complete(send_turnstile, + TURNSTILE_INTERLOCK_NOT_HELD); + } + if (turnstile != TURNSTILE_NULL) { + turnstile_deallocate(turnstile); + } +} + + +/* + * Routine: ipc_port_send_turnstile_complete + * Purpose: + * Drop a ref on the port's send turnstile, if the + * ref becomes zero, deallocate the turnstile. + * + * Conditions: + * The space might be locked, use safe deallocate. + */ +void +ipc_port_send_turnstile_complete(ipc_port_t port) +{ + struct turnstile *turnstile = TURNSTILE_NULL; + + /* Drop turnstile count on dest port */ + imq_lock(&port->ip_messages); + + port_send_turnstile(port)->ts_port_ref--; + if (port_send_turnstile(port)->ts_port_ref == 0) { + turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), + &turnstile); + assert(turnstile != TURNSTILE_NULL); + } + imq_unlock(&port->ip_messages); + turnstile_cleanup(); + + if (turnstile != TURNSTILE_NULL) { + turnstile_deallocate_safe(turnstile); + turnstile = TURNSTILE_NULL; + } +} + + +/* + * Routine: ipc_port_rcv_turnstile_waitq + * Purpose: + * Given the mqueue's waitq, find the port's + * rcv turnstile and return its waitq. + * + * Conditions: + * mqueue locked or thread waiting on turnstile is locked. + */ +struct waitq * +ipc_port_rcv_turnstile_waitq(struct waitq *waitq) +{ + struct waitq *safeq; + + ipc_mqueue_t mqueue = imq_from_waitq(waitq); + ipc_port_t port = ip_from_mq(mqueue); + struct turnstile *rcv_turnstile = ipc_port_rcv_turnstile(port); + + /* Check if the port has a rcv turnstile */ + if (rcv_turnstile != TURNSTILE_NULL) { + safeq = &rcv_turnstile->ts_waitq; + } else { + safeq = global_eventq(waitq); + } + return safeq; +} + + +/* + * Routine: ipc_port_rcv_turnstile + * Purpose: + * Get the port's receive turnstile + * + * Conditions: + * mqueue locked or thread waiting on turnstile is locked. + */ +struct turnstile * +ipc_port_rcv_turnstile(ipc_port_t port) +{ + return turnstile_lookup_by_proprietor((uintptr_t)port); +} + + +/* + * Routine: ipc_port_link_special_reply_port + * Purpose: + * Link the special reply port with the destination port. 
+ * Allocates turnstile to dest port. + * + * Conditions: + * Nothing is locked. + */ +void +ipc_port_link_special_reply_port( + ipc_port_t special_reply_port, + ipc_port_t dest_port) +{ + boolean_t drop_turnstile_ref = FALSE; + + /* Check if dest_port needs a turnstile */ + ipc_port_send_turnstile_prepare(dest_port); + + /* Lock the special reply port and establish the linkage */ + ip_lock(special_reply_port); + imq_lock(&special_reply_port->ip_messages); + + /* Check if we need to drop the acquired turnstile ref on dest port */ + if (!special_reply_port->ip_specialreply || + special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY || + special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) { + drop_turnstile_ref = TRUE; + } else { + /* take a reference on dest_port */ + ip_reference(dest_port); + special_reply_port->ip_sync_inheritor_port = dest_port; + special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT; + } + + imq_unlock(&special_reply_port->ip_messages); + ip_unlock(special_reply_port); + + if (drop_turnstile_ref) { + ipc_port_send_turnstile_complete(dest_port); + } + + return; +} + +#if DEVELOPMENT || DEBUG +inline void +reset_ip_srp_bits(ipc_port_t special_reply_port) +{ + special_reply_port->ip_srp_lost_link = 0; + special_reply_port->ip_srp_msg_sent = 0; +} + +inline void +reset_ip_srp_msg_sent(ipc_port_t special_reply_port) +{ + if (special_reply_port->ip_specialreply == 1) { + special_reply_port->ip_srp_msg_sent = 0; + } +} + +inline void +set_ip_srp_msg_sent(ipc_port_t special_reply_port) +{ + if (special_reply_port->ip_specialreply == 1) { + special_reply_port->ip_srp_msg_sent = 1; + } +} + +inline void +set_ip_srp_lost_link(ipc_port_t special_reply_port) +{ + if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) { + special_reply_port->ip_srp_lost_link = 1; + } +} + +#else /* DEVELOPMENT || DEBUG */ +inline void +reset_ip_srp_bits(__unused ipc_port_t special_reply_port) +{ + return; +} + +inline void +reset_ip_srp_msg_sent(__unused ipc_port_t special_reply_port) +{ + return; +} + +inline void +set_ip_srp_msg_sent(__unused ipc_port_t special_reply_port) +{ + return; +} + +inline void +set_ip_srp_lost_link(__unused ipc_port_t special_reply_port) +{ + return; +} +#endif /* DEVELOPMENT || DEBUG */ + +/* + * Routine: ipc_port_adjust_special_reply_port_locked + * Purpose: + * If the special port has a turnstile, update it's inheritor. + * Condition: + * Special reply port locked on entry. + * Special reply port unlocked on return. + * Returns: + * None. 
+ */ +void +ipc_port_adjust_special_reply_port_locked( + ipc_port_t special_reply_port, + struct knote *kn, + uint8_t flags, + boolean_t get_turnstile) +{ + ipc_port_t dest_port = IPC_PORT_NULL; + int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE; + turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL; + struct turnstile *dest_ts = TURNSTILE_NULL, *ts = TURNSTILE_NULL; + + imq_lock(&special_reply_port->ip_messages); + + if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) { + reset_ip_srp_msg_sent(special_reply_port); + } + + /* Check if the special reply port is marked non-special */ + if (special_reply_port->ip_specialreply == 0 || + special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) { + if (get_turnstile) { + turnstile_complete((uintptr_t)special_reply_port, + port_rcv_turnstile_address(special_reply_port), + NULL); + } + imq_unlock(&special_reply_port->ip_messages); + ip_unlock(special_reply_port); + if (get_turnstile) { + turnstile_cleanup(); + } + return; + } + + /* Clear thread's special reply port and clear linkage */ + if (flags & IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY) { + /* This option should only be specified by a non blocking thread */ + assert(get_turnstile == FALSE); + special_reply_port->ip_specialreply = 0; + + reset_ip_srp_bits(special_reply_port); + + /* Check if need to break linkage */ + if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) { + imq_unlock(&special_reply_port->ip_messages); + ip_unlock(special_reply_port); + return; + } + } else if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) { + if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY || + special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT) { + if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) { + inheritor = filt_machport_stash_port(kn, special_reply_port, + &sync_link_state); + } + } + } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) { + sync_link_state = PORT_SYNC_LINK_ANY; + } + + switch (special_reply_port->ip_sync_link_state) { + case PORT_SYNC_LINK_PORT: + dest_port = special_reply_port->ip_sync_inheritor_port; + special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL; + break; + case PORT_SYNC_LINK_WORKLOOP_KNOTE: + special_reply_port->ip_sync_inheritor_knote = NULL; + break; + case PORT_SYNC_LINK_WORKLOOP_STASH: + dest_ts = special_reply_port->ip_sync_inheritor_ts; + special_reply_port->ip_sync_inheritor_ts = NULL; + break; + } + + special_reply_port->ip_sync_link_state = sync_link_state; + + switch (sync_link_state) { + case PORT_SYNC_LINK_WORKLOOP_KNOTE: + special_reply_port->ip_sync_inheritor_knote = kn; + break; + case PORT_SYNC_LINK_WORKLOOP_STASH: + turnstile_reference(inheritor); + special_reply_port->ip_sync_inheritor_ts = inheritor; + break; + case PORT_SYNC_LINK_NO_LINKAGE: + if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) { + set_ip_srp_lost_link(special_reply_port); + } + break; + } + + /* Get thread's turnstile donated to special reply port */ + if (get_turnstile) { + turnstile_complete((uintptr_t)special_reply_port, + port_rcv_turnstile_address(special_reply_port), + NULL); + } else { + ts = ipc_port_rcv_turnstile(special_reply_port); + if (ts) { + turnstile_reference(ts); + turnstile_update_inheritor(ts, inheritor, + (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); + } + } + + imq_unlock(&special_reply_port->ip_messages); + ip_unlock(special_reply_port); + + if (get_turnstile) { + turnstile_cleanup(); + } else if (ts) { + /* Call turnstile cleanup after dropping the interlock */ + 
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD); + turnstile_deallocate_safe(ts); + } + + /* Release the ref on the dest port and it's turnstile */ + if (dest_port) { + ipc_port_send_turnstile_complete(dest_port); + /* release the reference on the dest port */ + ip_release(dest_port); + } + + if (dest_ts) { + turnstile_deallocate_safe(dest_ts); + } +} + +/* + * Routine: ipc_port_adjust_special_reply_port + * Purpose: + * If the special port has a turnstile, update it's inheritor. + * Condition: + * Nothing locked. + * Returns: + * None. + */ +void +ipc_port_adjust_special_reply_port( + ipc_port_t special_reply_port, + uint8_t flags, + boolean_t get_turnstile) +{ + ip_lock(special_reply_port); + ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL, flags, get_turnstile); + /* special_reply_port unlocked */ +} + +/* + * Routine: ipc_port_get_special_reply_port_inheritor + * Purpose: + * Returns the current inheritor of the special reply port + * Condition: + * mqueue is locked, port is a special reply port + * Returns: + * the current inheritor + */ +turnstile_inheritor_t +ipc_port_get_special_reply_port_inheritor( + ipc_port_t port) +{ + assert(port->ip_specialreply); + imq_held(&port->ip_messages); + + switch (port->ip_sync_link_state) { + case PORT_SYNC_LINK_PORT: + if (port->ip_sync_inheritor_port != NULL) { + return port_send_turnstile(port->ip_sync_inheritor_port); + } + break; + case PORT_SYNC_LINK_WORKLOOP_KNOTE: + return filt_machport_stashed_special_reply_port_turnstile(port); + case PORT_SYNC_LINK_WORKLOOP_STASH: + return port->ip_sync_inheritor_ts; + } + return TURNSTILE_INHERITOR_NULL; +} + /* * Routine: ipc_port_impcount_delta * Purpose: @@ -1580,6 +2093,40 @@ ipc_port_copyout_send( return name; } +/* + * Routine: ipc_port_copyout_name_send + * Purpose: + * Copyout a naked send right (possibly null/dead) to given name, + * or if that fails, destroy the right. + * Conditions: + * Nothing locked. + */ + +mach_port_name_t +ipc_port_copyout_name_send( + ipc_port_t sright, + ipc_space_t space, + mach_port_name_t name) +{ + if (IP_VALID(sright)) { + kern_return_t kr; + + kr = ipc_object_copyout_name(space, (ipc_object_t) sright, + MACH_MSG_TYPE_PORT_SEND, TRUE, name); + if (kr != KERN_SUCCESS) { + ipc_port_release_send(sright); + + if (kr == KERN_INVALID_CAPABILITY) + name = MACH_PORT_DEAD; + else + name = MACH_PORT_NULL; + } + } else + name = CAST_MACH_PORT_TO_NAME(sright); + + return name; +} + /* * Routine: ipc_port_release_send * Purpose: @@ -1602,6 +2149,10 @@ ipc_port_release_send( ip_lock(port); assert(port->ip_srights > 0); + if (port->ip_srights == 0) { + panic("Over-release of port %p send right!", port); + } + port->ip_srights--; if (!ip_active(port)) { @@ -1689,9 +2240,14 @@ ipc_port_release_sonce( if (!IP_VALID(port)) return; + ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_SR_NONE, FALSE); + ip_lock(port); assert(port->ip_sorights > 0); + if (port->ip_sorights == 0) { + panic("Over-release of port %p send-once right!", port); + } port->ip_sorights--; @@ -1724,8 +2280,10 @@ ipc_port_release_receive( ipc_port_destroy(port); /* consumes ref, unlocks */ - if (dest != IP_NULL) + if (dest != IP_NULL) { + ipc_port_send_turnstile_complete(dest); ip_release(dest); + } } /* @@ -1791,8 +2349,10 @@ ipc_port_dealloc_special( * the ipc_space_kernel check in ipc_mqueue_send. 
*/ + imq_lock(&port->ip_messages); port->ip_receiver_name = MACH_PORT_NULL; port->ip_receiver = IS_NULL; + imq_unlock(&port->ip_messages); /* relevant part of ipc_port_clear_receiver */ ipc_port_set_mscount(port, 0); @@ -1816,7 +2376,13 @@ ipc_port_finalize( { ipc_port_request_t requests = port->ip_requests; - assert(!ip_active(port)); + assert(port_send_turnstile(port) == TURNSTILE_NULL); + assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL); + + if (ip_active(port)) { + panic("Trying to free an active port. port %p", port); + } + if (requests != IPR_NULL) { ipc_table_size_t its = requests->ipr_size; it_requests_free(its, requests); @@ -1830,6 +2396,122 @@ ipc_port_finalize( #endif /* MACH_ASSERT */ } +/* + * Routine: kdp_mqueue_send_find_owner + * Purpose: + * Discover the owner of the ipc_mqueue that contains the input + * waitq object. The thread blocked on the waitq should be + * waiting for an IPC_MQUEUE_FULL event. + * Conditions: + * The 'waitinfo->wait_type' value should already be set to + * kThreadWaitPortSend. + * Note: + * If we find out that the containing port is actually in + * transit, we reset the wait_type field to reflect this. + */ +void +kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo) +{ + struct turnstile *turnstile; + assert(waitinfo->wait_type == kThreadWaitPortSend); + assert(event == IPC_MQUEUE_FULL); + assert(waitq_is_turnstile_queue(waitq)); + + turnstile = waitq_to_turnstile(waitq); + ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */ + assert(kdp_is_in_zone(port, "ipc ports")); + + waitinfo->owner = 0; + waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); + if (ip_lock_held_kdp(port)) { + /* + * someone has the port locked: it may be in an + * inconsistent state: bail + */ + waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED; + return; + } + + if (ip_active(port)) { + if (port->ip_tempowner) { + if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) { + /* port is held by a tempowner */ + waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task); + } else { + waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT; + } + } else if (port->ip_receiver_name) { + /* port in a space */ + if (port->ip_receiver == ipc_space_kernel) { + /* + * The kernel pid is 0, make this + * distinguishable from no-owner and + * inconsistent port state. + */ + waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL; + } else { + waitinfo->owner = pid_from_task(port->ip_receiver->is_task); + } + } else if (port->ip_destination != IP_NULL) { + /* port in transit */ + waitinfo->wait_type = kThreadWaitPortSendInTransit; + waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination); + } + } +} + +/* + * Routine: kdp_mqueue_recv_find_owner + * Purpose: + * Discover the "owner" of the ipc_mqueue that contains the input + * waitq object. The thread blocked on the waitq is trying to + * receive on the mqueue. + * Conditions: + * The 'waitinfo->wait_type' value should already be set to + * kThreadWaitPortReceive. + * Note: + * If we find that we are actualy waiting on a port set, we reset + * the wait_type field to reflect this. 
+ */ +void +kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo) +{ + assert(waitinfo->wait_type == kThreadWaitPortReceive); + assert(event == IPC_MQUEUE_RECEIVE); + + ipc_mqueue_t mqueue = imq_from_waitq(waitq); + waitinfo->owner = 0; + if (imq_is_set(mqueue)) { /* we are waiting on a port set */ + ipc_pset_t set = ips_from_mq(mqueue); + assert(kdp_is_in_zone(set, "ipc port sets")); + + /* Reset wait type to specify waiting on port set receive */ + waitinfo->wait_type = kThreadWaitPortSetReceive; + waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set); + if (ips_lock_held_kdp(set)) { + waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED; + } + /* There is no specific owner "at the other end" of a port set, so leave unset. */ + } else { + ipc_port_t port = ip_from_mq(mqueue); + assert(kdp_is_in_zone(port, "ipc ports")); + + waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); + if (ip_lock_held_kdp(port)) { + waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED; + return; + } + + if (ip_active(port)) { + if (port->ip_receiver_name != MACH_PORT_NULL) { + waitinfo->owner = port->ip_receiver_name; + } else { + waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT; + } + } + } +} + #if MACH_ASSERT #include
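kdp_mqueue_send_find_owner() above resolves the owner of a blocked send in a fixed order: a locked port is reported as locked rather than inspected, a port sitting in a space is attributed to its receiver (with the kernel kept distinguishable from "no owner"), and a port still in transit is attributed to its destination. The sketch below is a simplified user-space model of that decision order; it omits the importance/tempowner branch, and the toy_* types and constants are illustrative stand-ins, not the stackshot interface.

#include <stdio.h>

enum toy_owner {
	TOY_OWNER_NONE,
	TOY_OWNER_PORT_LOCKED,
	TOY_OWNER_KERNEL,
	TOY_OWNER_PID,
	TOY_OWNER_IN_TRANSIT,
};

struct toy_port_state {
	int locked;        /* someone holds the port lock */
	int active;        /* port has not been destroyed */
	int in_space;      /* port sits in some task's IPC space */
	int receiver_pid;  /* pid of that task; 0 means the kernel space */
	int in_transit;    /* port is queued inside another port's message */
};

static enum toy_owner
toy_send_find_owner(const struct toy_port_state *p, int *pid_out)
{
	if (p->locked)
		return TOY_OWNER_PORT_LOCKED;    /* state may be inconsistent: bail */
	if (!p->active)
		return TOY_OWNER_NONE;
	if (p->in_space) {
		if (p->receiver_pid == 0)
			return TOY_OWNER_KERNEL; /* keep pid 0 distinct from "no owner" */
		*pid_out = p->receiver_pid;
		return TOY_OWNER_PID;
	}
	if (p->in_transit)
		return TOY_OWNER_IN_TRANSIT;     /* report the destination port instead */
	return TOY_OWNER_NONE;
}

int
main(void)
{
	struct toy_port_state p = { .active = 1, .in_space = 1, .receiver_pid = 123 };
	int pid = 0;

	if (toy_send_find_owner(&p, &pid) == TOY_OWNER_PID)
		printf("blocked sender attributed to pid %d\n", pid);
	return 0;
}

The same "if locked, bail out" rule applies on the receive side in kdp_mqueue_recv_find_owner() above, since the debugger path must never trust the fields of a port or port set whose lock is currently held.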