X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/316670eb35587141e969394ae8537d66b9211e80..4d15aeb193b2c68f1d38666c317f8d3734f5f083:/osfmk/ipc/mach_port.c diff --git a/osfmk/ipc/mach_port.c b/osfmk/ipc/mach_port.c index 18c764892..851a6a1ee 100644 --- a/osfmk/ipc/mach_port.c +++ b/osfmk/ipc/mach_port.c @@ -95,11 +95,13 @@ #include #include #include -#include #include #include -#include +#if IMPORTANCE_INHERITANCE +#include +#endif + /* * Forward declarations @@ -114,12 +116,23 @@ void mach_port_names_helper( void mach_port_gst_helper( ipc_pset_t pset, - ipc_port_t port, ipc_entry_num_t maxnames, mach_port_name_t *names, ipc_entry_num_t *actualp); +kern_return_t +mach_port_guard_exception( + mach_port_name_t name, + uint64_t inguard, + uint64_t portguard, + unsigned reason); + +/* Needs port locked */ +void mach_port_get_status_helper( + ipc_port_t port, + mach_port_status_t *status); + /* Zeroed template of qos flags */ static mach_port_qos_t qos_template; @@ -150,7 +163,7 @@ mach_port_names_helper( bits = entry->ie_bits; request = entry->ie_request; - port = (ipc_port_t) entry->ie_object; + __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object); if (bits & MACH_PORT_TYPE_RECEIVE) { assert(IP_VALID(port)); @@ -237,7 +250,7 @@ mach_port_names( vm_map_copy_t memory2; /* copied-in memory, for types */ /* safe simplifying assumption */ - assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t)); + static_assert(sizeof(mach_port_name_t) == sizeof(mach_port_type_t)); if (space == IS_NULL) return KERN_INVALID_TASK; @@ -260,7 +273,9 @@ mach_port_names( /* upper bound on number of names in the space */ bound = space->is_table_size; - size_needed = round_page(bound * sizeof(mach_port_name_t)); + size_needed = vm_map_round_page( + (bound * sizeof(mach_port_name_t)), + VM_MAP_PAGE_MASK(ipc_kernel_map)); if (size_needed <= size) break; @@ -273,11 +288,11 @@ mach_port_names( } size = size_needed; - kr = vm_allocate(ipc_kernel_map, &addr1, size, VM_FLAGS_ANYWHERE); + kr = vm_allocate(ipc_kernel_map, &addr1, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC)); if (kr != KERN_SUCCESS) return KERN_RESOURCE_SHORTAGE; - kr = vm_allocate(ipc_kernel_map, &addr2, size, VM_FLAGS_ANYWHERE); + kr = vm_allocate(ipc_kernel_map, &addr2, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC)); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr1, size); return KERN_RESOURCE_SHORTAGE; @@ -285,18 +300,28 @@ mach_port_names( /* can't fault while we hold locks */ - kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr1), - vm_map_round_page(addr1 + size), - VM_PROT_READ|VM_PROT_WRITE, FALSE); + kr = vm_map_wire( + ipc_kernel_map, + vm_map_trunc_page(addr1, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + vm_map_round_page(addr1 + size, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), + FALSE); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr1, size); kmem_free(ipc_kernel_map, addr2, size); return KERN_RESOURCE_SHORTAGE; } - kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr2), - vm_map_round_page(addr2 + size), - VM_PROT_READ|VM_PROT_WRITE, FALSE); + kr = vm_map_wire( + ipc_kernel_map, + vm_map_trunc_page(addr2, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + vm_map_round_page(addr2 + size, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), + FALSE); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr1, size); kmem_free(ipc_kernel_map, addr2, size); @@ -343,19 
+368,31 @@ mach_port_names( vm_size_t vm_size_used; size_used = actual * sizeof(mach_port_name_t); - vm_size_used = round_page(size_used); + vm_size_used = + vm_map_round_page(size_used, + VM_MAP_PAGE_MASK(ipc_kernel_map)); /* * Make used memory pageable and get it into * copied-in form. Free any unused memory. */ - kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr1), - vm_map_round_page(addr1 + vm_size_used), FALSE); + kr = vm_map_unwire( + ipc_kernel_map, + vm_map_trunc_page(addr1, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + vm_map_round_page(addr1 + vm_size_used, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + FALSE); assert(kr == KERN_SUCCESS); - kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr2), - vm_map_round_page(addr2 + vm_size_used), FALSE); + kr = vm_map_unwire( + ipc_kernel_map, + vm_map_trunc_page(addr2, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + vm_map_round_page(addr2 + vm_size_used, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr1, @@ -729,7 +766,7 @@ mach_port_destroy( return kr; /* space is write-locked and active */ - kr = ipc_right_destroy(space, name, entry); /* unlocks space */ + kr = ipc_right_destroy(space, name, entry, TRUE, 0); /* unlocks space */ return kr; } @@ -908,6 +945,95 @@ mach_port_mod_refs( } +/* + * Routine: mach_port_peek [kernel call] + * Purpose: + * Peek at the message queue for the specified receive + * right and return info about a message in the queue. + * + * On input, seqnop points to a sequence number value + * to match the message being peeked. If zero is specified + * as the seqno, the first message in the queue will be + * peeked. + * + * Only the following trailer types are currently supported: + * MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) + * + * or'ed with one of these element types: + * MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_NULL) + * MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_SEQNO) + * MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_SENDER) + * MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) + * + * On input, the value pointed to by trailer_sizep must be + * large enough to hold the requested trailer size. + * + * The message sequence number, id, size, requested trailer info + * and requested trailer size are returned in their respective + * output parameters upon success. + * + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Matching message found, out parameters set. + * KERN_INVALID_TASK The space is null or dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote receive rights. + * KERN_INVALID_VALUE The input parameter values are out of bounds. + * KERN_FAILURE The requested message was not found. + */ + +kern_return_t +mach_port_peek( + ipc_space_t space, + mach_port_name_t name, + mach_msg_trailer_type_t trailer_type, + mach_port_seqno_t *seqnop, + mach_msg_size_t *msg_sizep, + mach_msg_id_t *msg_idp, + mach_msg_trailer_info_t trailer_infop, + mach_msg_type_number_t *trailer_sizep) +{ + ipc_port_t port; + kern_return_t kr; + boolean_t found; + mach_msg_max_trailer_t max_trailer; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + /* + * We don't allow anything greater than the audit trailer - to avoid + * leaking the context pointer and to avoid variable-sized context issues. 
+ */ + if (GET_RCV_ELEMENTS(trailer_type) > MACH_RCV_TRAILER_AUDIT || + REQUESTED_TRAILER_SIZE(TRUE, trailer_type) > *trailer_sizep) + return KERN_INVALID_VALUE; + + *trailer_sizep = REQUESTED_TRAILER_SIZE(TRUE, trailer_type); + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + + /* Port locked and active */ + + found = ipc_mqueue_peek(&port->ip_messages, seqnop, + msg_sizep, msg_idp, &max_trailer, NULL); + ip_unlock(port); + + if (found != TRUE) + return KERN_FAILURE; + + max_trailer.msgh_seqno = *seqnop; + memcpy(trailer_infop, &max_trailer, *trailer_sizep); + + return KERN_SUCCESS; +} + /* * Routine: mach_port_set_mscount [kernel call] * Purpose: @@ -1021,8 +1147,13 @@ mach_port_get_context( if (kr != KERN_SUCCESS) return kr; - /* port is locked and active */ - *context = port->ip_context; + /* Port locked and active */ + + /* For strictly guarded ports, return empty context (which acts as guard) */ + if (port->ip_strict_guard) + *context = 0; + else + *context = port->ip_context; ip_unlock(port); return KERN_SUCCESS; @@ -1063,6 +1194,14 @@ mach_port_set_context( return kr; /* port is locked and active */ + if(port->ip_strict_guard) { + uint64_t portguard = port->ip_context; + ip_unlock(port); + /* For strictly guarded ports, disallow overwriting context; Raise Exception */ + mach_port_guard_exception(name, context, portguard, kGUARD_EXC_SET_CONTEXT); + return KERN_INVALID_ARGUMENT; + } + port->ip_context = context; ip_unlock(port); @@ -1070,44 +1209,6 @@ mach_port_set_context( } -/* - * Routine: mach_port_gst_helper - * Conditions: - * portspace is locked for both the recieve right and pset - * under observation. - * Purpose: - * A helper function for mach_port_get_set_status. - */ - -void -mach_port_gst_helper( - ipc_pset_t pset, - ipc_port_t port, - ipc_entry_num_t maxnames, - mach_port_name_t *names, - ipc_entry_num_t *actualp) -{ - mach_port_name_t name; - - assert(port != IP_NULL); - /* - * The space lock is held by the calling function, - * hence it is OK to read name without the port lock. 
- */ - assert(ip_active(port)); - name = port->ip_receiver_name; - assert(name != MACH_PORT_NULL); - - if (ipc_pset_member(pset, port)) { - ipc_entry_num_t actual = *actualp; - - if (actual < maxnames) - names[actual] = name; - - *actualp = actual+1; - } -} - /* * Routine: mach_port_get_set_status [kernel call] * Purpose: @@ -1145,70 +1246,51 @@ mach_port_get_set_status( if (!MACH_PORT_VALID(name)) return KERN_INVALID_RIGHT; - size = PAGE_SIZE; /* initial guess */ + size = VM_MAP_PAGE_SIZE(ipc_kernel_map); /* initial guess */ for (;;) { - ipc_entry_t entry, table; - ipc_entry_num_t tsize; - mach_port_index_t index; mach_port_name_t *names; + ipc_object_t psobj; ipc_pset_t pset; - kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE); + kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC)); if (kr != KERN_SUCCESS) return KERN_RESOURCE_SHORTAGE; /* can't fault while we hold locks */ kr = vm_map_wire(ipc_kernel_map, addr, addr + size, - VM_PROT_READ|VM_PROT_WRITE, FALSE); + VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), FALSE); assert(kr == KERN_SUCCESS); - kr = ipc_right_lookup_read(space, name, &entry); + kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_PORT_SET, &psobj); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr, size); return kr; } - /* space is read-locked and active */ - if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) { - is_read_unlock(space); - kmem_free(ipc_kernel_map, addr, size); - return KERN_INVALID_RIGHT; - } - - pset = (ipc_pset_t) entry->ie_object; - assert(pset != IPS_NULL); - /* the port set must be active */ + /* just use a portset reference from here on out */ + __IGNORE_WCASTALIGN(pset = (ipc_pset_t) psobj); + ips_reference(pset); + ips_unlock(pset); names = (mach_port_name_t *) addr; maxnames = (ipc_entry_num_t)(size / sizeof(mach_port_name_t)); - actual = 0; - - table = space->is_table; - tsize = space->is_table_size; - for (index = 0; index < tsize; index++) { - ipc_entry_t ientry = &table[index]; + ipc_mqueue_set_gather_member_names(space, &pset->ips_messages, maxnames, names, &actual); - if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) { - ipc_port_t port = - (ipc_port_t) ientry->ie_object; - - mach_port_gst_helper(pset, port, - maxnames, names, &actual); - } - } - - is_read_unlock(space); + /* release the portset reference */ + ips_release(pset); if (actual <= maxnames) break; /* didn't have enough memory; allocate more */ - kmem_free(ipc_kernel_map, addr, size); - size = round_page(actual * sizeof(mach_port_name_t)) + PAGE_SIZE; + size = vm_map_round_page( + (actual * sizeof(mach_port_name_t)), + VM_MAP_PAGE_MASK(ipc_kernel_map)) + + VM_MAP_PAGE_SIZE(ipc_kernel_map); } if (actual == 0) { @@ -1220,15 +1302,22 @@ mach_port_get_set_status( vm_size_t vm_size_used; size_used = actual * sizeof(mach_port_name_t); - vm_size_used = round_page(size_used); + vm_size_used = vm_map_round_page( + size_used, + VM_MAP_PAGE_MASK(ipc_kernel_map)); /* * Make used memory pageable and get it into * copied-in form. Free any unused memory. 
*/ - kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr), - vm_map_round_page(addr + vm_size_used), FALSE); + kr = vm_map_unwire( + ipc_kernel_map, + vm_map_trunc_page(addr, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + vm_map_round_page(addr + vm_size_used, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, @@ -1276,9 +1365,8 @@ mach_port_move_member( ipc_port_t port; ipc_pset_t nset; kern_return_t kr; - wait_queue_link_t wql; - queue_head_t links_data; - queue_t links = &links_data; + uint64_t wq_link_id = 0; + uint64_t wq_reserved_prepost = 0; if (space == IS_NULL) return KERN_INVALID_TASK; @@ -1286,14 +1374,24 @@ mach_port_move_member( if (!MACH_PORT_VALID(member)) return KERN_INVALID_RIGHT; - if (after == MACH_PORT_DEAD) + if (after == MACH_PORT_DEAD) { return KERN_INVALID_RIGHT; - else if (after == MACH_PORT_NULL) - wql = WAIT_QUEUE_LINK_NULL; - else - wql = wait_queue_link_allocate(); - - queue_init(links); + } else if (after == MACH_PORT_NULL) { + wq_link_id = 0; + } else { + /* + * We reserve both a link, and + * enough prepost objects to complete + * the set move atomically - we can't block + * while we're holding the space lock, and + * the ipc_pset_add calls ipc_mqueue_add + * which may have to prepost this port onto + * this set. + */ + wq_link_id = waitq_link_reserve(NULL); + wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, + WAITQ_DONT_LOCK); + } kr = ipc_right_lookup_read(space, member, &entry); if (kr != KERN_SUCCESS) @@ -1306,7 +1404,7 @@ mach_port_move_member( goto done; } - port = (ipc_port_t) entry->ie_object; + __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object); assert(port != IP_NULL); if (after == MACH_PORT_NULL) @@ -1325,27 +1423,29 @@ mach_port_move_member( goto done; } - nset = (ipc_pset_t) entry->ie_object; + __IGNORE_WCASTALIGN(nset = (ipc_pset_t) entry->ie_object); assert(nset != IPS_NULL); } ip_lock(port); - ipc_pset_remove_from_all(port, links); + assert(ip_active(port)); + ipc_pset_remove_from_all(port); if (nset != IPS_NULL) { ips_lock(nset); - kr = ipc_pset_add(nset, port, wql); + kr = ipc_pset_add(nset, port, &wq_link_id, &wq_reserved_prepost); ips_unlock(nset); } ip_unlock(port); is_read_unlock(space); done: - if (kr != KERN_SUCCESS && wql != WAIT_QUEUE_LINK_NULL) - wait_queue_link_free(wql); - while(!queue_empty(links)) { - wql = (wait_queue_link_t) dequeue(links); - wait_queue_link_free(wql); - } + + /* + * on success the ipc_pset_add() will consume the wq_link_id + * value (resetting it to 0), so this function is always safe to call. + */ + waitq_link_release(wq_link_id); + waitq_prepost_release_reserve(wq_reserved_prepost); return kr; } @@ -1450,6 +1550,12 @@ mach_port_request_notification( return kr; /* port is locked and active */ + /* you cannot register for port death notifications on a kobject */ + if (ip_kotype(port) != IKOT_NONE) { + ip_unlock(port); + return KERN_INVALID_RIGHT; + } + ipc_port_pdrequest(port, notify, &previous); /* port is unlocked */ @@ -1599,6 +1705,53 @@ mach_port_extract_right( return kr; } +/* + * Routine: mach_port_get_status_helper [helper] + * Purpose: + * Populates a mach_port_status_t structure with + * port information. + * Conditions: + * Port needs to be locked + * Returns: + * None. 
+ */ +void mach_port_get_status_helper( + ipc_port_t port, + mach_port_status_t *statusp) +{ + imq_lock(&port->ip_messages); + /* don't leak set IDs, just indicate that the port is in one or not */ + statusp->mps_pset = !!(port->ip_in_pset); + statusp->mps_seqno = port->ip_messages.imq_seqno; + statusp->mps_qlimit = port->ip_messages.imq_qlimit; + statusp->mps_msgcount = port->ip_messages.imq_msgcount; + imq_unlock(&port->ip_messages); + + statusp->mps_mscount = port->ip_mscount; + statusp->mps_sorights = port->ip_sorights; + statusp->mps_srights = port->ip_srights > 0; + statusp->mps_pdrequest = port->ip_pdrequest != IP_NULL; + statusp->mps_nsrequest = port->ip_nsrequest != IP_NULL; + statusp->mps_flags = 0; + if (port->ip_impdonation) { + statusp->mps_flags |= MACH_PORT_STATUS_FLAG_IMP_DONATION; + if (port->ip_tempowner) { + statusp->mps_flags |= MACH_PORT_STATUS_FLAG_TEMPOWNER; + if (IIT_NULL != port->ip_imp_task) { + statusp->mps_flags |= MACH_PORT_STATUS_FLAG_TASKPTR; + } + } + } + if (port->ip_guarded) { + statusp->mps_flags |= MACH_PORT_STATUS_FLAG_GUARDED; + if (port->ip_strict_guard) { + statusp->mps_flags |= MACH_PORT_STATUS_FLAG_STRICT_GUARD; + } + } + return; +} + + kern_return_t mach_port_get_attributes( @@ -1638,41 +1791,23 @@ mach_port_get_attributes( } case MACH_PORT_RECEIVE_STATUS: { - mach_port_status_t *statusp = (mach_port_status_t *)info; - spl_t s; - - if (*count < MACH_PORT_RECEIVE_STATUS_COUNT) - return KERN_FAILURE; - + mach_port_status_t *statusp = (mach_port_status_t *)info; + + if (*count < MACH_PORT_RECEIVE_STATUS_COUNT) + return KERN_FAILURE; + if (!MACH_PORT_VALID(name)) return KERN_INVALID_RIGHT; - kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) - return kr; - /* port is locked and active */ - - statusp->mps_pset = port->ip_pset_count; - - s = splsched(); - imq_lock(&port->ip_messages); - statusp->mps_seqno = port->ip_messages.imq_seqno; - statusp->mps_qlimit = port->ip_messages.imq_qlimit; - statusp->mps_msgcount = port->ip_messages.imq_msgcount; - imq_unlock(&port->ip_messages); - splx(s); - - statusp->mps_mscount = port->ip_mscount; - statusp->mps_sorights = port->ip_sorights; - statusp->mps_srights = port->ip_srights > 0; - statusp->mps_pdrequest = port->ip_pdrequest != IP_NULL; - statusp->mps_nsrequest = port->ip_nsrequest != IP_NULL; - statusp->mps_flags = 0; - - *count = MACH_PORT_RECEIVE_STATUS_COUNT; - ip_unlock(port); - break; - } + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + mach_port_get_status_helper(port, statusp); + *count = MACH_PORT_RECEIVE_STATUS_COUNT; + ip_unlock(port); + break; + } case MACH_PORT_DNREQUESTS_SIZE: { ipc_port_request_t table; @@ -1700,6 +1835,25 @@ mach_port_get_attributes( break; } + case MACH_PORT_INFO_EXT: { + mach_port_info_ext_t *mp_info = (mach_port_info_ext_t *)info; + if (*count < MACH_PORT_INFO_EXT_COUNT) + return KERN_FAILURE; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + mach_port_get_status_helper(port, &mp_info->mpie_status); + mp_info->mpie_boost_cnt = port->ip_impcount; + *count = MACH_PORT_INFO_EXT_COUNT; + ip_unlock(port); + break; + } + default: return KERN_INVALID_ARGUMENT; /*NOTREACHED*/ @@ -1762,6 +1916,88 @@ mach_port_set_attributes( return kr; break; } + case MACH_PORT_TEMPOWNER: + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + 
ipc_importance_task_t release_imp_task = IIT_NULL; + natural_t assertcnt = 0; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + /* + * don't allow temp-owner importance donation if user + * associated it with a kobject already (timer, host_notify target). + */ + if (is_ipc_kobject(ip_kotype(port))) { + ip_unlock(port); + return KERN_INVALID_ARGUMENT; + } + + if (port->ip_tempowner != 0) { + if (IIT_NULL != port->ip_imp_task) { + release_imp_task = port->ip_imp_task; + port->ip_imp_task = IIT_NULL; + assertcnt = port->ip_impcount; + } + } else { + assertcnt = port->ip_impcount; + } + + port->ip_impdonation = 1; + port->ip_tempowner = 1; + ip_unlock(port); + +#if IMPORTANCE_INHERITANCE + /* drop assertions from previous destination task */ + if (release_imp_task != IIT_NULL) { + assert(ipc_importance_task_is_any_receiver_type(release_imp_task)); + if (assertcnt > 0) + ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt); + ipc_importance_task_release(release_imp_task); + } else if (assertcnt > 0) { + release_imp_task = current_task()->task_imp_base; + if (release_imp_task != IIT_NULL && + ipc_importance_task_is_any_receiver_type(release_imp_task)) { + ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt); + } + } +#else + if (release_imp_task != IIT_NULL) + ipc_importance_task_release(release_imp_task); +#endif /* IMPORTANCE_INHERITANCE */ + + break; + +#if IMPORTANCE_INHERITANCE + case MACH_PORT_DENAP_RECEIVER: + case MACH_PORT_IMPORTANCE_RECEIVER: + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + + /* + * don't allow importance donation if user associated + * it with a kobject already (timer, host_notify target). 
+ */ + if (is_ipc_kobject(ip_kotype(port))) { + ip_unlock(port); + return KERN_INVALID_ARGUMENT; + } + + /* port is locked and active */ + port->ip_impdonation = 1; + ip_unlock(port); + + break; +#endif /* IMPORTANCE_INHERITANCE */ + default: return KERN_INVALID_ARGUMENT; /*NOTREACHED*/ @@ -1797,7 +2033,8 @@ mach_port_insert_member( ipc_object_t obj; ipc_object_t psobj; kern_return_t kr; - wait_queue_link_t wql; + uint64_t wq_link_id; + uint64_t wq_reserved_prepost; if (space == IS_NULL) return KERN_INVALID_TASK; @@ -1805,7 +2042,9 @@ mach_port_insert_member( if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname)) return KERN_INVALID_RIGHT; - wql = wait_queue_link_allocate(); + wq_link_id = waitq_link_reserve(NULL); + wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, + WAITQ_DONT_LOCK); kr = ipc_object_translate_two(space, name, MACH_PORT_RIGHT_RECEIVE, &obj, @@ -1817,13 +2056,16 @@ mach_port_insert_member( assert(psobj != IO_NULL); assert(obj != IO_NULL); - kr = ipc_pset_add((ipc_pset_t)psobj, (ipc_port_t)obj, wql); + __IGNORE_WCASTALIGN(kr = ipc_pset_add((ipc_pset_t)psobj, (ipc_port_t)obj, + &wq_link_id, &wq_reserved_prepost)); + io_unlock(psobj); io_unlock(obj); done: - if (kr != KERN_SUCCESS) - wait_queue_link_free(wql); + /* on success, wq_link_id is reset to 0, so this is always safe */ + waitq_link_release(wq_link_id); + waitq_prepost_release_reserve(wq_reserved_prepost); return kr; } @@ -1855,7 +2097,6 @@ mach_port_extract_member( ipc_object_t psobj; ipc_object_t obj; kern_return_t kr; - wait_queue_link_t wql = WAIT_QUEUE_LINK_NULL; if (space == IS_NULL) return KERN_INVALID_TASK; @@ -1873,13 +2114,11 @@ mach_port_extract_member( assert(psobj != IO_NULL); assert(obj != IO_NULL); - kr = ipc_pset_remove((ipc_pset_t)psobj, (ipc_port_t)obj, &wql); + __IGNORE_WCASTALIGN(kr = ipc_pset_remove((ipc_pset_t)psobj, (ipc_port_t)obj)); + io_unlock(psobj); io_unlock(obj); - if (wql != WAIT_QUEUE_LINK_NULL) - wait_queue_link_free(wql); - return kr; } @@ -1895,6 +2134,9 @@ task_set_port_space( { kern_return_t kr; + if (space == IS_NULL) + return KERN_INVALID_TASK; + is_write_lock(space); if (!is_active(space)) { @@ -1909,179 +2151,353 @@ task_set_port_space( } /* - * Get a (new) label handle representing the given port's port label. + * Routine: mach_port_guard_locked [helper routine] + * Purpose: + * Sets a new guard for a locked port. + * Conditions: + * Port Locked. + * Returns: + * KERN_SUCCESS Port Guarded. + * KERN_INVALID_ARGUMENT Port already contains a context/guard. */ -#if CONFIG_MACF_MACH -kern_return_t -mach_get_label( - ipc_space_t space, - mach_port_name_t name, - mach_port_name_t *outlabel) +static kern_return_t +mach_port_guard_locked( + ipc_port_t port, + uint64_t guard, + boolean_t strict) { - ipc_entry_t entry; - ipc_port_t port; - struct label outl; - kern_return_t kr; - int dead; - - if (!MACH_PORT_VALID(name)) - return KERN_INVALID_NAME; + if (port->ip_context) + return KERN_INVALID_ARGUMENT; - /* Lookup the port name in the task's space. */ - kr = ipc_right_lookup_write(space, name, &entry); - if (kr != KERN_SUCCESS) - return kr; + port->ip_context = guard; + port->ip_guarded = 1; + port->ip_strict_guard = (strict)?1:0; + return KERN_SUCCESS; +} - port = (ipc_port_t) entry->ie_object; - dead = ipc_right_check(space, port, name, entry); - if (dead) { - is_write_unlock(space); - ip_release(port); - return KERN_INVALID_RIGHT; +/* + * Routine: mach_port_unguard_locked [helper routine] + * Purpose: + * Removes guard for a locked port. + * Conditions: + * Port Locked. 
+ * Returns: + * KERN_SUCCESS Port Unguarded. + * KERN_INVALID_ARGUMENT Port is either unguarded already or guard mismatch. + * This also raises a EXC_GUARD exception. + */ +static kern_return_t +mach_port_unguard_locked( + ipc_port_t port, + mach_port_name_t name, + uint64_t guard) +{ + /* Port locked and active */ + if (!port->ip_guarded) { + /* Port already unguarded; Raise exception */ + mach_port_guard_exception(name, guard, 0, kGUARD_EXC_UNGUARDED); + return KERN_INVALID_ARGUMENT; } - /* port is now locked */ - is_write_unlock(space); - /* Make sure we are not dealing with a label handle. */ - if (ip_kotype(port) == IKOT_LABELH) { - /* already is a label handle! */ - ip_unlock(port); + if (port->ip_context != guard) { + /* Incorrect guard; Raise exception */ + mach_port_guard_exception(name, guard, port->ip_context, kGUARD_EXC_INCORRECT_GUARD); return KERN_INVALID_ARGUMENT; } - /* Copy the port label and stash it in a new label handle. */ - mac_port_label_init(&outl); - mac_port_label_copy(&port->ip_label, &outl); - kr = labelh_new_user(space, &outl, outlabel); - ip_unlock(port); - + port->ip_context = 0; + port->ip_guarded = port->ip_strict_guard = 0; return KERN_SUCCESS; } -#else + + +/* + * Routine: mach_port_guard_exception [helper routine] + * Purpose: + * Marks the thread with AST_GUARD for mach port guard violation. + * Also saves exception info in thread structure. + * Conditions: + * None. + * Returns: + * KERN_FAILURE Thread marked with AST_GUARD. + */ kern_return_t -mach_get_label( - __unused ipc_space_t space, - __unused mach_port_name_t name, - __unused mach_port_name_t *outlabel) +mach_port_guard_exception( + mach_port_name_t name, + __unused uint64_t inguard, + uint64_t portguard, + unsigned reason) +{ + thread_t t = current_thread(); + uint64_t code, subcode; + + /* + * EXC_GUARD namespace for mach ports + * + * + * Mach Port guards use the exception codes like + * + * code: + * +----------------------------------------------------------------+ + * |[63:61] GUARD_TYPE_MACH_PORT | [60:32] flavor | [31:0] port name| + * +----------------------------------------------------------------+ + * + * subcode: + * +----------------------------------------------------------------+ + * | [63:0] guard value | + * +----------------------------------------------------------------+ + */ + + code = (((uint64_t)GUARD_TYPE_MACH_PORT) << 61) | + (((uint64_t)reason) << 32) | + ((uint64_t)name); + subcode = (uint64_t)(portguard); + + t->guard_exc_info.code = code; + t->guard_exc_info.subcode = subcode; + + /* Mark thread with AST_GUARD */ + thread_guard_violation(t, GUARD_TYPE_MACH_PORT); + return KERN_FAILURE; +} + + +/* + * Routine: mach_port_guard_ast + * Purpose: + * Raises an exception for mach port guard violation. + * Conditions: + * None. + * Returns: + * None. + */ + +void +mach_port_guard_ast(thread_t t) { - return KERN_INVALID_ARGUMENT; + /* Raise an EXC_GUARD exception */ + task_exception_notify(EXC_GUARD, t->guard_exc_info.code, t->guard_exc_info.subcode); + + /* Terminate task which caused the exception */ + task_bsdtask_kill(current_task()); + return; } -#endif /* - * also works on label handles + * Routine: mach_port_construct [kernel call] + * Purpose: + * Constructs a mach port with the provided set of options. + * Conditions: + * None. + * Returns: + * KERN_SUCCESS The right is allocated. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ * KERN_NO_SPACE No room in space for another right. + * KERN_FAILURE Illegal option values requested. */ -#if CONFIG_MACF_MACH + kern_return_t -mach_get_label_text( +mach_port_construct( ipc_space_t space, - mach_port_name_t name, - labelstr_t policies, - labelstr_t outlabel) + mach_port_options_t *options, + uint64_t context, + mach_port_name_t *name) { - ipc_entry_t entry; - ipc_port_t port; - kern_return_t kr; - struct label *l; - int dead; - - if (space == IS_NULL || space->is_task == NULL) - return KERN_INVALID_TASK; + kern_return_t kr; + ipc_port_t port; - if (!MACH_PORT_VALID(name)) - return KERN_INVALID_NAME; + if (space == IS_NULL) + return (KERN_INVALID_TASK); - kr = ipc_right_lookup_write(space, name, &entry); + /* Allocate a new port in the IPC space */ + kr = ipc_port_alloc(space, name, &port); if (kr != KERN_SUCCESS) return kr; + + /* Port locked and active */ + if (options->flags & MPO_CONTEXT_AS_GUARD) { + kr = mach_port_guard_locked(port, (uint64_t) context, (options->flags & MPO_STRICT)); + /* A newly allocated and locked port should always be guarded successfully */ + assert(kr == KERN_SUCCESS); + } else { + port->ip_context = context; + } + + /* Unlock port */ + ip_unlock(port); - port = (ipc_port_t)entry->ie_object; - dead = ipc_right_check(space, port, name, entry); - if (dead) { - is_write_unlock(space); - ip_release(port); - return KERN_INVALID_RIGHT; + /* Set port attributes as requested */ + + if (options->flags & MPO_QLIMIT) { + kr = mach_port_set_attributes(space, *name, MACH_PORT_LIMITS_INFO, + (mach_port_info_t)&options->mpl, sizeof(options->mpl)/sizeof(int)); + if (kr != KERN_SUCCESS) + goto cleanup; + } + + if (options->flags & MPO_TEMPOWNER) { + kr = mach_port_set_attributes(space, *name, MACH_PORT_TEMPOWNER, NULL, 0); + if (kr != KERN_SUCCESS) + goto cleanup; } - /* object (port) is now locked */ - is_write_unlock (space); - l = io_getlabel(entry->ie_object); + if (options->flags & MPO_IMPORTANCE_RECEIVER) { + kr = mach_port_set_attributes(space, *name, MACH_PORT_IMPORTANCE_RECEIVER, NULL, 0); + if (kr != KERN_SUCCESS) + goto cleanup; + } - mac_port_label_externalize(l, policies, outlabel, 512, 0); + if (options->flags & MPO_DENAP_RECEIVER) { + kr = mach_port_set_attributes(space, *name, MACH_PORT_DENAP_RECEIVER, NULL, 0); + if (kr != KERN_SUCCESS) + goto cleanup; + } + + if (options->flags & MPO_INSERT_SEND_RIGHT) { + kr = ipc_object_copyin(space, *name, MACH_MSG_TYPE_MAKE_SEND, (ipc_object_t *)&port); + if (kr != KERN_SUCCESS) + goto cleanup; + + kr = mach_port_insert_right(space, *name, port, MACH_MSG_TYPE_PORT_SEND); + if (kr != KERN_SUCCESS) + goto cleanup; + } - io_unlocklabel(entry->ie_object); - io_unlock(entry->ie_object); return KERN_SUCCESS; + +cleanup: + /* Attempt to destroy port. If its already destroyed by some other thread, we're done */ + (void) mach_port_destruct(space, *name, 0, context); + return kr; } -#else -kern_return_t -mach_get_label_text( - __unused ipc_space_t space, - __unused mach_port_name_t name, - __unused labelstr_t policies, - __unused labelstr_t outlabel) -{ - return KERN_INVALID_ARGUMENT; -} -#endif +/* + * Routine: mach_port_destruct [kernel call] + * Purpose: + * Destroys a mach port with appropriate guard + * Conditions: + * None. + * Returns: + * KERN_SUCCESS The name is destroyed. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT The right isn't correct. 
+ * KERN_INVALID_VALUE The delta for send right is incorrect. + * KERN_INVALID_ARGUMENT Port is either unguarded already or guard mismatch. + * This also raises a EXC_GUARD exception. + */ -#if CONFIG_MACF_MACH kern_return_t -mach_set_port_label( +mach_port_destruct( ipc_space_t space, mach_port_name_t name, - labelstr_t labelstr) + mach_port_delta_t srdelta, + uint64_t guard) { - ipc_entry_t entry; - kern_return_t kr; - struct label inl; - ipc_port_t port; - int rc; + kern_return_t kr; + ipc_entry_t entry; - if (space == IS_NULL || space->is_task == NULL) + if (space == IS_NULL) return KERN_INVALID_TASK; if (!MACH_PORT_VALID(name)) return KERN_INVALID_NAME; - mac_port_label_init(&inl); - rc = mac_port_label_internalize(&inl, labelstr); - if (rc) - return KERN_INVALID_ARGUMENT; - + /* Remove reference for receive right */ kr = ipc_right_lookup_write(space, name, &entry); if (kr != KERN_SUCCESS) return kr; + /* space is write-locked and active */ + kr = ipc_right_destruct(space, name, entry, srdelta, guard); /* unlocks */ - if (io_otype(entMACry->ie_object) != IOT_PORT) { - is_write_unlock(space); - return KERN_INVALID_RIGHT; - } + return kr; +} - port = (ipc_port_t) entry->ie_object; - ip_lock(port); +/* + * Routine: mach_port_guard [kernel call] + * Purpose: + * Guard a mach port with specified guard value. + * The context field of the port is used as the guard. + * Conditions: + * None. + * Returns: + * KERN_SUCCESS The name is destroyed. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT The right isn't correct. + * KERN_INVALID_ARGUMENT Port already contains a context/guard. + */ +kern_return_t +mach_port_guard( + ipc_space_t space, + mach_port_name_t name, + uint64_t guard, + boolean_t strict) +{ + kern_return_t kr; + ipc_port_t port; - tasklabel_lock(space->is_task); - rc = mac_port_check_label_update(&space->is_task->maclabel, - &port->ip_label, &inl); - tasklabel_unlock(space->is_task); - if (rc) - kr = KERN_NO_ACCESS; - else - mac_port_label_copy(&inl, &port->ip_label); + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_NAME; + /* Guard can be applied only to receive rights */ + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + + /* Port locked and active */ + kr = mach_port_guard_locked(port, guard, strict); ip_unlock(port); - is_write_unlock(space); + return kr; + } -#else + +/* + * Routine: mach_port_unguard [kernel call] + * Purpose: + * Unguard a mach port with specified guard value. + * Conditions: + * None. + * Returns: + * KERN_SUCCESS The name is destroyed. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT The right isn't correct. + * KERN_INVALID_ARGUMENT Port is either unguarded already or guard mismatch. + * This also raises a EXC_GUARD exception. 
+ */ kern_return_t -mach_set_port_label( - ipc_space_t space __unused, - mach_port_name_t name __unused, - labelstr_t labelstr __unused) +mach_port_unguard( + ipc_space_t space, + mach_port_name_t name, + uint64_t guard) { - return KERN_INVALID_ARGUMENT; + + kern_return_t kr; + ipc_port_t port; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_NAME; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + + /* Port locked and active */ + kr = mach_port_unguard_locked(port, name, guard); + ip_unlock(port); + return kr; } -#endif +
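
Editor's note: the hunks above add guarded mach ports (mach_port_construct, mach_port_destruct, mach_port_guard, mach_port_unguard), mach_port_peek, and the importance/tempowner receive-right attributes on the kernel side. For orientation only, here is a minimal user-space sketch of the guarded-port flow; it is not part of the diff. It assumes the MIG-generated wrappers declared in <mach/mach_port.h> and the MPO_* option flags from <mach/port.h> that correspond to this revision; the guard value is arbitrary.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int
main(void)
{
	mach_port_options_t opts = {
		.flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT
	};
	const uint64_t guard = 0x6b65726e;	/* arbitrary guard value */
	mach_port_name_t name = MACH_PORT_NULL;
	kern_return_t kr;

	/* Allocate a receive right whose context doubles as a strict guard. */
	kr = mach_port_construct(mach_task_self(), &opts, guard, &name);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_construct: %s\n", mach_error_string(kr));
		return 1;
	}

	/*
	 * A guarded receive right cannot be torn down with mach_port_destroy();
	 * the matching guard has to be presented to mach_port_destruct()
	 * (or removed first with mach_port_unguard()).
	 */
	kr = mach_port_destruct(mach_task_self(), name, 0, guard);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_destruct: %s\n", mach_error_string(kr));
		return 1;
	}
	return 0;
}

Presenting the wrong guard, or destroying a guarded right through the unguarded paths, triggers mach_port_guard_exception() in the code above: the thread is marked for AST_GUARD, an EXC_GUARD exception is raised from mach_port_guard_ast(), and the offending task is killed via task_bsdtask_kill(). Because a strict guard also occupies the port context, mach_port_get_context() returns 0 and mach_port_set_context() fails with its own guard exception for such ports.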