/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
mach_port_seqno_t seqno,
ipc_space_t space);
-/* the size of each trailer has to be listed here for copyout purposes */
-mach_msg_trailer_size_t trailer_size[] = {
- sizeof(mach_msg_trailer_t),
- sizeof(mach_msg_seqno_trailer_t),
- sizeof(mach_msg_security_trailer_t) };
-
security_token_t KERNEL_SECURITY_TOKEN = KERNEL_SECURITY_TOKEN_VALUE;
+audit_token_t KERNEL_AUDIT_TOKEN = KERNEL_AUDIT_TOKEN_VALUE;
mach_msg_format_0_trailer_t trailer_template = {
/* mach_msg_trailer_type_t */ MACH_MSG_TRAILER_FORMAT_0,
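/*
 * Editorial sketch, not part of the change above: with the audit token
 * introduced, the per-request trailer size is derived from the receive
 * option bits rather than indexed out of the deleted trailer_size[]
 * table. GET_RCV_ELEMENTS() and the mach_msg_*_trailer_t types are the
 * <mach/message.h> names; the exact replacement macro in the tree may
 * be shaped differently.
 */
static mach_msg_trailer_size_t
sketch_requested_trailer_size(mach_msg_option_t option)
{
	switch (GET_RCV_ELEMENTS(option)) {
	case MACH_RCV_TRAILER_NULL:
		return sizeof(mach_msg_trailer_t);
	case MACH_RCV_TRAILER_SEQNO:
		return sizeof(mach_msg_seqno_trailer_t);
	case MACH_RCV_TRAILER_SENDER:
		return sizeof(mach_msg_security_trailer_t);
	default:	/* MACH_RCV_TRAILER_AUDIT and any larger request */
		return sizeof(mach_msg_audit_trailer_t);
	}
}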
assert(ip_active(dest_port));
assert(dest_port->ip_receiver != ipc_space_kernel);
- assert(!imq_full(&dest_port->ip_messages) ||
- (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) ==
- MACH_MSG_TYPE_PORT_SEND_ONCE));
+// assert(!imq_full(&dest_port->ip_messages) ||
+// (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) ==
+// MACH_MSG_TYPE_PORT_SEND_ONCE));
assert((hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0);
{
register ipc_mqueue_t dest_mqueue;
wait_queue_t waitq;
thread_t receiver;
-#if THREAD_SWAPPER
- thread_act_t rcv_act;
-#endif
+ processor_t processor;
spl_t s;
s = splsched();
+ processor = current_processor();
+ if (processor->current_pri >= BASEPRI_RTQUEUES)
+ goto abort_send_receive1;
+
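/*
 * Editorial note on the guard above (an inference; the change itself
 * does not say): the handoff donates the current processor to the
 * receiver without consulting the run queues. When the current thread
 * is realtime (current_pri >= BASEPRI_RTQUEUES), dispatch must honor
 * the realtime queues, so the fast path bails out to the ordinary
 * send/receive path instead.
 */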
dest_mqueue = &dest_port->ip_messages;
waitq = &dest_mqueue->imq_wait_queue;
imq_lock(dest_mqueue);
- wait_queue_peek_locked(waitq, IPC_MQUEUE_RECEIVE, &receiver, &waitq);
+ wait_queue_peek64_locked(waitq, IPC_MQUEUE_RECEIVE, &receiver, &waitq);
/* queue still locked, thread locked - but still on q */
- if (receiver == THREAD_NULL) {
+ if ( receiver == THREAD_NULL ) {
abort_send_receive:
imq_unlock(dest_mqueue);
+ abort_send_receive1:
splx(s);
ip_unlock(dest_port);
ipc_object_release(rcv_object);
assert(receiver->wait_event == IPC_MQUEUE_RECEIVE);
/*
- * See if it is still running on another processor (trying to
- * block itself). If so, fall off.
+ * Make sure that the scheduling state of the receiver is such
+ * that we can handoff to it here. If not, fall off.
*
- * JMM - We have an opportunity here. Since the thread is locked
- * and we find it runnable, it must still be trying to get into
+ * JMM - We have an opportunity here. If the thread is locked
+ * and we find it runnable, it may still be trying to get into
* thread_block on itself. We could just "hand him the message"
* and let him go (thread_go_locked()) and then fall down into a
* slow receive for ourselves. Only his RECEIVE_TOO_LARGE handling
* runs afoul of that. Clean this up!
*/
- if ((receiver->state & (TH_RUN|TH_WAIT)) != TH_WAIT) {
- assert(NCPUS > 1);
+ if ((receiver->state & (TH_RUN|TH_WAIT)) != TH_WAIT ||
+ receiver->sched_pri >= BASEPRI_RTQUEUES ||
+ receiver->processor_set != processor->processor_set ||
+ (receiver->bound_processor != PROCESSOR_NULL &&
+ receiver->bound_processor != processor)) {
HOT(c_mmot_cold_033++);
fall_off:
thread_unlock(receiver);
goto abort_send_receive;
}
-#if THREAD_SWAPPER
- /*
- * Receiver looks okay -- is it swapped in?
- */
- rpc_lock(receiver);
- rcv_act = receiver->top_act;
- if (rcv_act->swap_state != TH_SW_IN &&
- rcv_act->swap_state != TH_SW_UNSWAPPABLE) {
- rpc_unlock(receiver);
- HOT(c_mmot_rcvr_swapped++);
- goto fall_off;
- }
-
- /*
- * Make sure receiver stays swapped in (if we can).
- */
- if (!act_lock_try(rcv_act)) { /* out of order! */
- rpc_unlock(receiver);
- HOT(c_mmot_rcvr_locked++);
- goto fall_off;
- }
-
- /*
- * Check for task swapping in progress affecting
- * receiver. Since rcv_act is attached to a shuttle,
- * its swap_state is covered by shuttle's thread_lock()
- * (sigh).
- */
- if ((rcv_act->swap_state != TH_SW_IN &&
- rcv_act->swap_state != TH_SW_UNSWAPPABLE) ||
- rcv_act->ast & AST_SWAPOUT) {
- act_unlock(rcv_act);
- rpc_unlock(receiver);
- HOT(c_mmot_rcvr_tswapped++);
- goto fall_off;
- }
-
- /*
- * We don't need to make receiver unswappable here -- holding
- * act_lock() of rcv_act is sufficient to prevent either thread
- * or task swapping from changing its state (see swapout_scan(),
- * task_swapout()). Don't release lock till receiver's state
- * is consistent. Its task may then be marked for swapout,
- * but that's life.
- */
- rpc_unlock(receiver);
- /*
- * NB: act_lock(rcv_act) still held
- */
-#endif /* THREAD_SWAPPER */
-
/*
* Before committing to the handoff, make sure that we are
* really going to block (i.e. there are no messages already
receiver->ith_seqno = dest_mqueue->imq_seqno++;
/*
- * Inline thread_go_locked
- *
- * JMM - Including hacked in version of setrun scheduler op
- * that doesn't try to put thread on a runq.
+ * Update the scheduling state for the handoff.
*/
- {
- receiver->state &= ~(TH_WAIT|TH_UNINT);
- receiver->state |= TH_RUN;
- receiver->wait_result = THREAD_AWAKENED;
- receiver->metered_computation = 0;
- }
-
+ receiver->state &= ~(TH_WAIT|TH_UNINT);
+ receiver->state |= TH_RUN;
+
+ pset_run_incr(receiver->processor_set);
+ if (receiver->sched_mode & TH_MODE_TIMESHARE)
+ pset_share_incr(receiver->processor_set);
+
+ receiver->wait_result = THREAD_AWAKENED;
+
+ receiver->computation_metered = 0;
+ receiver->reason = AST_NONE;
thread_unlock(receiver);
-#if THREAD_SWAPPER
- act_unlock(rcv_act);
-#endif /* THREAD_SWAPPER */
imq_unlock(dest_mqueue);
ip_unlock(dest_port);
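/*
 * Editorial note (assumed semantics): pset_run_incr() and
 * pset_share_incr() keep the processor set's counts of runnable and
 * runnable-timeshare threads current. The handoff makes the receiver
 * runnable without going through the usual thread_setrun() path that
 * would otherwise maintain those counts, so they are adjusted by hand.
 */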
* our reply message needs to determine if it
* can hand off directly back to us.
*/
+ thread_lock(self);
self->ith_msg = (rcv_msg) ? rcv_msg : msg;
self->ith_object = rcv_object; /* still holds reference */
self->ith_msize = rcv_size;
self->ith_continuation = thread_syscall_return;
waitq = &rcv_mqueue->imq_wait_queue;
- (void)wait_queue_assert_wait_locked(waitq,
+ (void)wait_queue_assert_wait64_locked(waitq,
IPC_MQUEUE_RECEIVE,
THREAD_ABORTSAFE,
- TRUE); /* unlock? */
- /* rcv_mqueue is unlocked */
+ self);
+ thread_unlock(self);
+ imq_unlock(rcv_mqueue);
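/*
 * Editorial note: the explicit imq_unlock() above replaces the old
 * TRUE "unlock for me" argument to wait_queue_assert_wait_locked();
 * the 64-bit variant takes the waiting thread instead of an unlock
 * flag, so the message queue must now be unlocked by the caller.
 */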
- /* Inline thread_block_reason (except don't select a new
- * new thread (we already have one), and don't turn off ASTs
- * (we don't want two threads to hog all the CPU by handing
- * off to each other).
+ /*
+ * Switch directly to receiving thread, and block
+ * this thread as though it had called ipc_mqueue_receive.
*/
- {
- if (self->funnel_state & TH_FN_OWNED) {
- self->funnel_state = TH_FN_REFUNNEL;
- KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, self->funnel_lock, 3, 0, 0, 0);
- funnel_unlock(self->funnel_lock);
-
- }
-
- machine_clock_assist();
-
- thread_lock(self);
- if (self->state & TH_ABORT)
- clear_wait_internal(self, THREAD_INTERRUPTED);
- thread_unlock(self);
-
- /*
- * Switch directly to receiving thread, and block
- * this thread as though it had called ipc_mqueue_receive.
- */
-#if defined (__i386__)
- thread_run(self, (void (*)(void))0, receiver);
-#else
- thread_run(self, ipc_mqueue_receive_continue, receiver);
-#endif
-
- /* if we fell thru */
- if (self->funnel_state & TH_FN_REFUNNEL) {
- kern_return_t wait_result2;
-
- wait_result2 = self->wait_result;
- self->funnel_state = 0;
- KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 6, 0, 0, 0);
- funnel_lock(self->funnel_lock);
- KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 6, 0, 0, 0);
- self->funnel_state = TH_FN_OWNED;
- self->wait_result = wait_result2;
- }
- splx(s);
- }
-
- ipc_mqueue_receive_continue();
+ thread_run(self, ipc_mqueue_receive_continue, receiver);
/* NOTREACHED */
}
return MACH_MSG_SUCCESS;
}
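/*
 * Editorial note (assumed thread_run() semantics in this tree): the
 * call context-switches directly from 'self' to 'receiver', recording
 * ipc_mqueue_receive_continue as the continuation 'self' resumes in
 * when it is next dispatched -- hence the NOTREACHED above; control
 * does not fall out of the call on this path.
 */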
+/*
+ * Routine: mach_msg_trap [mach trap]
+ * Purpose:
+ * Possibly send a message; possibly receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * All of mach_msg_send and mach_msg_receive error codes.
+ */
+
+mach_msg_return_t
+mach_msg_trap(
+ mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_name_t rcv_name,
+ mach_msg_timeout_t timeout,
+ mach_port_name_t notify)
+{
+ return mach_msg_overwrite_trap(msg,
+ option,
+ send_size,
+ rcv_size,
+ rcv_name,
+ timeout,
+ notify,
+ (mach_msg_header_t *)0,
+ (mach_msg_size_t)0);
+}
+
+
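/*
 * Editorial sketch: a minimal user-space caller of the trap added
 * above. The libc mach_msg() wrapper funnels into mach_msg_trap(),
 * which now simply tail-calls mach_msg_overwrite_trap() with null
 * overwrite arguments. A combined MACH_SEND_MSG|MACH_RCV_MSG call is
 * the shape the handoff fast path targets; the port setup below is
 * ordinary Mach API and is illustrative only.
 */
#include <mach/mach.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
	mach_msg_header_t  header;
	int                payload;
	mach_msg_trailer_t trailer;   /* room for the kernel-appended trailer */
} echo_msg_t;

int
main(void)
{
	mach_port_t   port;
	echo_msg_t    msg;
	kern_return_t kr;

	/* A receive right plus a send right on the same port. */
	kr = mach_port_allocate(mach_task_self(),
	    MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return 1;
	kr = mach_port_insert_right(mach_task_self(), port, port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return 1;

	msg.header.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msg.header.msgh_size        = offsetof(echo_msg_t, trailer);
	msg.header.msgh_remote_port = port;
	msg.header.msgh_local_port  = MACH_PORT_NULL;
	msg.header.msgh_id          = 1000;
	msg.payload                 = 42;

	/* One trap: send to the port, then receive the message back. */
	kr = mach_msg(&msg.header, MACH_SEND_MSG | MACH_RCV_MSG,
	    msg.header.msgh_size, sizeof(msg), port,
	    MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	printf("mach_msg: 0x%x, payload %d\n", kr, msg.payload);
	return (kr == MACH_MSG_SUCCESS) ? 0 : 1;
}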
/*
* Routine: msg_receive_error [internal]
* Purpose: