1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53/*
54 */
55/*
56 * File: ipc/mach_msg.c
57 * Author: Rich Draves
58 * Date: 1989
59 *
60 * Exported message traps. See mach/message.h.
61 */
62
63#include <cpus.h>
64
65#include <mach/kern_return.h>
66#include <mach/port.h>
67#include <mach/message.h>
68#include <mach/mig_errors.h>
69#include <kern/assert.h>
70#include <kern/counters.h>
71#include <kern/cpu_number.h>
72#include <kern/task.h>
73#include <kern/thread.h>
74#include <kern/lock.h>
75#include <kern/sched_prim.h>
76#include <kern/exception.h>
77#include <kern/misc_protos.h>
78#include <vm/vm_map.h>
79#include <ipc/ipc_kmsg.h>
80#include <ipc/ipc_mqueue.h>
81#include <ipc/ipc_object.h>
82#include <ipc/ipc_notify.h>
83#include <ipc/ipc_port.h>
84#include <ipc/ipc_pset.h>
85#include <ipc/ipc_space.h>
86#include <ipc/ipc_entry.h>
87#include <kern/kalloc.h>
88#include <kern/thread_swap.h>
89#include <kern/processor.h>
90
91#include <kern/mk_sp.h>
92
93#include <machine/machine_routines.h>
94#include <sys/kdebug.h>
95
96/*
97 * Forward declarations
98 */
99
100mach_msg_return_t mach_msg_send(
101 mach_msg_header_t *msg,
102 mach_msg_option_t option,
103 mach_msg_size_t send_size,
104 mach_msg_timeout_t timeout,
105 mach_port_name_t notify);
106
107mach_msg_return_t mach_msg_receive(
108 mach_msg_header_t *msg,
109 mach_msg_option_t option,
110 mach_msg_size_t rcv_size,
111 mach_port_name_t rcv_name,
112 mach_msg_timeout_t timeout,
113 void (*continuation)(mach_msg_return_t),
114 mach_msg_size_t slist_size);
115
116
117mach_msg_return_t msg_receive_error(
118 ipc_kmsg_t kmsg,
119 mach_msg_header_t *msg,
120 mach_msg_option_t option,
121 mach_port_seqno_t seqno,
122 ipc_space_t space);
123
124security_token_t KERNEL_SECURITY_TOKEN = KERNEL_SECURITY_TOKEN_VALUE;
125
126mach_msg_format_0_trailer_t trailer_template = {
127 /* mach_msg_trailer_type_t */ MACH_MSG_TRAILER_FORMAT_0,
128 /* mach_msg_trailer_size_t */ MACH_MSG_TRAILER_MINIMUM_SIZE,
129 /* mach_port_seqno_t */ 0,
130 /* security_token_t */ KERNEL_SECURITY_TOKEN_VALUE
131};
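/*
 * Illustrative sketch (not part of this file): how a user-space receiver
 * could ask for the format-0 trailer that trailer_template above describes.
 * Assumes the MACH_RCV_TRAILER_* macros and mach_msg_seqno_trailer_t from
 * <mach/message.h>; the buffer layout and function name are made up for
 * the example.
 */
#if 0	/* user-space usage sketch, not compiled into the kernel */
#include <mach/mach.h>

static mach_msg_return_t
example_receive_with_seqno(mach_port_t rcv_port)
{
	struct {
		mach_msg_header_t		hdr;
		char				body[128];
		mach_msg_seqno_trailer_t	trailer;	/* sizing slack for the trailer */
	} buf;

	/* Ask the kernel to append a format-0 trailer through the seqno field. */
	mach_msg_option_t option = MACH_RCV_MSG |
	    MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
	    MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_SEQNO);

	return mach_msg(&buf.hdr, option, 0, sizeof buf, rcv_port,
	    MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
#endif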
132
133/*
134 * Routine: mach_msg_send
135 * Purpose:
136 * Send a message.
137 * Conditions:
138 * Nothing locked.
139 * Returns:
140 * MACH_MSG_SUCCESS Sent the message.
141 * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
142 * MACH_SEND_NO_BUFFER Couldn't allocate buffer.
143 * MACH_SEND_INVALID_DATA Couldn't copy message data.
144 * MACH_SEND_INVALID_HEADER
145 * Illegal value in the message header bits.
146 * MACH_SEND_INVALID_DEST The space is dead.
147 * MACH_SEND_INVALID_NOTIFY Bad notify port.
148 * MACH_SEND_INVALID_DEST Can't copyin destination port.
149 * MACH_SEND_INVALID_REPLY Can't copyin reply port.
150 * MACH_SEND_TIMED_OUT Timeout expired without delivery.
151 * MACH_SEND_INTERRUPTED Delivery interrupted.
152 * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
153 * MACH_SEND_WILL_NOTIFY Msg-accepted notif. requested.
154 * MACH_SEND_NOTIFY_IN_PROGRESS
155 * This space has already forced a message to this port.
156 */
157
158mach_msg_return_t
159mach_msg_send(
160 mach_msg_header_t *msg,
161 mach_msg_option_t option,
162 mach_msg_size_t send_size,
163 mach_msg_timeout_t timeout,
164 mach_port_name_t notify)
165{
166 ipc_space_t space = current_space();
167 vm_map_t map = current_map();
168 ipc_kmsg_t kmsg;
169 mach_msg_return_t mr;
170
171 mr = ipc_kmsg_get(msg, send_size, &kmsg);
172
173 if (mr != MACH_MSG_SUCCESS)
174 return mr;
175
176 if (option & MACH_SEND_CANCEL) {
177 if (notify == MACH_PORT_NULL)
178 mr = MACH_SEND_INVALID_NOTIFY;
179 else
180 mr = ipc_kmsg_copyin(kmsg, space, map, notify);
181 } else
182 mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
183 if (mr != MACH_MSG_SUCCESS) {
184 ipc_kmsg_free(kmsg);
185 return mr;
186 }
187
188 mr = ipc_kmsg_send(kmsg, option & MACH_SEND_TIMEOUT, timeout);
189
190 if (mr != MACH_MSG_SUCCESS) {
191 mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL);
192 (void) ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
193 }
194
195 return mr;
196}
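/*
 * Illustrative sketch (not part of this file): the kind of user-space call
 * that lands in mach_msg_send() above -- a header-only message pushed over
 * an existing send right, with no reply requested.  The message id and
 * function name are made up for the example.
 */
#if 0	/* user-space usage sketch, not compiled into the kernel */
#include <mach/mach.h>

static mach_msg_return_t
example_simple_send(mach_port_t dest)
{
	mach_msg_header_t msg;

	msg.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msg.msgh_size        = sizeof msg;
	msg.msgh_remote_port = dest;
	msg.msgh_local_port  = MACH_PORT_NULL;	/* no reply port */
	msg.msgh_id          = 100;		/* arbitrary id for the example */

	/* Send only: no receive buffer, no timeout, no notify port. */
	return mach_msg(&msg, MACH_SEND_MSG, sizeof msg, 0,
	    MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
#endif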
197
198/*
199 * Routine: mach_msg_receive
200 * Purpose:
201 * Receive a message.
202 * Conditions:
203 * Nothing locked.
204 * Returns:
205 * MACH_MSG_SUCCESS Received a message.
206 * MACH_RCV_INVALID_NAME The name doesn't denote a right,
207 * or the denoted right is not receive or port set.
208 * MACH_RCV_IN_SET Receive right is a member of a set.
209 * MACH_RCV_TOO_LARGE Message wouldn't fit into buffer.
210 * MACH_RCV_TIMED_OUT Timeout expired without a message.
211 * MACH_RCV_INTERRUPTED Reception interrupted.
212 * MACH_RCV_PORT_DIED Port/set died while receiving.
213 * MACH_RCV_PORT_CHANGED Port moved into set while receiving.
214 * MACH_RCV_INVALID_DATA Couldn't copy to user buffer.
215 * MACH_RCV_INVALID_NOTIFY Bad notify port.
216 * MACH_RCV_HEADER_ERROR
217 */
218
219mach_msg_return_t
220mach_msg_receive_results(void)
221{
222 thread_t self = current_thread();
223 ipc_space_t space = current_space();
224 vm_map_t map = current_map();
225
226 ipc_object_t object = self->ith_object;
227 mach_msg_return_t mr = self->ith_state;
228 mach_msg_header_t *msg = self->ith_msg;
229 mach_msg_option_t option = self->ith_option;
230 ipc_kmsg_t kmsg = self->ith_kmsg;
231 mach_port_seqno_t seqno = self->ith_seqno;
232 mach_msg_size_t slist_size = self->ith_scatter_list_size;
233
234 mach_msg_format_0_trailer_t *trailer;
235
236 ipc_object_release(object);
237
238 if (mr != MACH_MSG_SUCCESS) {
239
240 if (mr == MACH_RCV_TOO_LARGE ) {
241 if (option & MACH_RCV_LARGE) {
242 /*
243 * We need to inform the user-level code that it needs more
244 * space. The value for how much space is needed was returned
245 * in the msize save area instead of in the message (which was
246 * left on the queue).
247 */
248 if (copyout((char *) &self->ith_msize,
249 (char *) &msg->msgh_size,
250 sizeof(mach_msg_size_t)))
251 mr = MACH_RCV_INVALID_DATA;
252 goto out;
253 }
254
255 if (msg_receive_error(kmsg, msg, option, seqno, space)
256 == MACH_RCV_INVALID_DATA)
257 mr = MACH_RCV_INVALID_DATA;
258 }
259 goto out;
260 }
261
262 trailer = (mach_msg_format_0_trailer_t *)
263 ((vm_offset_t)&kmsg->ikm_header +
264 round_msg(kmsg->ikm_header.msgh_size));
265 if (option & MACH_RCV_TRAILER_MASK) {
266 trailer->msgh_seqno = seqno;
267 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
268 }
269
270 /*
271 * If MACH_RCV_OVERWRITE was specified, try to get the scatter
272 * list and verify it against the contents of the message. If
273 * there is any problem with it, we will continue without it as
274 * normal.
275 */
276 if (option & MACH_RCV_OVERWRITE) {
277 mach_msg_size_t slist_size = self->ith_scatter_list_size;
278 mach_msg_body_t *slist;
279
280 slist = ipc_kmsg_copyin_scatter(msg, slist_size, kmsg);
281 mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL, slist);
282 ipc_kmsg_free_scatter(slist, slist_size);
283 } else {
284 mr = ipc_kmsg_copyout(kmsg, space, map,
285 MACH_PORT_NULL, MACH_MSG_BODY_NULL);
286 }
287
288 if (mr != MACH_MSG_SUCCESS) {
289 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
290 if (ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size +
291 trailer->msgh_trailer_size) == MACH_RCV_INVALID_DATA)
292 mr = MACH_RCV_INVALID_DATA;
293 }
294 else {
295 if (msg_receive_error(kmsg, msg, option, seqno, space)
296 == MACH_RCV_INVALID_DATA)
297 mr = MACH_RCV_INVALID_DATA;
298 }
299 goto out;
300 }
301 mr = ipc_kmsg_put(msg,
302 kmsg,
303 kmsg->ikm_header.msgh_size +
304 trailer->msgh_trailer_size);
305 out:
306 return mr;
307}
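/*
 * Illustrative sketch (not part of this file): how a user-space caller can
 * use the MACH_RCV_LARGE handling above -- on MACH_RCV_TOO_LARGE the needed
 * size is copied into msgh_size and the message stays queued, so the caller
 * can grow the buffer and retry.  The slack constant, allocator use and
 * function name are made up for the example; error handling is elided.
 */
#if 0	/* user-space usage sketch, not compiled into the kernel */
#include <stdlib.h>
#include <mach/mach.h>

static mach_msg_return_t
example_receive_large(mach_port_t rcv_port, mach_msg_header_t **out)
{
	mach_msg_size_t size = sizeof(mach_msg_header_t) + 128;	/* slack for body + trailer */
	mach_msg_header_t *buf = malloc(size);
	mach_msg_return_t mr;

	for (;;) {
		mr = mach_msg(buf, MACH_RCV_MSG | MACH_RCV_LARGE, 0, size,
		    rcv_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		if (mr != MACH_RCV_TOO_LARGE)
			break;

		/* Kernel reported the required size in msgh_size and left the
		 * message on the queue; reallocate and receive it for real. */
		size = buf->msgh_size + 128;
		buf = realloc(buf, size);
	}

	*out = buf;
	return mr;
}
#endif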
308
309mach_msg_return_t
310mach_msg_receive(
311 mach_msg_header_t *msg,
312 mach_msg_option_t option,
313 mach_msg_size_t rcv_size,
314 mach_port_name_t rcv_name,
315 mach_msg_timeout_t timeout,
316 void (*continuation)(mach_msg_return_t),
317 mach_msg_size_t slist_size)
318{
319 thread_t self = current_thread();
320 ipc_space_t space = current_space();
321 vm_map_t map = current_map();
322 ipc_object_t object;
323 ipc_mqueue_t mqueue;
324 ipc_kmsg_t kmsg;
325 mach_port_seqno_t seqno;
326 mach_msg_return_t mr;
327 mach_msg_body_t *slist;
328 mach_msg_format_0_trailer_t *trailer;
329
330 mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
331 if (mr != MACH_MSG_SUCCESS) {
332 return mr;
333 }
334 /* hold ref for object */
335
336 self->ith_msg = msg;
337 self->ith_object = object;
338 self->ith_msize = rcv_size;
339 self->ith_option = option;
340 self->ith_scatter_list_size = slist_size;
341 self->ith_continuation = continuation;
342
343 ipc_mqueue_receive(mqueue, option, rcv_size, timeout, THREAD_ABORTSAFE);
344 if ((option & MACH_RCV_TIMEOUT) && timeout == 0)
345 _mk_sp_thread_perhaps_yield(self);
346 return mach_msg_receive_results();
347}
348
349void
350mach_msg_receive_continue(void)
351{
352 thread_t self = current_thread();
353
354 (*self->ith_continuation)(mach_msg_receive_results());
355}
356
357/*
358 * Toggle this to compile the hotpath in/out
359 * If compiled in, the run-time toggle "enable_hotpath" below
360 * eases testing & debugging
361 */
362#define ENABLE_HOTPATH 1 /* Hacked on for now */
363
364#if ENABLE_HOTPATH
365/*
366 * These counters allow tracing of hotpath behavior under test loads.
367 * A couple key counters are unconditional (see below).
368 */
369#define HOTPATH_DEBUG 0 /* Toggle to include lots of counters */
370#if HOTPATH_DEBUG
371#define HOT(expr) expr
372
373unsigned int c_mmot_FIRST = 0; /* Unused First Counter */
374unsigned int c_mmot_combined_S_R = 0; /* hotpath candidates */
375unsigned int c_mach_msg_trap_switch_fast = 0; /* hotpath successes */
376unsigned int c_mmot_kernel_send = 0; /* kernel server */
377unsigned int c_mmot_cold_000 = 0; /* see below ... */
378unsigned int c_mmot_smallsendsize = 0;
379unsigned int c_mmot_oddsendsize = 0;
380unsigned int c_mmot_bigsendsize = 0;
381unsigned int c_mmot_copyinmsg_fail = 0;
382unsigned int c_mmot_g_slow_copyin3 = 0;
383unsigned int c_mmot_cold_006 = 0;
384unsigned int c_mmot_cold_007 = 0;
385unsigned int c_mmot_cold_008 = 0;
386unsigned int c_mmot_cold_009 = 0;
387unsigned int c_mmot_cold_010 = 0;
388unsigned int c_mmot_cold_012 = 0;
389unsigned int c_mmot_cold_013 = 0;
390unsigned int c_mmot_cold_014 = 0;
391unsigned int c_mmot_cold_016 = 0;
392unsigned int c_mmot_cold_018 = 0;
393unsigned int c_mmot_cold_019 = 0;
394unsigned int c_mmot_cold_020 = 0;
395unsigned int c_mmot_cold_021 = 0;
396unsigned int c_mmot_cold_022 = 0;
397unsigned int c_mmot_cold_023 = 0;
398unsigned int c_mmot_cold_024 = 0;
399unsigned int c_mmot_cold_025 = 0;
400unsigned int c_mmot_cold_026 = 0;
401unsigned int c_mmot_cold_027 = 0;
402unsigned int c_mmot_hot_fSR_ok = 0;
403unsigned int c_mmot_cold_029 = 0;
404unsigned int c_mmot_cold_030 = 0;
405unsigned int c_mmot_cold_031 = 0;
406unsigned int c_mmot_cold_032 = 0;
407unsigned int c_mmot_cold_033 = 0;
408unsigned int c_mmot_bad_rcvr = 0;
409unsigned int c_mmot_rcvr_swapped = 0;
410unsigned int c_mmot_rcvr_locked = 0;
411unsigned int c_mmot_rcvr_tswapped = 0;
412unsigned int c_mmot_rcvr_freed = 0;
413unsigned int c_mmot_g_slow_copyout6 = 0;
414unsigned int c_mmot_g_slow_copyout5 = 0;
415unsigned int c_mmot_cold_037 = 0;
416unsigned int c_mmot_cold_038 = 0;
417unsigned int c_mmot_cold_039 = 0;
418unsigned int c_mmot_g_slow_copyout4 = 0;
419unsigned int c_mmot_g_slow_copyout3 = 0;
420unsigned int c_mmot_hot_ok1 = 0;
421unsigned int c_mmot_hot_ok2 = 0;
422unsigned int c_mmot_hot_ok3 = 0;
423unsigned int c_mmot_g_slow_copyout1 = 0;
424unsigned int c_mmot_g_slow_copyout2 = 0;
425unsigned int c_mmot_getback_fast_copyin = 0;
426unsigned int c_mmot_cold_048 = 0;
427unsigned int c_mmot_getback_FastSR = 0;
428unsigned int c_mmot_cold_050 = 0;
429unsigned int c_mmot_cold_051 = 0;
430unsigned int c_mmot_cold_052 = 0;
431unsigned int c_mmot_cold_053 = 0;
432unsigned int c_mmot_fastkernelreply = 0;
433unsigned int c_mmot_cold_055 = 0;
434unsigned int c_mmot_getback_fast_put = 0;
435unsigned int c_mmot_LAST = 0; /* End Marker - Unused */
436
437void db_mmot_zero_counters(void); /* forward; */
438void db_mmot_show_counters(void); /* forward; */
439
440void /* Call from the debugger to clear all counters */
441db_mmot_zero_counters(void)
442{
443 register unsigned int *ip = &c_mmot_FIRST;
444 while (ip <= &c_mmot_LAST)
445 *ip++ = 0;
446}
447
448void /* Call from the debugger to show all counters */
449db_mmot_show_counters(void)
450{
451#define xx(str) printf("%s: %d\n", # str, str);
452
453 xx(c_mmot_combined_S_R);
454 xx(c_mach_msg_trap_switch_fast);
455 xx(c_mmot_kernel_send);
456 xx(c_mmot_cold_000);
457 xx(c_mmot_smallsendsize);
458 xx(c_mmot_oddsendsize);
459 xx(c_mmot_bigsendsize);
460 xx(c_mmot_copyinmsg_fail);
461 xx(c_mmot_g_slow_copyin3);
462 xx(c_mmot_cold_006);
463 xx(c_mmot_cold_007);
464 xx(c_mmot_cold_008);
465 xx(c_mmot_cold_009);
466 xx(c_mmot_cold_010);
467 xx(c_mmot_cold_012);
468 xx(c_mmot_cold_013);
469 xx(c_mmot_cold_014);
470 xx(c_mmot_cold_016);
471 xx(c_mmot_cold_018);
472 xx(c_mmot_cold_019);
473 xx(c_mmot_cold_020);
474 xx(c_mmot_cold_021);
475 xx(c_mmot_cold_022);
476 xx(c_mmot_cold_023);
477 xx(c_mmot_cold_024);
478 xx(c_mmot_cold_025);
479 xx(c_mmot_cold_026);
480 xx(c_mmot_cold_027);
481 xx(c_mmot_hot_fSR_ok);
482 xx(c_mmot_cold_029);
483 xx(c_mmot_cold_030);
484 xx(c_mmot_cold_031);
485 xx(c_mmot_cold_032);
486 xx(c_mmot_cold_033);
487 xx(c_mmot_bad_rcvr);
488 xx(c_mmot_rcvr_swapped);
489 xx(c_mmot_rcvr_locked);
490 xx(c_mmot_rcvr_tswapped);
491 xx(c_mmot_rcvr_freed);
492 xx(c_mmot_g_slow_copyout6);
493 xx(c_mmot_g_slow_copyout5);
494 xx(c_mmot_cold_037);
495 xx(c_mmot_cold_038);
496 xx(c_mmot_cold_039);
497 xx(c_mmot_g_slow_copyout4);
498 xx(c_mmot_g_slow_copyout3);
499 xx(c_mmot_g_slow_copyout1);
500 xx(c_mmot_hot_ok3);
501 xx(c_mmot_hot_ok2);
502 xx(c_mmot_hot_ok1);
503 xx(c_mmot_g_slow_copyout2);
504 xx(c_mmot_getback_fast_copyin);
505 xx(c_mmot_cold_048);
506 xx(c_mmot_getback_FastSR);
507 xx(c_mmot_cold_050);
508 xx(c_mmot_cold_051);
509 xx(c_mmot_cold_052);
510 xx(c_mmot_cold_053);
511 xx(c_mmot_fastkernelreply);
512 xx(c_mmot_cold_055);
513 xx(c_mmot_getback_fast_put);
514
515#undef xx
516}
517
518#else /* !HOTPATH_DEBUG */
519
520/*
521 * Duplicate just these few so we can always do a quick sanity check
522 */
523unsigned int c_mmot_combined_S_R = 0; /* hotpath candidates */
524unsigned int c_mach_msg_trap_switch_fast = 0; /* hotpath successes */
525unsigned int c_mmot_kernel_send = 0; /* kernel server calls */
526#define HOT(expr) /* no optional counters */
527
528#endif /* !HOTPATH_DEBUG */
529
530boolean_t enable_hotpath = TRUE; /* Patchable, just in case ... */
531#endif /* ENABLE_HOTPATH */
532
533/*
534 * Routine: mach_msg_overwrite_trap [mach trap]
535 * Purpose:
536 * Possibly send a message; possibly receive a message.
537 * Conditions:
538 * Nothing locked.
539 * Returns:
540 * All of mach_msg_send and mach_msg_receive error codes.
541 */
542
543mach_msg_return_t
544mach_msg_overwrite_trap(
545 mach_msg_header_t *msg,
546 mach_msg_option_t option,
547 mach_msg_size_t send_size,
548 mach_msg_size_t rcv_size,
549 mach_port_name_t rcv_name,
550 mach_msg_timeout_t timeout,
551 mach_port_name_t notify,
552 mach_msg_header_t *rcv_msg,
553 mach_msg_size_t scatter_list_size)
554{
555 register mach_msg_header_t *hdr;
556 mach_msg_return_t mr = MACH_MSG_SUCCESS;
557 /* mask out some of the options before entering the hot path */
558 mach_msg_option_t masked_option =
559 option & ~(MACH_SEND_TRAILER|MACH_RCV_TRAILER_MASK|MACH_RCV_LARGE);
560 int i;
561
562#if ENABLE_HOTPATH
563 /* BEGINNING OF HOT PATH */
564 if ((masked_option == (MACH_SEND_MSG|MACH_RCV_MSG)) && enable_hotpath) {
565 register thread_t self = current_thread();
566 register mach_msg_format_0_trailer_t *trailer;
567
568 ipc_space_t space = current_act()->task->itk_space;
569 ipc_kmsg_t kmsg;
570 register ipc_port_t dest_port;
571 ipc_object_t rcv_object;
572 register ipc_mqueue_t rcv_mqueue;
573 mach_msg_size_t reply_size;
574 ipc_kmsg_t rcv_kmsg;
575
576 c_mmot_combined_S_R++;
577
578 /*
579 * This case is divided into eleven sections, each
580 * with a label. There are five optimized
581 * sections and six unoptimized sections, which
582 * do the same thing but handle all possible
583 * cases and are slower.
584 *
585 * The five sections for an RPC are
586 * 1) Get request message into a buffer.
587 * 2) Copyin request message and rcv_name.
588 * (fast_copyin or slow_copyin)
589 * 3) Enqueue request and dequeue reply.
590 * (fast_send_receive or
591 * slow_send and slow_receive)
592 * 4) Copyout reply message.
593 * (fast_copyout or slow_copyout)
594 * 5) Put reply message to user's buffer.
595 *
596 * Keep the locking hierarchy firmly in mind.
597 * (First spaces, then ports, then port sets,
598 * then message queues.) Only a non-blocking
599 * attempt can be made to acquire locks out of
600 * order, or acquire two locks on the same level.
601 * Acquiring two locks on the same level will
602 * fail if the objects are really the same,
603 * unless simple locking is disabled. This is OK,
604 * because then the extra unlock does nothing.
605 *
606 * There are two major reasons these RPCs can't use
607 * ipc_thread_switch and must use slow_send/slow_receive instead:
608 * 1) Kernel RPCs.
609 * 2) Servers fall behind clients, so
610 * client doesn't find a blocked server thread and
611 * server finds waiting messages and can't block.
612 */
613
614 mr = ipc_kmsg_get(msg, send_size, &kmsg);
615 if (mr != MACH_MSG_SUCCESS) {
616 return mr;
617 }
618 hdr = &kmsg->ikm_header;
619 trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr +
620 send_size);
621
622 fast_copyin:
623 /*
624 * optimized ipc_kmsg_copyin/ipc_mqueue_copyin
625 *
626 * We have the request message data in kmsg.
627 * Must still do copyin, send, receive, etc.
628 *
629 * If the message isn't simple, we can't combine
630 * ipc_kmsg_copyin_header and ipc_mqueue_copyin,
631 * because copyin of the message body might
632 * affect rcv_name.
633 */
634
635 switch (hdr->msgh_bits) {
636 case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
637 MACH_MSG_TYPE_MAKE_SEND_ONCE): {
638 register ipc_entry_t table;
639 register ipc_entry_num_t size;
640 register ipc_port_t reply_port;
641
642 /* sending a request message */
643
644 {
645 register mach_port_index_t index;
646 register mach_port_gen_t gen;
647
648 {
649 register mach_port_name_t reply_name =
650 (mach_port_name_t)hdr->msgh_local_port;
651
652 if (reply_name != rcv_name) {
653 HOT(c_mmot_g_slow_copyin3++);
654 goto slow_copyin;
655 }
656
657 /* optimized ipc_entry_lookup of reply_name */
658
659 index = MACH_PORT_INDEX(reply_name);
660 gen = MACH_PORT_GEN(reply_name);
661
662 is_read_lock(space);
663 assert(space->is_active);
664
665 size = space->is_table_size;
666 table = space->is_table;
667
668 {
669 register ipc_entry_t entry;
670 register ipc_entry_bits_t bits;
671
672 if (index < size) {
673 entry = &table[index];
674 bits = entry->ie_bits;
675 if (IE_BITS_GEN(bits) != gen ||
676 (bits & IE_BITS_COLLISION)) {
677 entry = IE_NULL;
678 }
679 } else {
680 entry = IE_NULL;
681 }
682 if (entry == IE_NULL) {
683 entry = ipc_entry_lookup(space, reply_name);
684 if (entry == IE_NULL) {
685 HOT(c_mmot_cold_006++);
686 goto abort_request_copyin;
687 }
688 bits = entry->ie_bits;
689 }
690
691 /* check type bit */
692
693 if (! (bits & MACH_PORT_TYPE_RECEIVE)) {
694 HOT(c_mmot_cold_007++);
695 goto abort_request_copyin;
696 }
697
698 reply_port = (ipc_port_t) entry->ie_object;
699 assert(reply_port != IP_NULL);
700 }
701 }
702 }
703
704 /* optimized ipc_entry_lookup of dest_name */
705
706 {
707 register mach_port_index_t index;
708 register mach_port_gen_t gen;
709
710 {
711 register mach_port_name_t dest_name =
712 (mach_port_name_t)hdr->msgh_remote_port;
713
714 index = MACH_PORT_INDEX(dest_name);
715 gen = MACH_PORT_GEN(dest_name);
716
717 {
718 register ipc_entry_t entry;
719 register ipc_entry_bits_t bits;
720
721 if (index < size) {
722 entry = &table[index];
723 bits = entry->ie_bits;
724 if (IE_BITS_GEN(bits) != gen ||
725 (bits & IE_BITS_COLLISION)) {
726 entry = IE_NULL;
727 }
728 } else {
729 entry = IE_NULL;
730 }
731 if (entry == IE_NULL) {
732 entry = ipc_entry_lookup(space, dest_name);
733 if (entry == IE_NULL) {
734 HOT(c_mmot_cold_008++);
735 goto abort_request_copyin;
736 }
737 bits = entry->ie_bits;
738 }
739
740 /* check type bit */
741
742 if (! (bits & MACH_PORT_TYPE_SEND)) {
743 HOT(c_mmot_cold_009++);
744 goto abort_request_copyin;
745 }
746
747 assert(IE_BITS_UREFS(bits) > 0);
748
749 dest_port = (ipc_port_t) entry->ie_object;
750 assert(dest_port != IP_NULL);
751 }
752 }
753 }
754
755 /*
756 * To do an atomic copyin, need simultaneous
757 * locks on both ports and the space. If
758 * dest_port == reply_port, and simple locking is
759 * enabled, then we will abort. Otherwise it's
760 * OK to unlock twice.
761 */
762
763 ip_lock(dest_port);
764 if (!ip_active(dest_port) ||
765 !ip_lock_try(reply_port)) {
766 ip_unlock(dest_port);
767 HOT(c_mmot_cold_010++);
768 goto abort_request_copyin;
769 }
770 is_read_unlock(space);
771
772 assert(dest_port->ip_srights > 0);
773 dest_port->ip_srights++;
774 ip_reference(dest_port);
775
776 assert(ip_active(reply_port));
777 assert(reply_port->ip_receiver_name ==
778 (mach_port_name_t)hdr->msgh_local_port);
779 assert(reply_port->ip_receiver == space);
780
781 reply_port->ip_sorights++;
782 ip_reference(reply_port);
783
784 hdr->msgh_bits =
785 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
786 MACH_MSG_TYPE_PORT_SEND_ONCE);
787 hdr->msgh_remote_port = dest_port;
788 hdr->msgh_local_port = reply_port;
789
790 /* make sure we can queue to the destination */
791
792 if (dest_port->ip_receiver == ipc_space_kernel) {
793 /*
794 * The kernel server has a reference to
795 * the reply port, which it hands back
796 * to us in the reply message. We do
797 * not need to keep another reference to
798 * it.
799 */
800 ip_unlock(reply_port);
801
802 assert(ip_active(dest_port));
803 dest_port->ip_messages.imq_seqno++;
804 ip_unlock(dest_port);
805 goto kernel_send;
806 }
807
808 if (imq_full(&dest_port->ip_messages)) {
809 HOT(c_mmot_cold_013++);
810 goto abort_request_send_receive;
811 }
812
813 /* optimized ipc_mqueue_copyin */
814
815 rcv_object = (ipc_object_t) reply_port;
816 io_reference(rcv_object);
817 rcv_mqueue = &reply_port->ip_messages;
818 io_unlock(rcv_object);
819 HOT(c_mmot_hot_fSR_ok++);
820 goto fast_send_receive;
821
822 abort_request_copyin:
823 is_read_unlock(space);
824 goto slow_copyin;
825
826 abort_request_send_receive:
827 ip_unlock(dest_port);
828 ip_unlock(reply_port);
829 goto slow_send;
830 }
831
832 case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
833 register ipc_entry_num_t size;
834 register ipc_entry_t table;
835
836 /* sending a reply message */
837
838 {
839 register mach_port_name_t reply_name =
840 (mach_port_name_t)hdr->msgh_local_port;
841
842 if (reply_name != MACH_PORT_NULL) {
843 HOT(c_mmot_cold_018++);
844 goto slow_copyin;
845 }
846 }
847
848 is_write_lock(space);
849 assert(space->is_active);
850
851 /* optimized ipc_entry_lookup */
852
853 size = space->is_table_size;
854 table = space->is_table;
855
856 {
857 register ipc_entry_t entry;
858 register mach_port_gen_t gen;
859 register mach_port_index_t index;
860 ipc_table_index_t *requests;
861
862 {
863 register mach_port_name_t dest_name =
864 (mach_port_name_t)hdr->msgh_remote_port;
865
866 index = MACH_PORT_INDEX(dest_name);
867 gen = MACH_PORT_GEN(dest_name);
868 }
869
870 if (index >= size) {
871 HOT(c_mmot_cold_019++);
872 goto abort_reply_dest_copyin;
873 }
874
875 entry = &table[index];
876
877 /* check generation, collision bit, and type bit */
878
879 if ((entry->ie_bits & (IE_BITS_GEN_MASK|
880 IE_BITS_COLLISION|
881 MACH_PORT_TYPE_SEND_ONCE)) !=
882 (gen | MACH_PORT_TYPE_SEND_ONCE)) {
883 HOT(c_mmot_cold_020++);
884 goto abort_reply_dest_copyin;
885 }
886
887 /* optimized ipc_right_copyin */
888
889 assert(IE_BITS_TYPE(entry->ie_bits) ==
890 MACH_PORT_TYPE_SEND_ONCE);
891 assert(IE_BITS_UREFS(entry->ie_bits) == 1);
892
893 if (entry->ie_request != 0) {
894 HOT(c_mmot_cold_021++);
895 goto abort_reply_dest_copyin;
896 }
897
898 dest_port = (ipc_port_t) entry->ie_object;
899 assert(dest_port != IP_NULL);
900
901 ip_lock(dest_port);
902 if (!ip_active(dest_port)) {
903 ip_unlock(dest_port);
904 HOT(c_mmot_cold_022++);
905 goto abort_reply_dest_copyin;
906 }
907
908 assert(dest_port->ip_sorights > 0);
909
910 /* optimized ipc_entry_dealloc */
911
912
913 entry->ie_bits = gen;
914 entry->ie_next = table->ie_next;
915 table->ie_next = index;
916 entry->ie_object = IO_NULL;
917 }
918
919 hdr->msgh_bits =
920 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
921 0);
922 hdr->msgh_remote_port = dest_port;
923
924 /* make sure we can queue to the destination */
925
926 assert(dest_port->ip_receiver != ipc_space_kernel);
927
928 /* optimized ipc_entry_lookup/ipc_mqueue_copyin */
929
930 {
931 register ipc_entry_t entry;
932 register ipc_entry_bits_t bits;
933
934 {
935 register mach_port_index_t index;
936 register mach_port_gen_t gen;
937
938 index = MACH_PORT_INDEX(rcv_name);
939 gen = MACH_PORT_GEN(rcv_name);
940
941 if (index < size) {
942 entry = &table[index];
943 bits = entry->ie_bits;
944 if (IE_BITS_GEN(bits) != gen ||
945 (bits & IE_BITS_COLLISION)) {
946 entry = IE_NULL;
947 }
948 } else {
949 entry = IE_NULL;
950 }
951 if (entry == IE_NULL) {
952 entry = ipc_entry_lookup(space, rcv_name);
953 if (entry == IE_NULL) {
954 HOT(c_mmot_cold_024++);
955 goto abort_reply_rcv_copyin;
956 }
957 bits = entry->ie_bits;
958 }
959
960 }
961
962 /* check type bits; looking for receive or set */
963#if 0
964 /*
965 * JMM - The check below for messages in the receive
966 * mqueue is insufficient to work with port sets, since
967 * the messages stay in the port queues. For now, don't
968 * allow portsets (but receiving on portsets when sending
969 * a message to a send-once right is actually a very
970 * common case, so we should re-enable this).
971 */
972 if (bits & MACH_PORT_TYPE_PORT_SET) {
973 register ipc_pset_t rcv_pset;
974
975 rcv_pset = (ipc_pset_t) entry->ie_object;
976 assert(rcv_pset != IPS_NULL);
977
978 ips_lock(rcv_pset);
979 assert(ips_active(rcv_pset));
980
981 rcv_object = (ipc_object_t) rcv_pset;
982 rcv_mqueue = &rcv_pset->ips_messages;
983 } else
984#endif /* 0 */
985 if (bits & MACH_PORT_TYPE_RECEIVE) {
986 register ipc_port_t rcv_port;
987
988 rcv_port = (ipc_port_t) entry->ie_object;
989 assert(rcv_port != IP_NULL);
990
991 if (!ip_lock_try(rcv_port)) {
992 HOT(c_mmot_cold_025++);
993 goto abort_reply_rcv_copyin;
994 }
995 assert(ip_active(rcv_port));
996
997 if (rcv_port->ip_pset_count != 0) {
998 ip_unlock(rcv_port);
999 HOT(c_mmot_cold_026++);
1000 goto abort_reply_rcv_copyin;
1001 }
1002
1003 rcv_object = (ipc_object_t) rcv_port;
1004 rcv_mqueue = &rcv_port->ip_messages;
1005 } else {
1006 HOT(c_mmot_cold_027++);
1007 goto abort_reply_rcv_copyin;
1008 }
1009 }
1010
1011 is_write_unlock(space);
1012 io_reference(rcv_object);
1013 io_unlock(rcv_object);
1014 HOT(c_mmot_hot_fSR_ok++);
1015 goto fast_send_receive;
1016
1017 abort_reply_dest_copyin:
1018 is_write_unlock(space);
1019 HOT(c_mmot_cold_029++);
1020 goto slow_copyin;
1021
1022 abort_reply_rcv_copyin:
1023 ip_unlock(dest_port);
1024 is_write_unlock(space);
1025 HOT(c_mmot_cold_030++);
1026 goto slow_send;
1027 }
1028
1029 default:
1030 HOT(c_mmot_cold_031++);
1031 goto slow_copyin;
1032 }
1033 /*NOTREACHED*/
1034
1035 fast_send_receive:
1036 /*
1037 * optimized ipc_mqueue_send/ipc_mqueue_receive
1038 *
1039 * Finished get/copyin of kmsg and copyin of rcv_name.
1040 * space is unlocked, dest_port is locked,
1041 * we can queue kmsg to dest_port,
1042 * rcv_mqueue is set, and rcv_object holds a ref
1043 * so the mqueue cannot go away.
1044 *
1045 * JMM - For now, rcv_object is just a port. Portsets
1046 * are disabled for the time being.
1047 */
1048
1049 assert(ip_active(dest_port));
1050 assert(dest_port->ip_receiver != ipc_space_kernel);
1051 assert(!imq_full(&dest_port->ip_messages) ||
1052 (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) ==
1053 MACH_MSG_TYPE_PORT_SEND_ONCE));
1054 assert((hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0);
1055
1056 {
1057 register ipc_mqueue_t dest_mqueue;
1058 wait_queue_t waitq;
1059 thread_t receiver;
1060#if THREAD_SWAPPER
1061 thread_act_t rcv_act;
1062#endif
1063 spl_t s;
1064
1065 s = splsched();
1066 dest_mqueue = &dest_port->ip_messages;
1067 waitq = &dest_mqueue->imq_wait_queue;
1068 imq_lock(dest_mqueue);
1069
1070 wait_queue_peek64_locked(waitq, IPC_MQUEUE_RECEIVE, &receiver, &waitq);
1071 /* queue still locked, thread locked - but still on q */
1072
1073 if (receiver == THREAD_NULL) {
1074 abort_send_receive:
1075 imq_unlock(dest_mqueue);
1076 splx(s);
1077 ip_unlock(dest_port);
1078 ipc_object_release(rcv_object);
1079 HOT(c_mmot_cold_032++);
1080 goto slow_send;
1081 }
1082
1083 assert(receiver->wait_queue == waitq);
1084 assert(receiver->wait_event == IPC_MQUEUE_RECEIVE);
1085
1086 /*
1087 * See if it is still running on another processor (trying to
1088 * block itself). If so, fall off.
1089 *
1090 * JMM - We have an opportunity here. Since the thread is locked
1091 * and we find it runnable, it must still be trying to get into
1092 * thread_block on itself. We could just "hand him the message"
1093 * and let him go (thread_go_locked()) and then fall down into a
1094 * slow receive for ourselves. Only his RECEIVE_TOO_LARGE handling
1095 * runs afoul of that. Clean this up!
1096 */
1097 if ((receiver->state & (TH_RUN|TH_WAIT)) != TH_WAIT) {
1098 assert(NCPUS > 1);
1099 HOT(c_mmot_cold_033++);
1100 fall_off:
1101 thread_unlock(receiver);
1102 if (waitq != &dest_mqueue->imq_wait_queue)
1103 wait_queue_unlock(waitq);
1104 goto abort_send_receive;
1105 }
1106
1107 /*
1108 * Check that the receiver can stay on the hot path.
1109 */
1110 if (send_size + REQUESTED_TRAILER_SIZE(receiver->ith_option) >
1111 receiver->ith_msize) {
1112 /*
1113 * The receiver can't accept the message.
1114 */
1115 HOT(c_mmot_bad_rcvr++);
1116 goto fall_off;
1117 }
1118
1119#if THREAD_SWAPPER
1120 /*
1121 * Receiver looks okay -- is it swapped in?
1122 */
1123 rcv_act = receiver->top_act;
1124 if (rcv_act->swap_state != TH_SW_IN &&
1125 rcv_act->swap_state != TH_SW_UNSWAPPABLE) {
1126 HOT(c_mmot_rcvr_swapped++);
1127 goto fall_off;
1128 }
1129
1130 /*
1131 * Make sure receiver stays swapped in (if we can).
1132 */
1133 if (!act_lock_try(rcv_act)) { /* out of order! */
1134 HOT(c_mmot_rcvr_locked++);
1135 goto fall_off;
1136 }
1137
1138 /*
1139 * Check for task swapping in progress affecting
1140 * receiver. Since rcv_act is attached to a shuttle,
1141 * its swap_state is covered by shuttle's thread_lock()
1142 * (sigh).
1143 */
1144 if ((rcv_act->swap_state != TH_SW_IN &&
1145 rcv_act->swap_state != TH_SW_UNSWAPPABLE) ||
1146 rcv_act->ast & AST_SWAPOUT) {
1147 act_unlock(rcv_act);
1148 HOT(c_mmot_rcvr_tswapped++);
1149 goto fall_off;
1150 }
1151
1152 /*
1153 * We don't need to make receiver unswappable here -- holding
1154 * act_lock() of rcv_act is sufficient to prevent either thread
1155 * or task swapping from changing its state (see swapout_scan(),
1156 * task_swapout()). Don't release lock till receiver's state
1157 * is consistent. Its task may then be marked for swapout,
1158 * but that's life.
1159 */
1160 /*
1161 * NB: act_lock(rcv_act) still held
1162 */
1163#endif /* THREAD_SWAPPER */
1164
1165 /*
1166 * Before committing to the handoff, make sure that we are
1167 * really going to block (i.e. there are no messages already
1168 * queued for us). This violates lock ordering, so make sure
1169 * we don't deadlock. After the trylock succeeds below, we
1170 * may have up to 3 message queues locked:
1171 * - the dest port mqueue
1172 * - a portset mqueue (where waiting receiver was found)
1173 * - finally our own rcv_mqueue
1174 *
1175 * JMM - Need to make this check appropriate for portsets as
1176 * well before re-enabling them.
1177 */
1178 if (!imq_lock_try(rcv_mqueue)) {
1179 goto fall_off;
1180 }
1181 if (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages) != IKM_NULL) {
1182 imq_unlock(rcv_mqueue);
1183 HOT(c_mmot_cold_033++);
1184 goto fall_off;
1185 }
1186
1187 /* At this point we are committed to do the "handoff". */
1188 c_mach_msg_trap_switch_fast++;
1189
1190 /*
1191 * JMM - Go ahead and pull the receiver from the runq. If the
1192 * runq wasn't the one for the mqueue, unlock it.
1193 */
1194 wait_queue_pull_thread_locked(waitq,
1195 receiver,
1196 (waitq != &dest_mqueue->imq_wait_queue));
1197
1198 /*
1199 * Store the kmsg and seqno where the receiver can pick it up.
1200 */
1201 receiver->ith_state = MACH_MSG_SUCCESS;
1202 receiver->ith_kmsg = kmsg;
1203 receiver->ith_seqno = dest_mqueue->imq_seqno++;
1204
1205 /*
1206 * Inline thread_go_locked
1207 *
1208 * JMM - Including hacked in version of setrun scheduler op
1209 * that doesn't try to put thread on a runq.
1210 */
1211 {
1212 receiver->state &= ~(TH_WAIT|TH_UNINT);
1213 hw_atomic_add(&receiver->processor_set->run_count, 1);
1214 receiver->state |= TH_RUN;
1215 receiver->wait_result = THREAD_AWAKENED;
1216
1217 receiver->computation_metered = 0;
1218 receiver->reason = AST_NONE;
1219 }
1220
1221 thread_unlock(receiver);
1222#if THREAD_SWAPPER
1223 act_unlock(rcv_act);
1224#endif /* THREAD_SWAPPER */
1225
1226 imq_unlock(dest_mqueue);
1227 ip_unlock(dest_port);
1228 current_task()->messages_sent++;
1229
1230
1231 /*
1232 * Put self on receive port's queue.
1233 * Also save state that the sender of
1234 * our reply message needs to determine if it
1235 * can hand off directly back to us.
1236 */
1237 self->ith_msg = (rcv_msg) ? rcv_msg : msg;
1238 self->ith_object = rcv_object; /* still holds reference */
1239 self->ith_msize = rcv_size;
1240 self->ith_option = option;
1241 self->ith_scatter_list_size = scatter_list_size;
1242 self->ith_continuation = thread_syscall_return;
1243
1244 waitq = &rcv_mqueue->imq_wait_queue;
1245 (void)wait_queue_assert_wait64_locked(waitq,
1246 IPC_MQUEUE_RECEIVE,
1247 THREAD_ABORTSAFE,
1248 TRUE); /* unlock? */
1249 /* rcv_mqueue is unlocked */
1250
1251 /*
1252 * Switch directly to receiving thread, and block
1253 * this thread as though it had called ipc_mqueue_receive.
1254 */
1255 thread_run(self, ipc_mqueue_receive_continue, receiver);
1256 /* NOTREACHED */
1257 }
1258
1259 fast_copyout:
1260 /*
1261 * Nothing locked and no references held, except
1262 * we have kmsg with msgh_seqno filled in. Must
1263 * still check against rcv_size and do
1264 * ipc_kmsg_copyout/ipc_kmsg_put.
1265 */
1266
1267 reply_size = send_size + trailer->msgh_trailer_size;
1268 if (rcv_size < reply_size) {
1269 HOT(c_mmot_g_slow_copyout6++);
1270 goto slow_copyout;
1271 }
1272
1273 /* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */
1274
1275 switch (hdr->msgh_bits) {
1276 case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
1277 MACH_MSG_TYPE_PORT_SEND_ONCE): {
1278 ipc_port_t reply_port =
1279 (ipc_port_t) hdr->msgh_local_port;
1280 mach_port_name_t dest_name, reply_name;
1281
1282 /* receiving a request message */
1283
1284 if (!IP_VALID(reply_port)) {
1285 HOT(c_mmot_g_slow_copyout5++);
1286 goto slow_copyout;
1287 }
1288
1289 is_write_lock(space);
1290 assert(space->is_active);
1291
1292 /*
1293 * To do an atomic copyout, need simultaneous
1294 * locks on both ports and the space. If
1295 * dest_port == reply_port, and simple locking is
1296 * enabled, then we will abort. Otherwise it's
1297 * OK to unlock twice.
1298 */
1299
1300 ip_lock(dest_port);
1301 if (!ip_active(dest_port) ||
1302 !ip_lock_try(reply_port)) {
1303 HOT(c_mmot_cold_037++);
1304 goto abort_request_copyout;
1305 }
1306
1307 if (!ip_active(reply_port)) {
1308 ip_unlock(reply_port);
1309 HOT(c_mmot_cold_038++);
1310 goto abort_request_copyout;
1311 }
1312
1313 assert(reply_port->ip_sorights > 0);
1314 ip_unlock(reply_port);
1315
1316 {
1317 register ipc_entry_t table;
1318 register ipc_entry_t entry;
1319 register mach_port_index_t index;
1320
1321 /* optimized ipc_entry_get */
1322
1323 table = space->is_table;
1324 index = table->ie_next;
1325
1326 if (index == 0) {
1327 HOT(c_mmot_cold_039++);
1328 goto abort_request_copyout;
1329 }
1330
1331 entry = &table[index];
1332 table->ie_next = entry->ie_next;
1333 entry->ie_request = 0;
1334
1335 {
1336 register mach_port_gen_t gen;
1337
1338 assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
1339 gen = IE_BITS_NEW_GEN(entry->ie_bits);
1340
1341 reply_name = MACH_PORT_MAKE(index, gen);
1342
1343 /* optimized ipc_right_copyout */
1344
1345 entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
1346 }
1347
1348 assert(MACH_PORT_VALID(reply_name));
1349 entry->ie_object = (ipc_object_t) reply_port;
1350 is_write_unlock(space);
1351 }
1352
1353 /* optimized ipc_object_copyout_dest */
1354
1355 assert(dest_port->ip_srights > 0);
1356 ip_release(dest_port);
1357
1358 if (dest_port->ip_receiver == space)
1359 dest_name = dest_port->ip_receiver_name;
1360 else
1361 dest_name = MACH_PORT_NULL;
1362
1363 if ((--dest_port->ip_srights == 0) &&
1364 (dest_port->ip_nsrequest != IP_NULL)) {
1365 ipc_port_t nsrequest;
1366 mach_port_mscount_t mscount;
1367
1368 /* a rather rare case */
1369
1370 nsrequest = dest_port->ip_nsrequest;
1371 mscount = dest_port->ip_mscount;
1372 dest_port->ip_nsrequest = IP_NULL;
1373 ip_unlock(dest_port);
1374 ipc_notify_no_senders(nsrequest, mscount);
1375 } else
1376 ip_unlock(dest_port);
1377
1378 hdr->msgh_bits =
1379 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
1380 MACH_MSG_TYPE_PORT_SEND);
1381 hdr->msgh_remote_port = (mach_port_t)reply_name;
1382 hdr->msgh_local_port = (mach_port_t)dest_name;
1383 HOT(c_mmot_hot_ok1++);
1384 goto fast_put;
1385
1386 abort_request_copyout:
1387 ip_unlock(dest_port);
1388 is_write_unlock(space);
1389 HOT(c_mmot_g_slow_copyout4++);
1390 goto slow_copyout;
1391 }
1392
1393 case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1394 register mach_port_name_t dest_name;
1395
1396 /* receiving a reply message */
1397
1398 ip_lock(dest_port);
1399 if (!ip_active(dest_port)) {
1400 ip_unlock(dest_port);
1401 HOT(c_mmot_g_slow_copyout3++);
1402 goto slow_copyout;
1403 }
1404
1405 /* optimized ipc_object_copyout_dest */
1406
1407 assert(dest_port->ip_sorights > 0);
1408
1409 if (dest_port->ip_receiver == space) {
1410 ip_release(dest_port);
1411 dest_port->ip_sorights--;
1412 dest_name = dest_port->ip_receiver_name;
1413 ip_unlock(dest_port);
1414 } else {
1415 ip_unlock(dest_port);
1416
1417 ipc_notify_send_once(dest_port);
1418 dest_name = MACH_PORT_NULL;
1419 }
1420
1421 hdr->msgh_bits = MACH_MSGH_BITS(0,
1422 MACH_MSG_TYPE_PORT_SEND_ONCE);
1423 hdr->msgh_remote_port = MACH_PORT_NULL;
1424 hdr->msgh_local_port = (ipc_port_t)dest_name;
1425 HOT(c_mmot_hot_ok2++);
1426 goto fast_put;
1427 }
1428
1429 case MACH_MSGH_BITS_COMPLEX|
1430 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1431 register mach_port_name_t dest_name;
1432
1433 /* receiving a complex reply message */
1434
1435 ip_lock(dest_port);
1436 if (!ip_active(dest_port)) {
1437 ip_unlock(dest_port);
1438 HOT(c_mmot_g_slow_copyout1++);
1439 goto slow_copyout;
1440 }
1441
1442 /* optimized ipc_object_copyout_dest */
1443
1444 assert(dest_port->ip_sorights > 0);
1445
1446 if (dest_port->ip_receiver == space) {
1447 ip_release(dest_port);
1448 dest_port->ip_sorights--;
1449 dest_name = dest_port->ip_receiver_name;
1450 ip_unlock(dest_port);
1451 } else {
1452 ip_unlock(dest_port);
1453
1454 ipc_notify_send_once(dest_port);
1455 dest_name = MACH_PORT_NULL;
1456 }
1457
1458 hdr->msgh_bits =
1459 MACH_MSGH_BITS_COMPLEX |
1460 MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND_ONCE);
1461 hdr->msgh_remote_port = MACH_PORT_NULL;
1462 hdr->msgh_local_port = (mach_port_t)dest_name;
1463
1464 mr = ipc_kmsg_copyout_body(kmsg, space,
1465 current_map(),
1466 MACH_MSG_BODY_NULL);
1467 if (mr != MACH_MSG_SUCCESS) {
1468 if (ipc_kmsg_put(msg, kmsg, hdr->msgh_size +
1469 trailer->msgh_trailer_size) ==
1470 MACH_RCV_INVALID_DATA)
1471 return MACH_RCV_INVALID_DATA;
1472 else
1473 return mr | MACH_RCV_BODY_ERROR;
1474 }
1475 HOT(c_mmot_hot_ok3++);
1476 goto fast_put;
1477 }
1478
1479 default:
1480 HOT(c_mmot_g_slow_copyout2++);
1481 goto slow_copyout;
1482 }
1483 /*NOTREACHED*/
1484
1485 fast_put:
1486 mr = ipc_kmsg_put(rcv_msg ? rcv_msg : msg,
1487 kmsg,
1488 hdr->msgh_size + trailer->msgh_trailer_size);
1489 if (mr != MACH_MSG_SUCCESS) {
1490 return MACH_RCV_INVALID_DATA;
1491 }
1492 current_task()->messages_received++;
1493 return mr;
1494
1495
1496 /* BEGINNING OF WARM PATH */
1497
1498 /*
1499 * The slow path has a few non-register temporary
1500 * variables used only for call-by-reference.
1501 */
1502
1503 slow_copyin:
1504 {
1505 ipc_kmsg_t temp_kmsg;
1506 mach_port_seqno_t temp_seqno;
1507 ipc_object_t temp_rcv_object;
1508 ipc_mqueue_t temp_rcv_mqueue;
1509 register mach_port_name_t reply_name =
1510 (mach_port_name_t)hdr->msgh_local_port;
1511
1512
1513 /*
1514 * We have the message data in kmsg, but
1515 * we still need to copyin, send it,
1516 * receive a reply, and do copyout.
1517 */
1518
1519 mr = ipc_kmsg_copyin(kmsg, space, current_map(),
1520 MACH_PORT_NULL);
1521 if (mr != MACH_MSG_SUCCESS) {
1522 ipc_kmsg_free(kmsg);
1523 return(mr);
1524 }
1525
1526 /* try to get back on optimized path */
1527
1528 if ((reply_name != rcv_name) ||
1529 (hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR)) {
1530 HOT(c_mmot_cold_048++);
1531 goto slow_send;
1532 }
1533
1534 dest_port = (ipc_port_t) hdr->msgh_remote_port;
1535 assert(IP_VALID(dest_port));
1536
1537 ip_lock(dest_port);
1538 if (!ip_active(dest_port)) {
1539 ip_unlock(dest_port);
1540 goto slow_send;
1541 }
1542
1543 if (dest_port->ip_receiver == ipc_space_kernel) {
1544 dest_port->ip_messages.imq_seqno++;
1545 ip_unlock(dest_port);
1546 goto kernel_send;
1547 }
1548
1549 if (!imq_full(&dest_port->ip_messages) ||
1550 (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) ==
1551 MACH_MSG_TYPE_PORT_SEND_ONCE))
1552 {
1553 /*
1554 * Try an optimized ipc_mqueue_copyin.
1555 * It will work if this is a request message.
1556 */
1557
1558 register ipc_port_t reply_port;
1559
1560 reply_port = (ipc_port_t) hdr->msgh_local_port;
1561 if (IP_VALID(reply_port)) {
1562 if (ip_lock_try(reply_port)) {
1563 if (ip_active(reply_port) &&
1564 reply_port->ip_receiver == space &&
1565 reply_port->ip_receiver_name == rcv_name &&
1566 reply_port->ip_pset_count == 0)
1567 {
1568 /* Grab a reference to the reply port. */
1569 rcv_object = (ipc_object_t) reply_port;
1570 io_reference(rcv_object);
1571 rcv_mqueue = &reply_port->ip_messages;
1572 io_unlock(rcv_object);
1573 HOT(c_mmot_getback_FastSR++);
1574 goto fast_send_receive;
1575 }
1576 ip_unlock(reply_port);
1577 }
1578 }
1579 }
1580
1581 ip_unlock(dest_port);
1582 HOT(c_mmot_cold_050++);
1583 goto slow_send;
1584
1585 kernel_send:
1586 /*
1587 * Special case: send message to kernel services.
1588 * The request message has been copied into the
1589 * kmsg. Nothing is locked.
1590 */
1591
1592 {
1593 register ipc_port_t reply_port;
1594 mach_port_seqno_t local_seqno;
1595 spl_t s;
1596
1597 /*
1598 * Perform the kernel function.
1599 */
1600 c_mmot_kernel_send++;
1601
1602 current_task()->messages_sent++;
1603
1604 kmsg = ipc_kobject_server(kmsg);
1605 if (kmsg == IKM_NULL) {
1606 /*
1607 * No reply. Take the
1608 * slow receive path.
1609 */
1610 HOT(c_mmot_cold_051++);
1611 goto slow_get_rcv_port;
1612 }
1613
1614 /*
1615 * Check that:
1616 * the reply port is alive
1617 * we hold the receive right
1618 * the name has not changed.
1619 * the port is not in a set
1620 * If any of these are not true,
1621 * we cannot directly receive the reply
1622 * message.
1623 */
1624 hdr = &kmsg->ikm_header;
1625 send_size = hdr->msgh_size;
1626 trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr +
1627 round_msg(send_size));
1628 reply_port = (ipc_port_t) hdr->msgh_remote_port;
1629 ip_lock(reply_port);
1630
1631 if ((!ip_active(reply_port)) ||
1632 (reply_port->ip_receiver != space) ||
1633 (reply_port->ip_receiver_name != rcv_name) ||
1634 (reply_port->ip_pset_count != 0))
1635 {
1636 ip_unlock(reply_port);
1637 ipc_kmsg_send_always(kmsg);
1638 HOT(c_mmot_cold_052++);
1639 goto slow_get_rcv_port;
1640 }
1641
1642 s = splsched();
1643 rcv_mqueue = &reply_port->ip_messages;
1644 imq_lock(rcv_mqueue);
1645
1646 /* keep port locked, and don't change ref count yet */
1647
1648 /*
1649 * If there are messages on the port
1650 * or other threads waiting for a message,
1651 * we cannot directly receive the reply.
1652 */
1653 if (!wait_queue_empty(&rcv_mqueue->imq_wait_queue) ||
1654 (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages) != IKM_NULL))
1655 {
1656 imq_unlock(rcv_mqueue);
1657 splx(s);
1658 ip_unlock(reply_port);
1659 ipc_kmsg_send_always(kmsg);
1660 HOT(c_mmot_cold_053++);
1661 goto slow_get_rcv_port;
1662 }
1663
1664 /*
1665 * We can directly receive this reply.
1666 * Since there were no messages queued
1667 * on the reply port, there should be
1668 * no threads blocked waiting to send.
1669 */
1670 dest_port = reply_port;
1671 local_seqno = rcv_mqueue->imq_seqno++;
1672 imq_unlock(rcv_mqueue);
1673 splx(s);
1674
1675 /*
1676 * inline ipc_object_release.
1677 * Port is still locked.
1678 * Reference count was not incremented.
1679 */
1680 ip_check_unlock(reply_port);
1681
1682 if (option & MACH_RCV_TRAILER_MASK) {
1683 trailer->msgh_seqno = local_seqno;
1684 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
1685 }
1686 /* copy out the kernel reply */
1687 HOT(c_mmot_fastkernelreply++);
1688 goto fast_copyout;
1689 }
1690
1691 slow_send:
1692 /*
1693 * Nothing is locked. We have acquired kmsg, but
1694 * we still need to send it and receive a reply.
1695 */
1696
1697 mr = ipc_kmsg_send(kmsg, MACH_MSG_OPTION_NONE,
1698 MACH_MSG_TIMEOUT_NONE);
1699 if (mr != MACH_MSG_SUCCESS) {
1700 mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
1701 current_map(),
1702 MACH_MSG_BODY_NULL);
1703
1704 (void) ipc_kmsg_put(msg, kmsg, hdr->msgh_size);
1705 return(mr);
1706 }
1707
1708 slow_get_rcv_port:
1709 /*
1710 * We have sent the message. Copy in the receive port.
1711 */
1712 mr = ipc_mqueue_copyin(space, rcv_name,
1713 &temp_rcv_mqueue, &temp_rcv_object);
1714 if (mr != MACH_MSG_SUCCESS) {
1715 return(mr);
1716 }
1717 rcv_mqueue = temp_rcv_mqueue;
1718 rcv_object = temp_rcv_object;
1719 /* hold ref for rcv_object */
1720
1721 slow_receive:
1722 /*
1723 * Now we have sent the request and copied in rcv_name,
1724 * and hold ref for rcv_object (to keep mqueue alive).
1725 * Just receive a reply and try to get back to fast path.
1726 */
1727
1728 self->ith_continuation = (void (*)(mach_msg_return_t))0;
1729 ipc_mqueue_receive(rcv_mqueue,
1730 MACH_MSG_OPTION_NONE,
1731 MACH_MSG_SIZE_MAX,
1732 MACH_MSG_TIMEOUT_NONE,
1733 THREAD_ABORTSAFE);
1734
1735 mr = self->ith_state;
1736 temp_kmsg = self->ith_kmsg;
1737 temp_seqno = self->ith_seqno;
1738
1739 ipc_object_release(rcv_object);
1740
1741 if (mr != MACH_MSG_SUCCESS) {
1742 return(mr);
1743 }
1744
1745 kmsg = temp_kmsg;
1746 hdr = &kmsg->ikm_header;
1747 send_size = hdr->msgh_size;
1748 trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr +
1749 round_msg(send_size));
1750 if (option & MACH_RCV_TRAILER_MASK) {
1751 trailer->msgh_seqno = temp_seqno;
1752 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
1753 }
1754 dest_port = (ipc_port_t) hdr->msgh_remote_port;
1755 HOT(c_mmot_cold_055++);
1756 goto fast_copyout;
1757
1758 slow_copyout:
1759 /*
1760 * Nothing locked and no references held, except
1761 * we have kmsg with msgh_seqno filled in. Must
1762 * still check against rcv_size and do
1763 * ipc_kmsg_copyout/ipc_kmsg_put.
1764 */
1765
1766 reply_size = send_size + trailer->msgh_trailer_size;
1767 if (rcv_size < reply_size) {
1768 if (msg_receive_error(kmsg, msg, option, temp_seqno,
1769 space) == MACH_RCV_INVALID_DATA) {
1770 mr = MACH_RCV_INVALID_DATA;
1771 return(mr);
1772 }
1773 else {
1774 mr = MACH_RCV_TOO_LARGE;
1775 return(mr);
1776 }
1777 }
1778
1779 mr = ipc_kmsg_copyout(kmsg, space, current_map(),
1780 MACH_PORT_NULL, MACH_MSG_BODY_NULL);
1781 if (mr != MACH_MSG_SUCCESS) {
1782 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
1783 if (ipc_kmsg_put(msg, kmsg, reply_size) ==
1784 MACH_RCV_INVALID_DATA)
1785 mr = MACH_RCV_INVALID_DATA;
1786 }
1787 else {
1788 if (msg_receive_error(kmsg, msg, option,
1789 temp_seqno, space) == MACH_RCV_INVALID_DATA)
1790 mr = MACH_RCV_INVALID_DATA;
1791 }
1792
1793 return(mr);
1794 }
1795
1796 /* try to get back on optimized path */
1797 HOT(c_mmot_getback_fast_put++);
1798 goto fast_put;
1799
1800 /*NOTREACHED*/
1801 }
1802 } /* END OF HOT PATH */
1803#endif /* ENABLE_HOTPATH */
1804
1805 if (option & MACH_SEND_MSG) {
1806 mr = mach_msg_send(msg, option, send_size,
1807 timeout, notify);
1808 if (mr != MACH_MSG_SUCCESS) {
1809 return mr;
1810 }
1811 }
1812
1813 if (option & MACH_RCV_MSG) {
1814 mach_msg_header_t *rcv;
1815
1816 /*
1817 * 1. MACH_RCV_OVERWRITE is on, and rcv_msg is our scatter list
1818 * and receive buffer
1819 * 2. MACH_RCV_OVERWRITE is off, and rcv_msg might be the
1820 * alternate receive buffer (separate send and receive buffers).
1821 */
1822 if (option & MACH_RCV_OVERWRITE)
1823 rcv = rcv_msg;
1824 else if (rcv_msg != MACH_MSG_NULL)
1825 rcv = rcv_msg;
1826 else
1827 rcv = msg;
1828 mr = mach_msg_receive(rcv, option, rcv_size, rcv_name,
1829 timeout, thread_syscall_return, scatter_list_size);
1830 thread_syscall_return(mr);
1831 }
1832
1833 return MACH_MSG_SUCCESS;
1834}
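/*
 * Illustrative sketch (not part of this file): the combined send/receive
 * pattern that the MACH_SEND_MSG|MACH_RCV_MSG hot path above is tuned for --
 * send a request carrying a make-send-once reply right and wait for the
 * reply in the same trap.  reply_port must be a receive right owned by the
 * caller; the buffer size, message id and function name are made up for the
 * example.
 */
#if 0	/* user-space usage sketch, not compiled into the kernel */
#include <mach/mach.h>

static mach_msg_return_t
example_rpc(mach_port_t server, mach_port_t reply_port)
{
	union {
		mach_msg_header_t	hdr;
		char			space[256];	/* room for the reply and trailer */
	} buf;

	buf.hdr.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
	    MACH_MSG_TYPE_MAKE_SEND_ONCE);
	buf.hdr.msgh_size        = sizeof buf.hdr;
	buf.hdr.msgh_remote_port = server;
	buf.hdr.msgh_local_port  = reply_port;	/* reply comes back here */
	buf.hdr.msgh_id          = 500;		/* arbitrary id for the example */

	/* One trap: queue the request, then block until the reply arrives. */
	return mach_msg(&buf.hdr, MACH_SEND_MSG | MACH_RCV_MSG,
	    sizeof buf.hdr, sizeof buf, reply_port,
	    MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
#endif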
1835
1836/*
1837 * Routine: mach_msg_trap [mach trap]
1838 * Purpose:
1839 * Possibly send a message; possibly receive a message.
1840 * Conditions:
1841 * Nothing locked.
1842 * Returns:
1843 * All of mach_msg_send and mach_msg_receive error codes.
1844 */
1845
1846mach_msg_return_t
1847mach_msg_trap(
1848 mach_msg_header_t *msg,
1849 mach_msg_option_t option,
1850 mach_msg_size_t send_size,
1851 mach_msg_size_t rcv_size,
1852 mach_port_name_t rcv_name,
1853 mach_msg_timeout_t timeout,
1854 mach_port_name_t notify)
1855{
1856 return mach_msg_overwrite_trap(msg,
1857 option,
1858 send_size,
1859 rcv_size,
1860 rcv_name,
1861 timeout,
1862 notify,
1863 (mach_msg_header_t *)0,
1864 (mach_msg_size_t)0);
1865}
1866
1867
1868/*
1869 * Routine: msg_receive_error [internal]
1870 * Purpose:
1871 * Builds a minimal header/trailer and copies it to
1872 * the user message buffer. Invoked in the case of a
1873 * MACH_RCV_TOO_LARGE or MACH_RCV_BODY_ERROR error.
1874 * Conditions:
1875 * Nothing locked.
1876 * Returns:
1877 * MACH_MSG_SUCCESS minimal header/trailer copied
1878 * MACH_RCV_INVALID_DATA copyout to user buffer failed
1879 */
1880
1881mach_msg_return_t
1882msg_receive_error(
1883 ipc_kmsg_t kmsg,
1884 mach_msg_header_t *msg,
1885 mach_msg_option_t option,
1886 mach_port_seqno_t seqno,
1887 ipc_space_t space)
1888{
1889 mach_msg_format_0_trailer_t *trailer;
1890
1891 /*
1892 * Copy out the destination port in the message.
1893 * Destroy all other rights and memory in the message.
1894 */
1895 ipc_kmsg_copyout_dest(kmsg, space);
1896
1897 /*
1898 * Build a minimal message with the requested trailer.
1899 */
1900 trailer = (mach_msg_format_0_trailer_t *)
1901 ((vm_offset_t)&kmsg->ikm_header +
1902 round_msg(sizeof(mach_msg_header_t)));
1903 kmsg->ikm_header.msgh_size = sizeof(mach_msg_header_t);
1904 bcopy( (char *)&trailer_template,
1905 (char *)trailer,
1906 sizeof(trailer_template));
1907 if (option & MACH_RCV_TRAILER_MASK) {
1908 trailer->msgh_seqno = seqno;
1909 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
1910 }
1911
1912 /*
1913 * Copy the message to user space
1914 */
1915 if (ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size +
1916 trailer->msgh_trailer_size) == MACH_RCV_INVALID_DATA)
1917 return(MACH_RCV_INVALID_DATA);
1918 else
1919 return(MACH_MSG_SUCCESS);
1920}