apple/xnu (xnu-344.23) / osfmk / ipc / mach_msg.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: ipc/mach_msg.c
54 * Author: Rich Draves
55 * Date: 1989
56 *
57 * Exported message traps. See mach/message.h.
58 */
59
60 #include <cpus.h>
61
62 #include <mach/kern_return.h>
63 #include <mach/port.h>
64 #include <mach/message.h>
65 #include <mach/mig_errors.h>
66 #include <kern/assert.h>
67 #include <kern/counters.h>
68 #include <kern/cpu_number.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/lock.h>
72 #include <kern/sched_prim.h>
73 #include <kern/exception.h>
74 #include <kern/misc_protos.h>
75 #include <vm/vm_map.h>
76 #include <ipc/ipc_kmsg.h>
77 #include <ipc/ipc_mqueue.h>
78 #include <ipc/ipc_object.h>
79 #include <ipc/ipc_notify.h>
80 #include <ipc/ipc_port.h>
81 #include <ipc/ipc_pset.h>
82 #include <ipc/ipc_space.h>
83 #include <ipc/ipc_entry.h>
84 #include <kern/kalloc.h>
85 #include <kern/thread_swap.h>
86 #include <kern/processor.h>
87
88 #include <kern/mk_sp.h>
89
90 #include <machine/machine_routines.h>
91 #include <sys/kdebug.h>
92
93 /*
94 * Forward declarations
95 */
96
97 mach_msg_return_t mach_msg_send(
98 mach_msg_header_t *msg,
99 mach_msg_option_t option,
100 mach_msg_size_t send_size,
101 mach_msg_timeout_t timeout,
102 mach_port_name_t notify);
103
104 mach_msg_return_t mach_msg_receive(
105 mach_msg_header_t *msg,
106 mach_msg_option_t option,
107 mach_msg_size_t rcv_size,
108 mach_port_name_t rcv_name,
109 mach_msg_timeout_t timeout,
110 void (*continuation)(mach_msg_return_t),
111 mach_msg_size_t slist_size);
112
113
114 mach_msg_return_t msg_receive_error(
115 ipc_kmsg_t kmsg,
116 mach_msg_header_t *msg,
117 mach_msg_option_t option,
118 mach_port_seqno_t seqno,
119 ipc_space_t space);
120
121 security_token_t KERNEL_SECURITY_TOKEN = KERNEL_SECURITY_TOKEN_VALUE;
122
123 mach_msg_format_0_trailer_t trailer_template = {
124 /* mach_msg_trailer_type_t */ MACH_MSG_TRAILER_FORMAT_0,
125 /* mach_msg_trailer_size_t */ MACH_MSG_TRAILER_MINIMUM_SIZE,
126 /* mach_port_seqno_t */ 0,
127 /* security_token_t */ KERNEL_SECURITY_TOKEN_VALUE
128 };
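/*
 * This template supplies the default format-0 trailer attached to each
 * message; the receive paths below only adjust msgh_seqno and shrink
 * msgh_trailer_size to REQUESTED_TRAILER_SIZE(option) before copyout.
 * Illustrative sketch (user level, not part of this file, assuming the
 * usual trailer-option macros from <mach/message.h>) of a receiver
 * asking for the sender's security token in the trailer:
 *
 *	option = MACH_RCV_MSG |
 *	    MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
 *	    MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_SENDER);
 */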
129
130 /*
131 * Routine: mach_msg_send
132 * Purpose:
133 * Send a message.
134 * Conditions:
135 * Nothing locked.
136 * Returns:
137 * MACH_MSG_SUCCESS Sent the message.
138 * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
139 * MACH_SEND_NO_BUFFER Couldn't allocate buffer.
140 * MACH_SEND_INVALID_DATA Couldn't copy message data.
141 * MACH_SEND_INVALID_HEADER
142 * Illegal value in the message header bits.
143 * MACH_SEND_INVALID_DEST The space is dead.
144 * MACH_SEND_INVALID_NOTIFY Bad notify port.
145 * MACH_SEND_INVALID_DEST Can't copyin destination port.
146 * MACH_SEND_INVALID_REPLY Can't copyin reply port.
147 * MACH_SEND_TIMED_OUT Timeout expired without delivery.
148 * MACH_SEND_INTERRUPTED Delivery interrupted.
149 * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
151 *		MACH_SEND_WILL_NOTIFY	Msg-accepted notification requested.
151 * MACH_SEND_NOTIFY_IN_PROGRESS
152 * This space has already forced a message to this port.
153 */
154
155 mach_msg_return_t
156 mach_msg_send(
157 mach_msg_header_t *msg,
158 mach_msg_option_t option,
159 mach_msg_size_t send_size,
160 mach_msg_timeout_t timeout,
161 mach_port_name_t notify)
162 {
163 ipc_space_t space = current_space();
164 vm_map_t map = current_map();
165 ipc_kmsg_t kmsg;
166 mach_msg_return_t mr;
167
168 mr = ipc_kmsg_get(msg, send_size, &kmsg);
169
170 if (mr != MACH_MSG_SUCCESS)
171 return mr;
172
173 if (option & MACH_SEND_CANCEL) {
174 if (notify == MACH_PORT_NULL)
175 mr = MACH_SEND_INVALID_NOTIFY;
176 else
177 mr = ipc_kmsg_copyin(kmsg, space, map, notify);
178 } else
179 mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
180 if (mr != MACH_MSG_SUCCESS) {
181 ipc_kmsg_free(kmsg);
182 return mr;
183 }
184
185 mr = ipc_kmsg_send(kmsg, option & MACH_SEND_TIMEOUT, timeout);
186
187 if (mr != MACH_MSG_SUCCESS) {
188 mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL);
189 (void) ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
190 }
191
192 return mr;
193 }
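/*
 * Note on the failure path above: if ipc_kmsg_send() fails after copyin,
 * the rights already captured in the kmsg are translated back into the
 * caller's name space by ipc_kmsg_copyout_pseudo() and the message is
 * written back to the user buffer with ipc_kmsg_put(), so the caller's
 * rights are not silently destroyed.
 */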
194
195 /*
196 * Routine: mach_msg_receive
197 * Purpose:
198 * Receive a message.
199 * Conditions:
200 * Nothing locked.
201 * Returns:
202 * MACH_MSG_SUCCESS Received a message.
203 * MACH_RCV_INVALID_NAME The name doesn't denote a right,
204 *			or the denoted right is not a receive right or port set.
205 * MACH_RCV_IN_SET Receive right is a member of a set.
206 * MACH_RCV_TOO_LARGE Message wouldn't fit into buffer.
207 * MACH_RCV_TIMED_OUT Timeout expired without a message.
208 * MACH_RCV_INTERRUPTED Reception interrupted.
209 * MACH_RCV_PORT_DIED Port/set died while receiving.
210 * MACH_RCV_PORT_CHANGED Port moved into set while receiving.
211 * MACH_RCV_INVALID_DATA Couldn't copy to user buffer.
212 * MACH_RCV_INVALID_NOTIFY Bad notify port.
213 * MACH_RCV_HEADER_ERROR
214 */
215
216 mach_msg_return_t
217 mach_msg_receive_results(void)
218 {
219 thread_t self = current_thread();
220 ipc_space_t space = current_space();
221 vm_map_t map = current_map();
222
223 ipc_object_t object = self->ith_object;
224 mach_msg_return_t mr = self->ith_state;
225 mach_msg_header_t *msg = self->ith_msg;
226 mach_msg_option_t option = self->ith_option;
227 ipc_kmsg_t kmsg = self->ith_kmsg;
228 mach_port_seqno_t seqno = self->ith_seqno;
229 mach_msg_size_t slist_size = self->ith_scatter_list_size;
230
231 mach_msg_format_0_trailer_t *trailer;
232
233 ipc_object_release(object);
234
235 if (mr != MACH_MSG_SUCCESS) {
236
237 if (mr == MACH_RCV_TOO_LARGE ) {
238 if (option & MACH_RCV_LARGE) {
239 /*
240 * We need to inform the user-level code that it needs more
241 * space.  The size it needs was returned in the msize save
242 * area rather than in the message itself, which was left on
243 * the queue.
244 */
245 if (copyout((char *) &self->ith_msize,
246 (char *) &msg->msgh_size,
247 sizeof(mach_msg_size_t)))
248 mr = MACH_RCV_INVALID_DATA;
249 goto out;
250 }
251
252 if (msg_receive_error(kmsg, msg, option, seqno, space)
253 == MACH_RCV_INVALID_DATA)
254 mr = MACH_RCV_INVALID_DATA;
255 }
256 goto out;
257 }
258
259 trailer = (mach_msg_format_0_trailer_t *)
260 ((vm_offset_t)&kmsg->ikm_header +
261 round_msg(kmsg->ikm_header.msgh_size));
262 if (option & MACH_RCV_TRAILER_MASK) {
263 trailer->msgh_seqno = seqno;
264 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
265 }
266
267 /*
268 * If MACH_RCV_OVERWRITE was specified, try to get the scatter
269 * list and verify it against the contents of the message. If
270 * there is any problem with it, we will continue without it as
271 * normal.
272 */
273 if (option & MACH_RCV_OVERWRITE) {
274 mach_msg_size_t slist_size = self->ith_scatter_list_size;
275 mach_msg_body_t *slist;
276
277 slist = ipc_kmsg_copyin_scatter(msg, slist_size, kmsg);
278 mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL, slist);
279 ipc_kmsg_free_scatter(slist, slist_size);
280 } else {
281 mr = ipc_kmsg_copyout(kmsg, space, map,
282 MACH_PORT_NULL, MACH_MSG_BODY_NULL);
283 }
284
285 if (mr != MACH_MSG_SUCCESS) {
286 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
287 if (ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size +
288 trailer->msgh_trailer_size) == MACH_RCV_INVALID_DATA)
289 mr = MACH_RCV_INVALID_DATA;
290 }
291 else {
292 if (msg_receive_error(kmsg, msg, option, seqno, space)
293 == MACH_RCV_INVALID_DATA)
294 mr = MACH_RCV_INVALID_DATA;
295 }
296 goto out;
297 }
298 mr = ipc_kmsg_put(msg,
299 kmsg,
300 kmsg->ikm_header.msgh_size +
301 trailer->msgh_trailer_size);
302 out:
303 return mr;
304 }
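/*
 * Note on MACH_RCV_TOO_LARGE above: with MACH_RCV_LARGE the message is
 * left queued and only the required size is copied back into msgh_size,
 * so the caller can retry with a bigger buffer; without MACH_RCV_LARGE
 * the message has already been pulled from the queue and
 * msg_receive_error() hands back just a minimal header and trailer.
 */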
305
306 mach_msg_return_t
307 mach_msg_receive(
308 mach_msg_header_t *msg,
309 mach_msg_option_t option,
310 mach_msg_size_t rcv_size,
311 mach_port_name_t rcv_name,
312 mach_msg_timeout_t timeout,
313 void (*continuation)(mach_msg_return_t),
314 mach_msg_size_t slist_size)
315 {
316 thread_t self = current_thread();
317 ipc_space_t space = current_space();
318 vm_map_t map = current_map();
319 ipc_object_t object;
320 ipc_mqueue_t mqueue;
321 ipc_kmsg_t kmsg;
322 mach_port_seqno_t seqno;
323 mach_msg_return_t mr;
324 mach_msg_body_t *slist;
325 mach_msg_format_0_trailer_t *trailer;
326
327 mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
328 if (mr != MACH_MSG_SUCCESS) {
329 return mr;
330 }
331 /* hold ref for object */
332
333 self->ith_msg = msg;
334 self->ith_object = object;
335 self->ith_msize = rcv_size;
336 self->ith_option = option;
337 self->ith_scatter_list_size = slist_size;
338 self->ith_continuation = continuation;
339
340 ipc_mqueue_receive(mqueue, option, rcv_size, timeout, THREAD_ABORTSAFE);
341 if ((option & MACH_RCV_TIMEOUT) && timeout == 0)
342 _mk_sp_thread_perhaps_yield(self);
343 return mach_msg_receive_results();
344 }
345
346 void
347 mach_msg_receive_continue(void)
348 {
349 thread_t self = current_thread();
350
351 (*self->ith_continuation)(mach_msg_receive_results());
352 }
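/*
 * mach_msg_receive_continue() is the resumption point used when the
 * receive blocks and the thread's kernel stack may be discarded: the
 * ith_* fields saved in the thread by mach_msg_receive() let the results
 * be gathered here without returning through the original stack frame.
 */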
353
354 /*
355 * Toggle this to compile the hotpath in/out
356 * If compiled in, the run-time toggle "enable_hotpath" below
357 * eases testing & debugging
358 */
359 #define ENABLE_HOTPATH 1 /* Hacked on for now */
360
361 #if ENABLE_HOTPATH
362 /*
363 * These counters allow tracing of hotpath behavior under test loads.
364 * A couple key counters are unconditional (see below).
365 */
366 #define HOTPATH_DEBUG 0 /* Toggle to include lots of counters */
367 #if HOTPATH_DEBUG
368 #define HOT(expr) expr
369
370 unsigned int c_mmot_FIRST = 0; /* Unused First Counter */
371 unsigned int c_mmot_combined_S_R = 0; /* hotpath candidates */
372 unsigned int c_mach_msg_trap_switch_fast = 0; /* hotpath successes */
373 unsigned int c_mmot_kernel_send = 0; /* kernel server */
374 unsigned int c_mmot_cold_000 = 0; /* see below ... */
375 unsigned int c_mmot_smallsendsize = 0;
376 unsigned int c_mmot_oddsendsize = 0;
377 unsigned int c_mmot_bigsendsize = 0;
378 unsigned int c_mmot_copyinmsg_fail = 0;
379 unsigned int c_mmot_g_slow_copyin3 = 0;
380 unsigned int c_mmot_cold_006 = 0;
381 unsigned int c_mmot_cold_007 = 0;
382 unsigned int c_mmot_cold_008 = 0;
383 unsigned int c_mmot_cold_009 = 0;
384 unsigned int c_mmot_cold_010 = 0;
385 unsigned int c_mmot_cold_012 = 0;
386 unsigned int c_mmot_cold_013 = 0;
387 unsigned int c_mmot_cold_014 = 0;
388 unsigned int c_mmot_cold_016 = 0;
389 unsigned int c_mmot_cold_018 = 0;
390 unsigned int c_mmot_cold_019 = 0;
391 unsigned int c_mmot_cold_020 = 0;
392 unsigned int c_mmot_cold_021 = 0;
393 unsigned int c_mmot_cold_022 = 0;
394 unsigned int c_mmot_cold_023 = 0;
395 unsigned int c_mmot_cold_024 = 0;
396 unsigned int c_mmot_cold_025 = 0;
397 unsigned int c_mmot_cold_026 = 0;
398 unsigned int c_mmot_cold_027 = 0;
399 unsigned int c_mmot_hot_fSR_ok = 0;
400 unsigned int c_mmot_cold_029 = 0;
401 unsigned int c_mmot_cold_030 = 0;
402 unsigned int c_mmot_cold_031 = 0;
403 unsigned int c_mmot_cold_032 = 0;
404 unsigned int c_mmot_cold_033 = 0;
405 unsigned int c_mmot_bad_rcvr = 0;
406 unsigned int c_mmot_rcvr_swapped = 0;
407 unsigned int c_mmot_rcvr_locked = 0;
408 unsigned int c_mmot_rcvr_tswapped = 0;
409 unsigned int c_mmot_rcvr_freed = 0;
410 unsigned int c_mmot_g_slow_copyout6 = 0;
411 unsigned int c_mmot_g_slow_copyout5 = 0;
412 unsigned int c_mmot_cold_037 = 0;
413 unsigned int c_mmot_cold_038 = 0;
414 unsigned int c_mmot_cold_039 = 0;
415 unsigned int c_mmot_g_slow_copyout4 = 0;
416 unsigned int c_mmot_g_slow_copyout3 = 0;
417 unsigned int c_mmot_hot_ok1 = 0;
418 unsigned int c_mmot_hot_ok2 = 0;
419 unsigned int c_mmot_hot_ok3 = 0;
420 unsigned int c_mmot_g_slow_copyout1 = 0;
421 unsigned int c_mmot_g_slow_copyout2 = 0;
422 unsigned int c_mmot_getback_fast_copyin = 0;
423 unsigned int c_mmot_cold_048 = 0;
424 unsigned int c_mmot_getback_FastSR = 0;
425 unsigned int c_mmot_cold_050 = 0;
426 unsigned int c_mmot_cold_051 = 0;
427 unsigned int c_mmot_cold_052 = 0;
428 unsigned int c_mmot_cold_053 = 0;
429 unsigned int c_mmot_fastkernelreply = 0;
430 unsigned int c_mmot_cold_055 = 0;
431 unsigned int c_mmot_getback_fast_put = 0;
432 unsigned int c_mmot_LAST = 0; /* End Marker - Unused */
433
434 void db_mmot_zero_counters(void);	/* forward */
435 void db_mmot_show_counters(void);	/* forward */
436
437 void /* Call from the debugger to clear all counters */
438 db_mmot_zero_counters(void)
439 {
440 register unsigned int *ip = &c_mmot_FIRST;
441 while (ip <= &c_mmot_LAST)
442 *ip++ = 0;
443 }
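/*
 * The loop above (and the c_mmot_FIRST/c_mmot_LAST markers) assumes the
 * counters are laid out contiguously in declaration order, which the C
 * language does not strictly guarantee across separate objects; this is
 * only a debugging convenience.
 */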
444
445 void /* Call from the debugger to show all counters */
446 db_mmot_show_counters(void)
447 {
448 #define xx(str) printf("%s: %d\n", # str, str);
449
450 xx(c_mmot_combined_S_R);
451 xx(c_mach_msg_trap_switch_fast);
452 xx(c_mmot_kernel_send);
453 xx(c_mmot_cold_000);
454 xx(c_mmot_smallsendsize);
455 xx(c_mmot_oddsendsize);
456 xx(c_mmot_bigsendsize);
457 xx(c_mmot_copyinmsg_fail);
458 xx(c_mmot_g_slow_copyin3);
459 xx(c_mmot_cold_006);
460 xx(c_mmot_cold_007);
461 xx(c_mmot_cold_008);
462 xx(c_mmot_cold_009);
463 xx(c_mmot_cold_010);
464 xx(c_mmot_cold_012);
465 xx(c_mmot_cold_013);
466 xx(c_mmot_cold_014);
467 xx(c_mmot_cold_016);
468 xx(c_mmot_cold_018);
469 xx(c_mmot_cold_019);
470 xx(c_mmot_cold_020);
471 xx(c_mmot_cold_021);
472 xx(c_mmot_cold_022);
473 xx(c_mmot_cold_023);
474 xx(c_mmot_cold_024);
475 xx(c_mmot_cold_025);
476 xx(c_mmot_cold_026);
477 xx(c_mmot_cold_027);
478 xx(c_mmot_hot_fSR_ok);
479 xx(c_mmot_cold_029);
480 xx(c_mmot_cold_030);
481 xx(c_mmot_cold_031);
482 xx(c_mmot_cold_032);
483 xx(c_mmot_cold_033);
484 xx(c_mmot_bad_rcvr);
485 xx(c_mmot_rcvr_swapped);
486 xx(c_mmot_rcvr_locked);
487 xx(c_mmot_rcvr_tswapped);
488 xx(c_mmot_rcvr_freed);
489 xx(c_mmot_g_slow_copyout6);
490 xx(c_mmot_g_slow_copyout5);
491 xx(c_mmot_cold_037);
492 xx(c_mmot_cold_038);
493 xx(c_mmot_cold_039);
494 xx(c_mmot_g_slow_copyout4);
495 xx(c_mmot_g_slow_copyout3);
496 xx(c_mmot_g_slow_copyout1);
497 xx(c_mmot_hot_ok3);
498 xx(c_mmot_hot_ok2);
499 xx(c_mmot_hot_ok1);
500 xx(c_mmot_g_slow_copyout2);
501 xx(c_mmot_getback_fast_copyin);
502 xx(c_mmot_cold_048);
503 xx(c_mmot_getback_FastSR);
504 xx(c_mmot_cold_050);
505 xx(c_mmot_cold_051);
506 xx(c_mmot_cold_052);
507 xx(c_mmot_cold_053);
508 xx(c_mmot_fastkernelreply);
509 xx(c_mmot_cold_055);
510 xx(c_mmot_getback_fast_put);
511
512 #undef xx
513 }
514
515 #else /* !HOTPATH_DEBUG */
516
517 /*
518 * Duplicate just these few so we can always do a quick sanity check
519 */
520 unsigned int c_mmot_combined_S_R = 0; /* hotpath candidates */
521 unsigned int c_mach_msg_trap_switch_fast = 0; /* hotpath successes */
522 unsigned int c_mmot_kernel_send = 0; /* kernel server calls */
523 #define HOT(expr) /* no optional counters */
524
525 #endif /* !HOTPATH_DEBUG */
526
527 boolean_t enable_hotpath = TRUE; /* Patchable, just in case ... */
528 #endif	/* ENABLE_HOTPATH */
529
530 /*
531 * Routine: mach_msg_overwrite_trap [mach trap]
532 * Purpose:
533 * Possibly send a message; possibly receive a message.
534 * Conditions:
535 * Nothing locked.
536 * Returns:
537 * All of mach_msg_send and mach_msg_receive error codes.
538 */
539
540 mach_msg_return_t
541 mach_msg_overwrite_trap(
542 mach_msg_header_t *msg,
543 mach_msg_option_t option,
544 mach_msg_size_t send_size,
545 mach_msg_size_t rcv_size,
546 mach_port_name_t rcv_name,
547 mach_msg_timeout_t timeout,
548 mach_port_name_t notify,
549 mach_msg_header_t *rcv_msg,
550 mach_msg_size_t scatter_list_size)
551 {
552 register mach_msg_header_t *hdr;
553 mach_msg_return_t mr = MACH_MSG_SUCCESS;
554 /* mask out some of the options before entering the hot path */
555 mach_msg_option_t masked_option =
556 option & ~(MACH_SEND_TRAILER|MACH_RCV_TRAILER_MASK|MACH_RCV_LARGE);
557 int i;
558
559 #if ENABLE_HOTPATH
560 /* BEGINNING OF HOT PATH */
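/*
 * The hot path is attempted only for the classic RPC shape: MACH_SEND_MSG
 * and MACH_RCV_MSG together, with at most trailer requests or
 * MACH_RCV_LARGE as extra options.  Anything else (timeouts,
 * MACH_RCV_OVERWRITE, notify ports, ...) falls through to the generic
 * send/receive code at the end of this routine.
 */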
561 if ((masked_option == (MACH_SEND_MSG|MACH_RCV_MSG)) && enable_hotpath) {
562 register thread_t self = current_thread();
563 register mach_msg_format_0_trailer_t *trailer;
564
565 ipc_space_t space = current_act()->task->itk_space;
566 ipc_kmsg_t kmsg;
567 register ipc_port_t dest_port;
568 ipc_object_t rcv_object;
569 register ipc_mqueue_t rcv_mqueue;
570 mach_msg_size_t reply_size;
571 ipc_kmsg_t rcv_kmsg;
572
573 c_mmot_combined_S_R++;
574
575 /*
576 * This case is divided into labeled sections:
577 * five optimized sections and six unoptimized
578 * sections, which
579 * do the same thing but handle all possible
580 * cases and are slower.
581 *
582 * The five sections for an RPC are
583 * 1) Get request message into a buffer.
584 * 2) Copyin request message and rcv_name.
585 * (fast_copyin or slow_copyin)
586 * 3) Enqueue request and dequeue reply.
587 * (fast_send_receive or
588 * slow_send and slow_receive)
589 * 4) Copyout reply message.
590 * (fast_copyout or slow_copyout)
591 * 5) Put reply message to user's buffer.
592 *
593 * Keep the locking hierarchy firmly in mind.
594 * (First spaces, then ports, then port sets,
595 * then message queues.) Only a non-blocking
596 * attempt can be made to acquire locks out of
597 * order, or acquire two locks on the same level.
598 * Acquiring two locks on the same level will
599 * fail if the objects are really the same,
600 * unless simple locking is disabled. This is OK,
601 * because then the extra unlock does nothing.
602 *
603 * There are two major reasons these RPCs can't use
604 * ipc_thread_switch, and use slow_send/slow_receive:
605 * 1) Kernel RPCs.
606 * 2) Servers fall behind clients, so
607 * client doesn't find a blocked server thread and
608 * server finds waiting messages and can't block.
609 */
610
611 mr = ipc_kmsg_get(msg, send_size, &kmsg);
612 		if (mr != MACH_MSG_SUCCESS) {
613 return mr;
614 }
615 hdr = &kmsg->ikm_header;
616 trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr +
617 send_size);
618
619 fast_copyin:
620 /*
621 * optimized ipc_kmsg_copyin/ipc_mqueue_copyin
622 *
623 * We have the request message data in kmsg.
624 * Must still do copyin, send, receive, etc.
625 *
626 * If the message isn't simple, we can't combine
627 * ipc_kmsg_copyin_header and ipc_mqueue_copyin,
628 * because copyin of the message body might
629 * affect rcv_name.
630 */
631
632 switch (hdr->msgh_bits) {
633 case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
634 MACH_MSG_TYPE_MAKE_SEND_ONCE): {
635 register ipc_entry_t table;
636 register ipc_entry_num_t size;
637 register ipc_port_t reply_port;
638
639 /* sending a request message */
640
641 {
642 register mach_port_index_t index;
643 register mach_port_gen_t gen;
644
645 {
646 register mach_port_name_t reply_name =
647 (mach_port_name_t)hdr->msgh_local_port;
648
649 if (reply_name != rcv_name) {
650 HOT(c_mmot_g_slow_copyin3++);
651 goto slow_copyin;
652 }
653
654 /* optimized ipc_entry_lookup of reply_name */
655
656 index = MACH_PORT_INDEX(reply_name);
657 gen = MACH_PORT_GEN(reply_name);
658
659 is_read_lock(space);
660 assert(space->is_active);
661
662 size = space->is_table_size;
663 table = space->is_table;
664
665 {
666 register ipc_entry_t entry;
667 register ipc_entry_bits_t bits;
668
669 if (index < size) {
670 entry = &table[index];
671 bits = entry->ie_bits;
672 if (IE_BITS_GEN(bits) != gen ||
673 (bits & IE_BITS_COLLISION)) {
674 entry = IE_NULL;
675 }
676 } else {
677 entry = IE_NULL;
678 }
679 if (entry == IE_NULL) {
680 entry = ipc_entry_lookup(space, reply_name);
681 if (entry == IE_NULL) {
682 HOT(c_mmot_cold_006++);
683 goto abort_request_copyin;
684 }
685 bits = entry->ie_bits;
686 }
687
688 /* check type bit */
689
690 if (! (bits & MACH_PORT_TYPE_RECEIVE)) {
691 HOT(c_mmot_cold_007++);
692 goto abort_request_copyin;
693 }
694
695 reply_port = (ipc_port_t) entry->ie_object;
696 assert(reply_port != IP_NULL);
697 }
698 }
699 }
700
701 /* optimized ipc_entry_lookup of dest_name */
702
703 {
704 register mach_port_index_t index;
705 register mach_port_gen_t gen;
706
707 {
708 register mach_port_name_t dest_name =
709 (mach_port_name_t)hdr->msgh_remote_port;
710
711 index = MACH_PORT_INDEX(dest_name);
712 gen = MACH_PORT_GEN(dest_name);
713
714 {
715 register ipc_entry_t entry;
716 register ipc_entry_bits_t bits;
717
718 if (index < size) {
719 entry = &table[index];
720 bits = entry->ie_bits;
721 if (IE_BITS_GEN(bits) != gen ||
722 (bits & IE_BITS_COLLISION)) {
723 entry = IE_NULL;
724 }
725 } else {
726 entry = IE_NULL;
727 }
728 if (entry == IE_NULL) {
729 entry = ipc_entry_lookup(space, dest_name);
730 if (entry == IE_NULL) {
731 HOT(c_mmot_cold_008++);
732 goto abort_request_copyin;
733 }
734 bits = entry->ie_bits;
735 }
736
737 /* check type bit */
738
739 if (! (bits & MACH_PORT_TYPE_SEND)) {
740 HOT(c_mmot_cold_009++);
741 goto abort_request_copyin;
742 }
743
744 assert(IE_BITS_UREFS(bits) > 0);
745
746 dest_port = (ipc_port_t) entry->ie_object;
747 assert(dest_port != IP_NULL);
748 }
749 }
750 }
751
752 /*
753 * To do an atomic copyin, need simultaneous
754 * locks on both ports and the space. If
755 * dest_port == reply_port, and simple locking is
756 * enabled, then we will abort. Otherwise it's
757 * OK to unlock twice.
758 */
759
760 ip_lock(dest_port);
761 if (!ip_active(dest_port) ||
762 !ip_lock_try(reply_port)) {
763 ip_unlock(dest_port);
764 HOT(c_mmot_cold_010++);
765 goto abort_request_copyin;
766 }
767 is_read_unlock(space);
768
769 assert(dest_port->ip_srights > 0);
770 dest_port->ip_srights++;
771 ip_reference(dest_port);
772
773 assert(ip_active(reply_port));
774 assert(reply_port->ip_receiver_name ==
775 (mach_port_name_t)hdr->msgh_local_port);
776 assert(reply_port->ip_receiver == space);
777
778 reply_port->ip_sorights++;
779 ip_reference(reply_port);
780
781 hdr->msgh_bits =
782 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
783 MACH_MSG_TYPE_PORT_SEND_ONCE);
784 hdr->msgh_remote_port = dest_port;
785 hdr->msgh_local_port = reply_port;
786
787 /* make sure we can queue to the destination */
788
789 if (dest_port->ip_receiver == ipc_space_kernel) {
790 /*
791 * The kernel server has a reference to
792 * the reply port, which it hands back
793 * to us in the reply message. We do
794 * not need to keep another reference to
795 * it.
796 */
797 ip_unlock(reply_port);
798
799 assert(ip_active(dest_port));
800 dest_port->ip_messages.imq_seqno++;
801 ip_unlock(dest_port);
802 goto kernel_send;
803 }
804
805 if (imq_full(&dest_port->ip_messages)) {
806 HOT(c_mmot_cold_013++);
807 goto abort_request_send_receive;
808 }
809
810 /* optimized ipc_mqueue_copyin */
811
812 rcv_object = (ipc_object_t) reply_port;
813 io_reference(rcv_object);
814 rcv_mqueue = &reply_port->ip_messages;
815 io_unlock(rcv_object);
816 HOT(c_mmot_hot_fSR_ok++);
817 goto fast_send_receive;
818
819 abort_request_copyin:
820 is_read_unlock(space);
821 goto slow_copyin;
822
823 abort_request_send_receive:
824 ip_unlock(dest_port);
825 ip_unlock(reply_port);
826 goto slow_send;
827 }
828
829 case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
830 register ipc_entry_num_t size;
831 register ipc_entry_t table;
832
833 /* sending a reply message */
834
835 {
836 register mach_port_name_t reply_name =
837 (mach_port_name_t)hdr->msgh_local_port;
838
839 if (reply_name != MACH_PORT_NULL) {
840 HOT(c_mmot_cold_018++);
841 goto slow_copyin;
842 }
843 }
844
845 is_write_lock(space);
846 assert(space->is_active);
847
848 /* optimized ipc_entry_lookup */
849
850 size = space->is_table_size;
851 table = space->is_table;
852
853 {
854 register ipc_entry_t entry;
855 register mach_port_gen_t gen;
856 register mach_port_index_t index;
857 ipc_table_index_t *requests;
858
859 {
860 register mach_port_name_t dest_name =
861 (mach_port_name_t)hdr->msgh_remote_port;
862
863 index = MACH_PORT_INDEX(dest_name);
864 gen = MACH_PORT_GEN(dest_name);
865 }
866
867 if (index >= size) {
868 HOT(c_mmot_cold_019++);
869 goto abort_reply_dest_copyin;
870 }
871
872 entry = &table[index];
873
874 /* check generation, collision bit, and type bit */
875
876 if ((entry->ie_bits & (IE_BITS_GEN_MASK|
877 IE_BITS_COLLISION|
878 MACH_PORT_TYPE_SEND_ONCE)) !=
879 (gen | MACH_PORT_TYPE_SEND_ONCE)) {
880 HOT(c_mmot_cold_020++);
881 goto abort_reply_dest_copyin;
882 }
883
884 /* optimized ipc_right_copyin */
885
886 assert(IE_BITS_TYPE(entry->ie_bits) ==
887 MACH_PORT_TYPE_SEND_ONCE);
888 assert(IE_BITS_UREFS(entry->ie_bits) == 1);
889
890 if (entry->ie_request != 0) {
891 HOT(c_mmot_cold_021++);
892 goto abort_reply_dest_copyin;
893 }
894
895 dest_port = (ipc_port_t) entry->ie_object;
896 assert(dest_port != IP_NULL);
897
898 ip_lock(dest_port);
899 if (!ip_active(dest_port)) {
900 ip_unlock(dest_port);
901 HOT(c_mmot_cold_022++);
902 goto abort_reply_dest_copyin;
903 }
904
905 assert(dest_port->ip_sorights > 0);
906
907 /* optimized ipc_entry_dealloc */
908
909
910 entry->ie_bits = gen;
911 entry->ie_next = table->ie_next;
912 table->ie_next = index;
913 entry->ie_object = IO_NULL;
914 }
915
916 hdr->msgh_bits =
917 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
918 0);
919 hdr->msgh_remote_port = dest_port;
920
921 /* make sure we can queue to the destination */
922
923 assert(dest_port->ip_receiver != ipc_space_kernel);
924
925 /* optimized ipc_entry_lookup/ipc_mqueue_copyin */
926
927 {
928 register ipc_entry_t entry;
929 register ipc_entry_bits_t bits;
930
931 {
932 register mach_port_index_t index;
933 register mach_port_gen_t gen;
934
935 index = MACH_PORT_INDEX(rcv_name);
936 gen = MACH_PORT_GEN(rcv_name);
937
938 if (index < size) {
939 entry = &table[index];
940 bits = entry->ie_bits;
941 if (IE_BITS_GEN(bits) != gen ||
942 (bits & IE_BITS_COLLISION)) {
943 entry = IE_NULL;
944 }
945 } else {
946 entry = IE_NULL;
947 }
948 if (entry == IE_NULL) {
949 entry = ipc_entry_lookup(space, rcv_name);
950 if (entry == IE_NULL) {
951 HOT(c_mmot_cold_024++);
952 goto abort_reply_rcv_copyin;
953 }
954 bits = entry->ie_bits;
955 }
956
957 }
958
959 /* check type bits; looking for receive or set */
960 #if 0
961 /*
962 * JMM - The check below for messages in the receive
963 * mqueue is insufficient to work with port sets, since
964 * the messages stay in the port queues. For now, don't
965 * allow portsets (but receiving on a portset when sending
966 * a message to a send-once right is actually a very
967 * common case, so we should re-enable this).
968 */
969 if (bits & MACH_PORT_TYPE_PORT_SET) {
970 register ipc_pset_t rcv_pset;
971
972 rcv_pset = (ipc_pset_t) entry->ie_object;
973 assert(rcv_pset != IPS_NULL);
974
975 ips_lock(rcv_pset);
976 assert(ips_active(rcv_pset));
977
978 rcv_object = (ipc_object_t) rcv_pset;
979 rcv_mqueue = &rcv_pset->ips_messages;
980 } else
981 #endif /* 0 */
982 if (bits & MACH_PORT_TYPE_RECEIVE) {
983 register ipc_port_t rcv_port;
984
985 rcv_port = (ipc_port_t) entry->ie_object;
986 assert(rcv_port != IP_NULL);
987
988 if (!ip_lock_try(rcv_port)) {
989 HOT(c_mmot_cold_025++);
990 goto abort_reply_rcv_copyin;
991 }
992 assert(ip_active(rcv_port));
993
994 if (rcv_port->ip_pset_count != 0) {
995 ip_unlock(rcv_port);
996 HOT(c_mmot_cold_026++);
997 goto abort_reply_rcv_copyin;
998 }
999
1000 rcv_object = (ipc_object_t) rcv_port;
1001 rcv_mqueue = &rcv_port->ip_messages;
1002 } else {
1003 HOT(c_mmot_cold_027++);
1004 goto abort_reply_rcv_copyin;
1005 }
1006 }
1007
1008 is_write_unlock(space);
1009 io_reference(rcv_object);
1010 io_unlock(rcv_object);
1011 HOT(c_mmot_hot_fSR_ok++);
1012 goto fast_send_receive;
1013
1014 abort_reply_dest_copyin:
1015 is_write_unlock(space);
1016 HOT(c_mmot_cold_029++);
1017 goto slow_copyin;
1018
1019 abort_reply_rcv_copyin:
1020 ip_unlock(dest_port);
1021 is_write_unlock(space);
1022 HOT(c_mmot_cold_030++);
1023 goto slow_send;
1024 }
1025
1026 default:
1027 HOT(c_mmot_cold_031++);
1028 goto slow_copyin;
1029 }
1030 /*NOTREACHED*/
1031
1032 fast_send_receive:
1033 /*
1034 * optimized ipc_mqueue_send/ipc_mqueue_receive
1035 *
1036 * Finished get/copyin of kmsg and copyin of rcv_name.
1037 * space is unlocked, dest_port is locked,
1038 * we can queue kmsg to dest_port,
1039 * rcv_mqueue is set, and rcv_object holds a ref
1040 * so the mqueue cannot go away.
1041 *
1042 * JMM - For now, rcv_object is just a port. Portsets
1043 * are disabled for the time being.
1044 */
1045
1046 assert(ip_active(dest_port));
1047 assert(dest_port->ip_receiver != ipc_space_kernel);
1048 assert(!imq_full(&dest_port->ip_messages) ||
1049 (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) ==
1050 MACH_MSG_TYPE_PORT_SEND_ONCE));
1051 assert((hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0);
1052
1053 {
1054 register ipc_mqueue_t dest_mqueue;
1055 wait_queue_t waitq;
1056 thread_t receiver;
1057 #if THREAD_SWAPPER
1058 thread_act_t rcv_act;
1059 #endif
1060 spl_t s;
1061
1062 s = splsched();
1063 dest_mqueue = &dest_port->ip_messages;
1064 waitq = &dest_mqueue->imq_wait_queue;
1065 imq_lock(dest_mqueue);
1066
1067 wait_queue_peek64_locked(waitq, IPC_MQUEUE_RECEIVE, &receiver, &waitq);
1068 /* queue still locked, thread locked - but still on q */
1069
1070 if (receiver == THREAD_NULL) {
1071 abort_send_receive:
1072 imq_unlock(dest_mqueue);
1073 splx(s);
1074 ip_unlock(dest_port);
1075 ipc_object_release(rcv_object);
1076 HOT(c_mmot_cold_032++);
1077 goto slow_send;
1078 }
1079
1080 assert(receiver->wait_queue == waitq);
1081 assert(receiver->wait_event == IPC_MQUEUE_RECEIVE);
1082
1083 /*
1084 * See if it is still running on another processor (trying to
1085 * block itself). If so, fall off.
1086 *
1087 * JMM - We have an opportunity here. Since the thread is locked
1088 * and we find it runnable, it must still be trying to get into
1089 * thread_block on itself. We could just "hand him the message"
1090 * and let him go (thread_go_locked()) and then fall down into a
1091 * slow receive for ourselves. Only his RECEIVE_TOO_LARGE handling
1092 * runs afoul of that. Clean this up!
1093 */
1094 if ((receiver->state & (TH_RUN|TH_WAIT)) != TH_WAIT) {
1095 assert(NCPUS > 1);
1096 HOT(c_mmot_cold_033++);
1097 fall_off:
1098 thread_unlock(receiver);
1099 if (waitq != &dest_mqueue->imq_wait_queue)
1100 wait_queue_unlock(waitq);
1101 goto abort_send_receive;
1102 }
1103
1104 /*
1105 * Check that the receiver can stay on the hot path.
1106 */
1107 if (send_size + REQUESTED_TRAILER_SIZE(receiver->ith_option) >
1108 receiver->ith_msize) {
1109 /*
1110 * The receiver can't accept the message.
1111 */
1112 HOT(c_mmot_bad_rcvr++);
1113 goto fall_off;
1114 }
1115
1116 #if THREAD_SWAPPER
1117 /*
1118 * Receiver looks okay -- is it swapped in?
1119 */
1120 rcv_act = receiver->top_act;
1121 if (rcv_act->swap_state != TH_SW_IN &&
1122 rcv_act->swap_state != TH_SW_UNSWAPPABLE) {
1123 HOT(c_mmot_rcvr_swapped++);
1124 goto fall_off;
1125 }
1126
1127 /*
1128 * Make sure receiver stays swapped in (if we can).
1129 */
1130 if (!act_lock_try(rcv_act)) { /* out of order! */
1131 HOT(c_mmot_rcvr_locked++);
1132 goto fall_off;
1133 }
1134
1135 /*
1136 * Check for task swapping in progress affecting
1137 * receiver. Since rcv_act is attached to a shuttle,
1138 * its swap_state is covered by shuttle's thread_lock()
1139 * (sigh).
1140 */
1141 if ((rcv_act->swap_state != TH_SW_IN &&
1142 rcv_act->swap_state != TH_SW_UNSWAPPABLE) ||
1143 rcv_act->ast & AST_SWAPOUT) {
1144 act_unlock(rcv_act);
1145 HOT(c_mmot_rcvr_tswapped++);
1146 goto fall_off;
1147 }
1148
1149 /*
1150 * We don't need to make receiver unswappable here -- holding
1151 * act_lock() of rcv_act is sufficient to prevent either thread
1152 * or task swapping from changing its state (see swapout_scan(),
1153 * task_swapout()). Don't release lock till receiver's state
1154 * is consistent. Its task may then be marked for swapout,
1155 * but that's life.
1156 */
1157 /*
1158 * NB: act_lock(rcv_act) still held
1159 */
1160 #endif /* THREAD_SWAPPER */
1161
1162 /*
1163 * Before committing to the handoff, make sure that we are
1164 * really going to block (i.e. there are no messages already
1165 * queued for us).  This check violates lock ordering, so make sure
1166 * we don't deadlock. After the trylock succeeds below, we
1167 * may have up to 3 message queues locked:
1168 * - the dest port mqueue
1169 * - a portset mqueue (where waiting receiver was found)
1170 * - finally our own rcv_mqueue
1171 *
1172 * JMM - Need to make this check appropriate for portsets as
1173 * well before re-enabling them.
1174 */
1175 if (!imq_lock_try(rcv_mqueue)) {
1176 goto fall_off;
1177 }
1178 if (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages) != IKM_NULL) {
1179 imq_unlock(rcv_mqueue);
1180 HOT(c_mmot_cold_033++);
1181 goto fall_off;
1182 }
1183
1184 /* At this point we are committed to do the "handoff". */
1185 c_mach_msg_trap_switch_fast++;
1186
1187 /*
1188 * JMM - Go ahead and pull the receiver from the runq. If the
1189 * runq wasn't the one for the mqueue, unlock it.
1190 */
1191 wait_queue_pull_thread_locked(waitq,
1192 receiver,
1193 (waitq != &dest_mqueue->imq_wait_queue));
1194
1195 /*
1196 * Store the kmsg and seqno where the receiver can pick it up.
1197 */
1198 receiver->ith_state = MACH_MSG_SUCCESS;
1199 receiver->ith_kmsg = kmsg;
1200 receiver->ith_seqno = dest_mqueue->imq_seqno++;
1201
1202 /*
1203 * Inline thread_go_locked
1204 *
1205 * JMM - Including hacked in version of setrun scheduler op
1206 * that doesn't try to put thread on a runq.
1207 */
1208 {
1209 receiver->state &= ~(TH_WAIT|TH_UNINT);
1210 hw_atomic_add(&receiver->processor_set->run_count, 1);
1211 receiver->state |= TH_RUN;
1212 receiver->wait_result = THREAD_AWAKENED;
1213
1214 receiver->computation_metered = 0;
1215 receiver->reason = AST_NONE;
1216 }
1217
1218 thread_unlock(receiver);
1219 #if THREAD_SWAPPER
1220 act_unlock(rcv_act);
1221 #endif /* THREAD_SWAPPER */
1222
1223 imq_unlock(dest_mqueue);
1224 ip_unlock(dest_port);
1225 current_task()->messages_sent++;
1226
1227
1228 /*
1229 * Put self on receive port's queue.
1230 * Also save state that the sender of
1231 * our reply message needs to determine if it
1232 * can hand off directly back to us.
1233 */
1234 self->ith_msg = (rcv_msg) ? rcv_msg : msg;
1235 self->ith_object = rcv_object; /* still holds reference */
1236 self->ith_msize = rcv_size;
1237 self->ith_option = option;
1238 self->ith_scatter_list_size = scatter_list_size;
1239 self->ith_continuation = thread_syscall_return;
1240
1241 waitq = &rcv_mqueue->imq_wait_queue;
1242 (void)wait_queue_assert_wait64_locked(waitq,
1243 IPC_MQUEUE_RECEIVE,
1244 THREAD_ABORTSAFE,
1245 TRUE); /* unlock? */
1246 /* rcv_mqueue is unlocked */
1247
1248 /*
1249 * Switch directly to receiving thread, and block
1250 * this thread as though it had called ipc_mqueue_receive.
1251 */
1252 thread_run(self, ipc_mqueue_receive_continue, receiver);
1253 /* NOTREACHED */
1254 }
1255
1256 fast_copyout:
1257 /*
1258 * Nothing locked and no references held, except
1259 * we have kmsg with msgh_seqno filled in. Must
1260 * still check against rcv_size and do
1261 * ipc_kmsg_copyout/ipc_kmsg_put.
1262 */
1263
1264 reply_size = send_size + trailer->msgh_trailer_size;
1265 if (rcv_size < reply_size) {
1266 HOT(c_mmot_g_slow_copyout6++);
1267 goto slow_copyout;
1268 }
1269
1270 /* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */
1271
1272 switch (hdr->msgh_bits) {
1273 case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
1274 MACH_MSG_TYPE_PORT_SEND_ONCE): {
1275 ipc_port_t reply_port =
1276 (ipc_port_t) hdr->msgh_local_port;
1277 mach_port_name_t dest_name, reply_name;
1278
1279 /* receiving a request message */
1280
1281 if (!IP_VALID(reply_port)) {
1282 HOT(c_mmot_g_slow_copyout5++);
1283 goto slow_copyout;
1284 }
1285
1286 is_write_lock(space);
1287 assert(space->is_active);
1288
1289 /*
1290 * To do an atomic copyout, need simultaneous
1291 * locks on both ports and the space. If
1292 * dest_port == reply_port, and simple locking is
1293 * enabled, then we will abort. Otherwise it's
1294 * OK to unlock twice.
1295 */
1296
1297 ip_lock(dest_port);
1298 if (!ip_active(dest_port) ||
1299 !ip_lock_try(reply_port)) {
1300 HOT(c_mmot_cold_037++);
1301 goto abort_request_copyout;
1302 }
1303
1304 if (!ip_active(reply_port)) {
1305 ip_unlock(reply_port);
1306 HOT(c_mmot_cold_038++);
1307 goto abort_request_copyout;
1308 }
1309
1310 assert(reply_port->ip_sorights > 0);
1311 ip_unlock(reply_port);
1312
1313 {
1314 register ipc_entry_t table;
1315 register ipc_entry_t entry;
1316 register mach_port_index_t index;
1317
1318 /* optimized ipc_entry_get */
1319
1320 table = space->is_table;
1321 index = table->ie_next;
1322
1323 if (index == 0) {
1324 HOT(c_mmot_cold_039++);
1325 goto abort_request_copyout;
1326 }
1327
1328 entry = &table[index];
1329 table->ie_next = entry->ie_next;
1330 entry->ie_request = 0;
1331
1332 {
1333 register mach_port_gen_t gen;
1334
1335 assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
1336 gen = IE_BITS_NEW_GEN(entry->ie_bits);
1337
1338 reply_name = MACH_PORT_MAKE(index, gen);
1339
1340 /* optimized ipc_right_copyout */
1341
1342 entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
1343 }
1344
1345 assert(MACH_PORT_VALID(reply_name));
1346 entry->ie_object = (ipc_object_t) reply_port;
1347 is_write_unlock(space);
1348 }
1349
1350 /* optimized ipc_object_copyout_dest */
1351
1352 assert(dest_port->ip_srights > 0);
1353 ip_release(dest_port);
1354
1355 if (dest_port->ip_receiver == space)
1356 dest_name = dest_port->ip_receiver_name;
1357 else
1358 dest_name = MACH_PORT_NULL;
1359
1360 if ((--dest_port->ip_srights == 0) &&
1361 (dest_port->ip_nsrequest != IP_NULL)) {
1362 ipc_port_t nsrequest;
1363 mach_port_mscount_t mscount;
1364
1365 /* a rather rare case */
1366
1367 nsrequest = dest_port->ip_nsrequest;
1368 mscount = dest_port->ip_mscount;
1369 dest_port->ip_nsrequest = IP_NULL;
1370 ip_unlock(dest_port);
1371 ipc_notify_no_senders(nsrequest, mscount);
1372 } else
1373 ip_unlock(dest_port);
1374
1375 hdr->msgh_bits =
1376 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
1377 MACH_MSG_TYPE_PORT_SEND);
1378 hdr->msgh_remote_port = (mach_port_t)reply_name;
1379 hdr->msgh_local_port = (mach_port_t)dest_name;
1380 HOT(c_mmot_hot_ok1++);
1381 goto fast_put;
1382
1383 abort_request_copyout:
1384 ip_unlock(dest_port);
1385 is_write_unlock(space);
1386 HOT(c_mmot_g_slow_copyout4++);
1387 goto slow_copyout;
1388 }
1389
1390 case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1391 register mach_port_name_t dest_name;
1392
1393 /* receiving a reply message */
1394
1395 ip_lock(dest_port);
1396 if (!ip_active(dest_port)) {
1397 ip_unlock(dest_port);
1398 HOT(c_mmot_g_slow_copyout3++);
1399 goto slow_copyout;
1400 }
1401
1402 /* optimized ipc_object_copyout_dest */
1403
1404 assert(dest_port->ip_sorights > 0);
1405
1406 if (dest_port->ip_receiver == space) {
1407 ip_release(dest_port);
1408 dest_port->ip_sorights--;
1409 dest_name = dest_port->ip_receiver_name;
1410 ip_unlock(dest_port);
1411 } else {
1412 ip_unlock(dest_port);
1413
1414 ipc_notify_send_once(dest_port);
1415 dest_name = MACH_PORT_NULL;
1416 }
1417
1418 hdr->msgh_bits = MACH_MSGH_BITS(0,
1419 MACH_MSG_TYPE_PORT_SEND_ONCE);
1420 hdr->msgh_remote_port = MACH_PORT_NULL;
1421 hdr->msgh_local_port = (ipc_port_t)dest_name;
1422 HOT(c_mmot_hot_ok2++);
1423 goto fast_put;
1424 }
1425
1426 case MACH_MSGH_BITS_COMPLEX|
1427 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1428 register mach_port_name_t dest_name;
1429
1430 /* receiving a complex reply message */
1431
1432 ip_lock(dest_port);
1433 if (!ip_active(dest_port)) {
1434 ip_unlock(dest_port);
1435 HOT(c_mmot_g_slow_copyout1++);
1436 goto slow_copyout;
1437 }
1438
1439 /* optimized ipc_object_copyout_dest */
1440
1441 assert(dest_port->ip_sorights > 0);
1442
1443 if (dest_port->ip_receiver == space) {
1444 ip_release(dest_port);
1445 dest_port->ip_sorights--;
1446 dest_name = dest_port->ip_receiver_name;
1447 ip_unlock(dest_port);
1448 } else {
1449 ip_unlock(dest_port);
1450
1451 ipc_notify_send_once(dest_port);
1452 dest_name = MACH_PORT_NULL;
1453 }
1454
1455 hdr->msgh_bits =
1456 MACH_MSGH_BITS_COMPLEX |
1457 MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND_ONCE);
1458 hdr->msgh_remote_port = MACH_PORT_NULL;
1459 hdr->msgh_local_port = (mach_port_t)dest_name;
1460
1461 mr = ipc_kmsg_copyout_body(kmsg, space,
1462 current_map(),
1463 MACH_MSG_BODY_NULL);
1464 if (mr != MACH_MSG_SUCCESS) {
1465 if (ipc_kmsg_put(msg, kmsg, hdr->msgh_size +
1466 trailer->msgh_trailer_size) ==
1467 MACH_RCV_INVALID_DATA)
1468 return MACH_RCV_INVALID_DATA;
1469 else
1470 return mr | MACH_RCV_BODY_ERROR;
1471 }
1472 HOT(c_mmot_hot_ok3++);
1473 goto fast_put;
1474 }
1475
1476 default:
1477 HOT(c_mmot_g_slow_copyout2++);
1478 goto slow_copyout;
1479 }
1480 /*NOTREACHED*/
1481
1482 fast_put:
1483 mr = ipc_kmsg_put(rcv_msg ? rcv_msg : msg,
1484 kmsg,
1485 hdr->msgh_size + trailer->msgh_trailer_size);
1486 if (mr != MACH_MSG_SUCCESS) {
1487 return MACH_RCV_INVALID_DATA;
1488 }
1489 current_task()->messages_received++;
1490 return mr;
1491
1492
1493 /* BEGINNING OF WARM PATH */
1494
1495 /*
1496 * The slow path has a few non-register temporary
1497 * variables used only for call-by-reference.
1498 */
1499
1500 slow_copyin:
1501 {
1502 ipc_kmsg_t temp_kmsg;
1503 mach_port_seqno_t temp_seqno;
1504 ipc_object_t temp_rcv_object;
1505 ipc_mqueue_t temp_rcv_mqueue;
1506 register mach_port_name_t reply_name =
1507 (mach_port_name_t)hdr->msgh_local_port;
1508
1509
1510 /*
1511 * We have the message data in kmsg, but
1512 * we still need to copyin, send it,
1513 * receive a reply, and do copyout.
1514 */
1515
1516 mr = ipc_kmsg_copyin(kmsg, space, current_map(),
1517 MACH_PORT_NULL);
1518 if (mr != MACH_MSG_SUCCESS) {
1519 ipc_kmsg_free(kmsg);
1520 return(mr);
1521 }
1522
1523 /* try to get back on optimized path */
1524
1525 if ((reply_name != rcv_name) ||
1526 (hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR)) {
1527 HOT(c_mmot_cold_048++);
1528 goto slow_send;
1529 }
1530
1531 dest_port = (ipc_port_t) hdr->msgh_remote_port;
1532 assert(IP_VALID(dest_port));
1533
1534 ip_lock(dest_port);
1535 if (!ip_active(dest_port)) {
1536 ip_unlock(dest_port);
1537 goto slow_send;
1538 }
1539
1540 if (dest_port->ip_receiver == ipc_space_kernel) {
1541 dest_port->ip_messages.imq_seqno++;
1542 ip_unlock(dest_port);
1543 goto kernel_send;
1544 }
1545
1546 if (!imq_full(&dest_port->ip_messages) ||
1547 (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) ==
1548 MACH_MSG_TYPE_PORT_SEND_ONCE))
1549 {
1550 /*
1551 * Try an optimized ipc_mqueue_copyin.
1552 * It will work if this is a request message.
1553 */
1554
1555 register ipc_port_t reply_port;
1556
1557 reply_port = (ipc_port_t) hdr->msgh_local_port;
1558 if (IP_VALID(reply_port)) {
1559 if (ip_lock_try(reply_port)) {
1560 if (ip_active(reply_port) &&
1561 reply_port->ip_receiver == space &&
1562 reply_port->ip_receiver_name == rcv_name &&
1563 reply_port->ip_pset_count == 0)
1564 {
1565 /* Grab a reference to the reply port. */
1566 rcv_object = (ipc_object_t) reply_port;
1567 io_reference(rcv_object);
1568 rcv_mqueue = &reply_port->ip_messages;
1569 io_unlock(rcv_object);
1570 HOT(c_mmot_getback_FastSR++);
1571 goto fast_send_receive;
1572 }
1573 ip_unlock(reply_port);
1574 }
1575 }
1576 }
1577
1578 ip_unlock(dest_port);
1579 HOT(c_mmot_cold_050++);
1580 goto slow_send;
1581
1582 kernel_send:
1583 /*
1584 * Special case: send message to kernel services.
1585 * The request message has been copied into the
1586 * kmsg. Nothing is locked.
1587 */
1588
1589 {
1590 register ipc_port_t reply_port;
1591 mach_port_seqno_t local_seqno;
1592 spl_t s;
1593
1594 /*
1595 * Perform the kernel function.
1596 */
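/*
 * ipc_kobject_server() dispatches the request to the in-kernel (MIG)
 * server routine for the destination kernel object and hands back a
 * reply kmsg, or IKM_NULL if the request produced no reply.
 */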
1597 c_mmot_kernel_send++;
1598
1599 current_task()->messages_sent++;
1600
1601 kmsg = ipc_kobject_server(kmsg);
1602 if (kmsg == IKM_NULL) {
1603 /*
1604 * No reply. Take the
1605 * slow receive path.
1606 */
1607 HOT(c_mmot_cold_051++);
1608 goto slow_get_rcv_port;
1609 }
1610
1611 /*
1612 * Check that:
1613 * the reply port is alive
1614 * we hold the receive right
1615 * the name has not changed.
1616 * the port is not in a set
1617 * If any of these are not true,
1618 * we cannot directly receive the reply
1619 * message.
1620 */
1621 hdr = &kmsg->ikm_header;
1622 send_size = hdr->msgh_size;
1623 trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr +
1624 round_msg(send_size));
1625 reply_port = (ipc_port_t) hdr->msgh_remote_port;
1626 ip_lock(reply_port);
1627
1628 if ((!ip_active(reply_port)) ||
1629 (reply_port->ip_receiver != space) ||
1630 (reply_port->ip_receiver_name != rcv_name) ||
1631 (reply_port->ip_pset_count != 0))
1632 {
1633 ip_unlock(reply_port);
1634 ipc_kmsg_send_always(kmsg);
1635 HOT(c_mmot_cold_052++);
1636 goto slow_get_rcv_port;
1637 }
1638
1639 s = splsched();
1640 rcv_mqueue = &reply_port->ip_messages;
1641 imq_lock(rcv_mqueue);
1642
1643 	/* keep port locked, and don't change ref count yet */
1644
1645 /*
1646 * If there are messages on the port
1647 * or other threads waiting for a message,
1648 * we cannot directly receive the reply.
1649 */
1650 if (!wait_queue_empty(&rcv_mqueue->imq_wait_queue) ||
1651 (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages) != IKM_NULL))
1652 {
1653 imq_unlock(rcv_mqueue);
1654 splx(s);
1655 ip_unlock(reply_port);
1656 ipc_kmsg_send_always(kmsg);
1657 HOT(c_mmot_cold_053++);
1658 goto slow_get_rcv_port;
1659 }
1660
1661 /*
1662 * We can directly receive this reply.
1663 * Since there were no messages queued
1664 * on the reply port, there should be
1665 * no threads blocked waiting to send.
1666 */
1667 dest_port = reply_port;
1668 local_seqno = rcv_mqueue->imq_seqno++;
1669 imq_unlock(rcv_mqueue);
1670 splx(s);
1671
1672 /*
1673 * inline ipc_object_release.
1674 * Port is still locked.
1675 * Reference count was not incremented.
1676 */
1677 ip_check_unlock(reply_port);
1678
1679 if (option & MACH_RCV_TRAILER_MASK) {
1680 trailer->msgh_seqno = local_seqno;
1681 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
1682 }
1683 /* copy out the kernel reply */
1684 HOT(c_mmot_fastkernelreply++);
1685 goto fast_copyout;
1686 }
1687
1688 slow_send:
1689 /*
1690 * Nothing is locked. We have acquired kmsg, but
1691 * we still need to send it and receive a reply.
1692 */
1693
1694 mr = ipc_kmsg_send(kmsg, MACH_MSG_OPTION_NONE,
1695 MACH_MSG_TIMEOUT_NONE);
1696 if (mr != MACH_MSG_SUCCESS) {
1697 mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
1698 current_map(),
1699 MACH_MSG_BODY_NULL);
1700
1701 (void) ipc_kmsg_put(msg, kmsg, hdr->msgh_size);
1702 return(mr);
1703 }
1704
1705 slow_get_rcv_port:
1706 /*
1707 * We have sent the message. Copy in the receive port.
1708 */
1709 mr = ipc_mqueue_copyin(space, rcv_name,
1710 &temp_rcv_mqueue, &temp_rcv_object);
1711 if (mr != MACH_MSG_SUCCESS) {
1712 return(mr);
1713 }
1714 rcv_mqueue = temp_rcv_mqueue;
1715 rcv_object = temp_rcv_object;
1716 /* hold ref for rcv_object */
1717
1718 slow_receive:
1719 /*
1720 * Now we have sent the request and copied in rcv_name,
1721 * and hold ref for rcv_object (to keep mqueue alive).
1722 * Just receive a reply and try to get back to fast path.
1723 */
1724
1725 self->ith_continuation = (void (*)(mach_msg_return_t))0;
1726 ipc_mqueue_receive(rcv_mqueue,
1727 MACH_MSG_OPTION_NONE,
1728 MACH_MSG_SIZE_MAX,
1729 MACH_MSG_TIMEOUT_NONE,
1730 THREAD_ABORTSAFE);
1731
1732 mr = self->ith_state;
1733 temp_kmsg = self->ith_kmsg;
1734 temp_seqno = self->ith_seqno;
1735
1736 ipc_object_release(rcv_object);
1737
1738 if (mr != MACH_MSG_SUCCESS) {
1739 return(mr);
1740 }
1741
1742 kmsg = temp_kmsg;
1743 hdr = &kmsg->ikm_header;
1744 send_size = hdr->msgh_size;
1745 trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr +
1746 round_msg(send_size));
1747 if (option & MACH_RCV_TRAILER_MASK) {
1748 trailer->msgh_seqno = temp_seqno;
1749 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
1750 }
1751 dest_port = (ipc_port_t) hdr->msgh_remote_port;
1752 HOT(c_mmot_cold_055++);
1753 goto fast_copyout;
1754
1755 slow_copyout:
1756 /*
1757 * Nothing locked and no references held, except
1758 * we have kmsg with msgh_seqno filled in. Must
1759 * still check against rcv_size and do
1760 * ipc_kmsg_copyout/ipc_kmsg_put.
1761 */
1762
1763 reply_size = send_size + trailer->msgh_trailer_size;
1764 if (rcv_size < reply_size) {
1765 if (msg_receive_error(kmsg, msg, option, temp_seqno,
1766 space) == MACH_RCV_INVALID_DATA) {
1767 mr = MACH_RCV_INVALID_DATA;
1768 return(mr);
1769 }
1770 else {
1771 mr = MACH_RCV_TOO_LARGE;
1772 return(mr);
1773 }
1774 }
1775
1776 mr = ipc_kmsg_copyout(kmsg, space, current_map(),
1777 MACH_PORT_NULL, MACH_MSG_BODY_NULL);
1778 if (mr != MACH_MSG_SUCCESS) {
1779 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
1780 if (ipc_kmsg_put(msg, kmsg, reply_size) ==
1781 MACH_RCV_INVALID_DATA)
1782 mr = MACH_RCV_INVALID_DATA;
1783 }
1784 else {
1785 if (msg_receive_error(kmsg, msg, option,
1786 temp_seqno, space) == MACH_RCV_INVALID_DATA)
1787 mr = MACH_RCV_INVALID_DATA;
1788 }
1789
1790 return(mr);
1791 }
1792
1793 /* try to get back on optimized path */
1794 HOT(c_mmot_getback_fast_put++);
1795 goto fast_put;
1796
1797 /*NOTREACHED*/
1798 }
1799 } /* END OF HOT PATH */
1800 #endif /* ENABLE_HOTPATH */
1801
1802 if (option & MACH_SEND_MSG) {
1803 mr = mach_msg_send(msg, option, send_size,
1804 timeout, notify);
1805 if (mr != MACH_MSG_SUCCESS) {
1806 return mr;
1807 }
1808 }
1809
1810 if (option & MACH_RCV_MSG) {
1811 mach_msg_header_t *rcv;
1812
1813 		/* Pick the receive buffer; there are two cases:
1814 * 1. MACH_RCV_OVERWRITE is on, and rcv_msg is our scatter list
1815 * and receive buffer
1816 * 2. MACH_RCV_OVERWRITE is off, and rcv_msg might be the
1817 * alternate receive buffer (separate send and receive buffers).
1818 */
1819 if (option & MACH_RCV_OVERWRITE)
1820 rcv = rcv_msg;
1821 else if (rcv_msg != MACH_MSG_NULL)
1822 rcv = rcv_msg;
1823 else
1824 rcv = msg;
1825 mr = mach_msg_receive(rcv, option, rcv_size, rcv_name,
1826 timeout, thread_syscall_return, scatter_list_size);
1827 thread_syscall_return(mr);
1828 }
1829
1830 return MACH_MSG_SUCCESS;
1831 }
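/*
 * Note that when MACH_RCV_MSG is set the receive result is delivered via
 * thread_syscall_return() above and control does not come back here; the
 * final MACH_MSG_SUCCESS is reached only for send-only (or no-op) calls.
 */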
1832
1833 /*
1834 * Routine: mach_msg_trap [mach trap]
1835 * Purpose:
1836 * Possibly send a message; possibly receive a message.
1837 * Conditions:
1838 * Nothing locked.
1839 * Returns:
1840 * All of mach_msg_send and mach_msg_receive error codes.
1841 */
1842
1843 mach_msg_return_t
1844 mach_msg_trap(
1845 mach_msg_header_t *msg,
1846 mach_msg_option_t option,
1847 mach_msg_size_t send_size,
1848 mach_msg_size_t rcv_size,
1849 mach_port_name_t rcv_name,
1850 mach_msg_timeout_t timeout,
1851 mach_port_name_t notify)
1852 {
1853 return mach_msg_overwrite_trap(msg,
1854 option,
1855 send_size,
1856 rcv_size,
1857 rcv_name,
1858 timeout,
1859 notify,
1860 (mach_msg_header_t *)0,
1861 (mach_msg_size_t)0);
1862 }
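/*
 * Illustrative sketch (not part of this file): user code normally reaches
 * this trap through the mach_msg() library routine.  A minimal one-way
 * send, where "dest", "simple_msg_t" and the msgh_id value are
 * hypothetical names used only for this example:
 *
 *	typedef struct {
 *		mach_msg_header_t	header;
 *		int			data;
 *	} simple_msg_t;
 *
 *	simple_msg_t m;
 *	m.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
 *	m.header.msgh_size = sizeof m;
 *	m.header.msgh_remote_port = dest;
 *	m.header.msgh_local_port = MACH_PORT_NULL;
 *	m.header.msgh_id = 100;
 *	kr = mach_msg(&m.header, MACH_SEND_MSG, sizeof m, 0,
 *		      MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 *
 * Adding MACH_RCV_MSG plus a reply port turns this into the combined
 * send/receive that the hot path above is designed to optimize.
 */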
1863
1864
1865 /*
1866 * Routine: msg_receive_error [internal]
1867 * Purpose:
1868 * Builds a minimal header/trailer and copies it to
1869 *	the user message buffer.  Invoked in the case of a
1870 * MACH_RCV_TOO_LARGE or MACH_RCV_BODY_ERROR error.
1871 * Conditions:
1872 * Nothing locked.
1873 * Returns:
1874 * MACH_MSG_SUCCESS minimal header/trailer copied
1875 * MACH_RCV_INVALID_DATA copyout to user buffer failed
1876 */
1877
1878 mach_msg_return_t
1879 msg_receive_error(
1880 ipc_kmsg_t kmsg,
1881 mach_msg_header_t *msg,
1882 mach_msg_option_t option,
1883 mach_port_seqno_t seqno,
1884 ipc_space_t space)
1885 {
1886 mach_msg_format_0_trailer_t *trailer;
1887
1888 /*
1889 * Copy out the destination port in the message.
1890 * Destroy all other rights and memory in the message.
1891 */
1892 ipc_kmsg_copyout_dest(kmsg, space);
1893
1894 /*
1895 * Build a minimal message with the requested trailer.
1896 */
1897 trailer = (mach_msg_format_0_trailer_t *)
1898 ((vm_offset_t)&kmsg->ikm_header +
1899 round_msg(sizeof(mach_msg_header_t)));
1900 kmsg->ikm_header.msgh_size = sizeof(mach_msg_header_t);
1901 bcopy( (char *)&trailer_template,
1902 (char *)trailer,
1903 sizeof(trailer_template));
1904 if (option & MACH_RCV_TRAILER_MASK) {
1905 trailer->msgh_seqno = seqno;
1906 trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
1907 }
1908
1909 /*
1910 * Copy the message to user space
1911 */
1912 if (ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size +
1913 trailer->msgh_trailer_size) == MACH_RCV_INVALID_DATA)
1914 return(MACH_RCV_INVALID_DATA);
1915 else
1916 return(MACH_MSG_SUCCESS);
1917 }