]> git.saurik.com Git - apple/xnu.git/blame_incremental - osfmk/ipc/ipc_entry.c
xnu-4570.71.2.tar.gz
[apple/xnu.git] / osfmk / ipc / ipc_entry.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: ipc/ipc_entry.c
60 * Author: Rich Draves
61 * Date: 1989
62 *
63 * Primitive functions to manipulate translation entries.
64 */
65
66#include <mach_debug.h>
67
68#include <mach/kern_return.h>
69#include <mach/port.h>
70#include <kern/assert.h>
71#include <kern/sched_prim.h>
72#include <kern/zalloc.h>
73#include <kern/misc_protos.h>
74#include <ipc/port.h>
75#include <ipc/ipc_entry.h>
76#include <ipc/ipc_space.h>
77#include <ipc/ipc_object.h>
78#include <ipc/ipc_hash.h>
79#include <ipc/ipc_table.h>
80#include <ipc/ipc_port.h>
81#include <string.h>
82
83/*
84 * Routine: ipc_entry_lookup
85 * Purpose:
86 * Searches for an entry, given its name.
87 * Conditions:
88 * The space must be read or write locked throughout.
89 * The space must be active.
90 */
91
92ipc_entry_t
93ipc_entry_lookup(
94 ipc_space_t space,
95 mach_port_name_t name)
96{
97 mach_port_index_t index;
98 ipc_entry_t entry;
99
100 assert(is_active(space));
101
102 index = MACH_PORT_INDEX(name);
103 if (index < space->is_table_size) {
104 entry = &space->is_table[index];
105 if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
106 IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) {
107 entry = IE_NULL;
108 }
109 }
110 else {
111 entry = IE_NULL;
112 }
113
114 assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
115 return entry;
116}
117
118
119/*
120 * Routine: ipc_entries_hold
121 * Purpose:
122 * Verifies that there are at least 'entries_needed'
123 * free list members
124 * Conditions:
125 * The space is write-locked and active throughout.
126 * An object may be locked. Will not allocate memory.
127 * Returns:
128 * KERN_SUCCESS Free entries were found.
129 * KERN_NO_SPACE No entry allocated.
130 */
131
132kern_return_t
133ipc_entries_hold(
134 ipc_space_t space,
135 uint32_t entries_needed)
136{
137
138 ipc_entry_t table;
139 mach_port_index_t next_free = 0;
140 uint32_t i;
141
142 assert(is_active(space));
143
144 table = &space->is_table[0];
145
146 for (i = 0; i < entries_needed; i++) {
147 next_free = table[next_free].ie_next;
148 if (next_free == 0) {
149 return KERN_NO_SPACE;
150 }
151 assert(next_free < space->is_table_size);
152 assert(table[next_free].ie_object == IO_NULL);
153 }
154 return KERN_SUCCESS;
155}
156
157/*
158 * Routine: ipc_entry_claim
159 * Purpose:
160 * Take formal ownership of a held entry.
161 * Conditions:
162 * The space is write-locked and active throughout.
163 * An object may be locked. Will not allocate memory.
164 *
165 * Note: The returned entry must be marked as modified before
166 * releasing the space lock
167 */
168
169kern_return_t
170ipc_entry_claim(
171 ipc_space_t space,
172 mach_port_name_t *namep,
173 ipc_entry_t *entryp)
174{
175 ipc_entry_t entry;
176 ipc_entry_t table;
177 mach_port_index_t first_free;
178 mach_port_gen_t gen;
179 mach_port_name_t new_name;
180
181 table = &space->is_table[0];
182
183 first_free = table->ie_next;
184 assert(first_free != 0);
185
186 entry = &table[first_free];
187 table->ie_next = entry->ie_next;
188 space->is_table_free--;
189
190 assert(table->ie_next < space->is_table_size);
191
192 /*
193 * Initialize the new entry: increment gencount and reset
194 * rollover point if it rolled over, and clear ie_request.
195 */
196 gen = ipc_entry_new_gen(entry->ie_bits);
197 if (__improbable(ipc_entry_gen_rolled(entry->ie_bits, gen))) {
198 ipc_entry_bits_t roll = ipc_space_get_rollpoint(space);
199 gen = ipc_entry_new_rollpoint(roll);
200 }
201 entry->ie_bits = gen;
202 entry->ie_request = IE_REQ_NONE;
203
204 /*
205 * The new name can't be MACH_PORT_NULL because index
206 * is non-zero. It can't be MACH_PORT_DEAD because
207 * the table isn't allowed to grow big enough.
208 * (See comment in ipc/ipc_table.h.)
209 */
210 new_name = MACH_PORT_MAKE(first_free, gen);
211 assert(MACH_PORT_VALID(new_name));
212 *namep = new_name;
213 *entryp = entry;
214
215 return KERN_SUCCESS;
216}
217
218/*
219 * Routine: ipc_entry_get
220 * Purpose:
221 * Tries to allocate an entry out of the space.
222 * Conditions:
223 * The space is write-locked and active throughout.
224 * An object may be locked. Will not allocate memory.
225 * Returns:
226 * KERN_SUCCESS A free entry was found.
227 * KERN_NO_SPACE No entry allocated.
228 */
229
230kern_return_t
231ipc_entry_get(
232 ipc_space_t space,
233 mach_port_name_t *namep,
234 ipc_entry_t *entryp)
235{
236 kern_return_t kr;
237
238 kr = ipc_entries_hold(space, 1);
239 if (KERN_SUCCESS != kr)
240 return kr;
241
242 return ipc_entry_claim(space, namep, entryp);
243}
244
245/*
246 * Routine: ipc_entry_alloc
247 * Purpose:
248 * Allocate an entry out of the space.
249 * Conditions:
250 * The space is not locked before, but it is write-locked after
251 * if the call is successful. May allocate memory.
252 * Returns:
253 * KERN_SUCCESS An entry was allocated.
254 * KERN_INVALID_TASK The space is dead.
255 * KERN_NO_SPACE No room for an entry in the space.
256 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
257 */
258
259kern_return_t
260ipc_entry_alloc(
261 ipc_space_t space,
262 mach_port_name_t *namep,
263 ipc_entry_t *entryp)
264{
265 kern_return_t kr;
266
267 is_write_lock(space);
268
269 for (;;) {
270 if (!is_active(space)) {
271 is_write_unlock(space);
272 return KERN_INVALID_TASK;
273 }
274
275 kr = ipc_entry_get(space, namep, entryp);
276 if (kr == KERN_SUCCESS)
277 return kr;
278
279 kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
280 if (kr != KERN_SUCCESS)
281 return kr; /* space is unlocked */
282 }
283}
284
285/*
286 * Routine: ipc_entry_alloc_name
287 * Purpose:
288 * Allocates/finds an entry with a specific name.
289 * If an existing entry is returned, its type will be nonzero.
290 * Conditions:
291 * The space is not locked before, but it is write-locked after
292 * if the call is successful. May allocate memory.
293 * Returns:
294 * KERN_SUCCESS Found existing entry with same name.
295 * KERN_SUCCESS Allocated a new entry.
296 * KERN_INVALID_TASK The space is dead.
297 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
 *		KERN_FAILURE		Couldn't allocate requested name.
 *		KERN_NO_SPACE		The requested index exceeds the
 *					maximum table size.
299 */
300
kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);

	/* Refuse names whose index can never fit in any table size. */
	if (index > ipc_table_max_entries())
		return KERN_NO_SPACE;

	assert(MACH_PORT_VALID(name));


	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;

		if (!is_active(space)) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *		1) The entry is reserved (index 0)
		 *		2) The entry is inuse, for the same name
		 *		3) The entry is inuse, for a different name
		 *		4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				/* case #1 - the entry is reserved */
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
				is_write_unlock(space);
				return KERN_FAILURE;
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					/* case #2 -- the entry is inuse, for the same name */
					/* success: the space stays write-locked for the caller */
					*entryp = entry;
					return KERN_SUCCESS;
				} else {
					/* case #3 -- the entry is inuse, for a different name. */
					/* Collisions are not allowed */
					is_write_unlock(space);
					return KERN_FAILURE;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	case #4 -- the entry is free
				 *	Rip the entry out of the free list.
				 *	The freelist is singly linked through
				 *	ie_next starting at slot 0, so walk it
				 *	to find the predecessor of 'index'.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
				      != index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;
				space->is_table_free--;

				/* mark the previous entry modified - reconstructing the name */
				ipc_entry_modified(space,
					MACH_PORT_MAKE(free_index,
						IE_BITS_GEN(table[free_index].ie_bits)),
					&table[free_index]);

				/* stamp the caller-supplied generation; fresh entry has no request */
				entry->ie_bits = gen;
				entry->ie_request = IE_REQ_NONE;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				return KERN_SUCCESS;
			}
		}

		/*
		 *	We grow the table so that the name
		 *	index fits in the array space.
		 *	Because the space will be unlocked,
		 *	we must restart.
		 */
		kern_return_t kr;
		kr = ipc_entry_grow_table(space, index + 1);
		assert(kr != KERN_NO_SPACE);
		if (kr != KERN_SUCCESS) {
			/* space is unlocked */
			return kr;
		}
		continue;
	}
}
407
408/*
409 * Routine: ipc_entry_dealloc
410 * Purpose:
411 * Deallocates an entry from a space.
412 * Conditions:
413 * The space must be write-locked throughout.
414 * The space must be active.
415 */
416
void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(is_active(space));
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == IE_REQ_NONE);

	/*
	 * Belt-and-suspenders: the assert above is compiled out on
	 * release kernels, so also panic there if a request survives.
	 */
#if 1
	if (entry->ie_request != IE_REQ_NONE)
		panic("ipc_entry_dealloc()\n");
#endif

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		/*
		 * Keep only the generation/roll bits, then push the slot
		 * back onto the head of the freelist (threaded via ie_next
		 * through slot 0).
		 */
		entry->ie_bits &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		space->is_table_free++;
	} else {
		/*
		 * Nothing to do.  The entry does not match
		 * so there is nothing to deallocate.
		 * (These asserts fail by construction on DEBUG kernels:
		 * reaching this branch means name and entry disagree.)
		 */
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
	}
	/* record the slot as dirty for any in-flight table grow */
	ipc_entry_modified(space, name, entry);
}
457
458/*
459 * Routine: ipc_entry_modified
460 * Purpose:
461 * Note that an entry was modified in a space.
462 * Conditions:
463 * Assumes exclusive write access to the space,
464 * either through a write lock or being the cleaner
465 * on an inactive space.
466 */
467
468void
469ipc_entry_modified(
470 ipc_space_t space,
471 mach_port_name_t name,
472 __assert_only ipc_entry_t entry)
473{
474 ipc_entry_t table;
475 ipc_entry_num_t size;
476 mach_port_index_t index;
477
478 index = MACH_PORT_INDEX(name);
479 table = space->is_table;
480 size = space->is_table_size;
481
482 assert(index < size);
483 assert(entry == &table[index]);
484
485 assert(space->is_low_mod <= size);
486 assert(space->is_high_mod < size);
487
488 if (index < space->is_low_mod)
489 space->is_low_mod = index;
490 if (index > space->is_high_mod)
491 space->is_high_mod = index;
492}
493
#define IPC_ENTRY_GROW_STATS 1
#if IPC_ENTRY_GROW_STATS
/* Diagnostic counters for ipc_entry_grow_table (updated without locking). */
static uint64_t	ipc_entry_grow_count = 0;		/* grows that reached the copy phase */
static uint64_t	ipc_entry_grow_rescan = 0;		/* rescans forced by concurrent mods */
static uint64_t	ipc_entry_grow_rescan_max = 0;		/* most rescans in one grow */
static uint64_t	ipc_entry_grow_rescan_entries = 0;	/* entries reprocessed across rescans */
static uint64_t	ipc_entry_grow_rescan_entries_max = 0;	/* largest single rescan window */
static uint64_t	ipc_entry_grow_freelist_entries = 0;	/* freelist links walked, cumulative */
static uint64_t	ipc_entry_grow_freelist_entries_max = 0;	/* longest single freelist walk */
#endif
504
505/*
506 * Routine: ipc_entry_grow_table
507 * Purpose:
508 * Grows the table in a space.
509 * Conditions:
510 * The space must be write-locked and active before.
511 * If successful, the space is also returned locked.
512 * On failure, the space is returned unlocked.
513 * Allocates memory.
514 * Returns:
515 * KERN_SUCCESS Grew the table.
516 * KERN_SUCCESS Somebody else grew the table.
517 * KERN_SUCCESS The space died.
518 * KERN_NO_SPACE Table has maximum size already.
519 * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
520 */
521
kern_return_t
ipc_entry_grow_table(
	ipc_space_t		space,
	ipc_table_elems_t	target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	ipc_entry_t otable, table;
	ipc_table_size_t oits, its, nits;
	mach_port_index_t i, free_index;
	mach_port_index_t low_mod, hi_mod;
	ipc_table_index_t sanity;
#if IPC_ENTRY_GROW_STATS
	uint64_t rescan_count = 0;
#endif
	assert(is_active(space));

	if (is_growing(space)) {
		/*
		 *	Somebody else is growing the table.
		 *	We just wait for them to finish.
		 */

		is_write_sleep(space);
		return KERN_SUCCESS;
	}

	otable = space->is_table;

	its = space->is_table_next;
	size = its->its_size;

	/*
	 * Since is_table_next points to the next natural size
	 * we can identify the current size entry.
	 */
	oits = its - 1;
	osize = oits->its_size;

	/*
	 * If there is no target size, then the new size is simply
	 * specified by is_table_next.  If there is a target
	 * size, then search for the next entry.
	 */
	if (target_size != ITS_SIZE_NONE) {
		if (target_size <= osize) {
			/* the space is locked */
			return KERN_SUCCESS;
		}

		/* scan the size list for the first size >= target_size */
		psize = osize;
		while ((psize != size) && (target_size > size)) {
			psize = size;
			its++;
			size = its->its_size;
		}
		if (psize == size) {
			/* size list exhausted: target can never be satisfied */
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}
	}

	if (osize == size) {
		/* already at the maximum table size */
		is_write_unlock(space);
		return KERN_NO_SPACE;
	}

	nits = its + 1;
	nsize = nits->its_size;
	assert((osize < size) && (size <= nsize));

	/*
	 * We'll attempt to grow the table.
	 *
	 * Because we will be copying without the space lock, reset
	 * the lowest_mod index to just beyond the end of the current
	 * table.  Modification of entries (other than hashes) will
	 * bump this downward, and we only have to reprocess entries
	 * above that mark.  Eventually, we'll get done.
	 */
	is_start_growing(space);
	space->is_low_mod = osize;
	space->is_high_mod = 0;
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_count++;
#endif
	is_write_unlock(space);

	/* allocate the larger table (may block; lock is dropped) */
	table = it_entries_alloc(its);
	if (table == IE_NULL) {
		is_write_lock(space);
		is_done_growing(space);
		is_write_unlock(space);
		thread_wakeup((event_t) space);
		return KERN_RESOURCE_SHORTAGE;
	}

	/* seed a randomized freelist through the newly-added slots */
	ipc_space_rand_freelist(space, table, osize, size);

	/* clear out old entries in new table */
	memset((void *)table, 0, osize * sizeof(*table));

	low_mod = 0;
	hi_mod = osize - 1;
 rescan:
	/*
	 * Within the range of the table that changed, determine what we
	 * have to take action on. For each entry, take a snapshot of the
	 * corresponding entry in the old table (so it won't change
	 * during this iteration). The snapshot may not be self-consistent
	 * (if we caught it in the middle of being changed), so be very
	 * cautious with the values.
	 */
	for (i = low_mod; i <= hi_mod; i++) {
		ipc_entry_t entry = &table[i];
		struct ipc_entry osnap = otable[i];

		if (entry->ie_object != osnap.ie_object ||
		    IE_BITS_TYPE(entry->ie_bits) != IE_BITS_TYPE(osnap.ie_bits)) {

			/* drop any stale reverse-hash mapping for a send right */
			if (entry->ie_object != IO_NULL &&
			    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND)
				ipc_hash_table_delete(table, size, entry->ie_object, i, entry);

			entry->ie_object = osnap.ie_object;
			entry->ie_bits = osnap.ie_bits;
			entry->ie_request = osnap.ie_request; /* or ie_next */

			/* and re-hash it under the new table if still a send right */
			if (entry->ie_object != IO_NULL &&
			    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND)
				ipc_hash_table_insert(table, size, entry->ie_object, i, entry);
		} else {
			assert(entry->ie_object == osnap.ie_object);
			entry->ie_bits = osnap.ie_bits;
			entry->ie_request = osnap.ie_request; /* or ie_next */
		}

	}
	table[0].ie_next = otable[0].ie_next;  /* always rebase the freelist */

	/*
	 * find the end of the freelist (should be short). But be careful,
	 * the list items can change so only follow through truly free entries
	 * (no problem stopping short in those cases, because we'll rescan).
	 */
	free_index = 0;
	for (sanity = 0; sanity < osize; sanity++) {
		if (table[free_index].ie_object != IPC_OBJECT_NULL)
			break;
		i = table[free_index].ie_next;
		if (i == 0 || i >= osize)
			break;
		free_index = i;
	}
#if IPC_ENTRY_GROW_STATS
	ipc_entry_grow_freelist_entries += sanity;
	if (sanity > ipc_entry_grow_freelist_entries_max)
		ipc_entry_grow_freelist_entries_max = sanity;
#endif

	is_write_lock(space);

	/*
	 *	We need to do a wakeup on the space,
	 *	to rouse waiting threads.  We defer
	 *	this until the space is unlocked,
	 *	because we don't want them to spin.
	 */

	if (!is_active(space)) {
		/*
		 *	The space died while it was unlocked.
		 */

		is_done_growing(space);
		is_write_unlock(space);
		thread_wakeup((event_t) space);
		it_entries_free(its, table);
		is_write_lock(space);
		return KERN_SUCCESS;
	}

	/* If the space changed while unlocked, go back and process the changes */
	if (space->is_low_mod < osize) {
		assert(space->is_high_mod > 0);
		low_mod = space->is_low_mod;
		space->is_low_mod = osize;
		hi_mod = space->is_high_mod;
		space->is_high_mod = 0;
		is_write_unlock(space);
#if IPC_ENTRY_GROW_STATS
		rescan_count++;
		if (rescan_count > ipc_entry_grow_rescan_max)
			ipc_entry_grow_rescan_max = rescan_count;

		ipc_entry_grow_rescan++;
		ipc_entry_grow_rescan_entries += hi_mod - low_mod + 1;
		if (hi_mod - low_mod + 1 > ipc_entry_grow_rescan_entries_max)
			ipc_entry_grow_rescan_entries_max = hi_mod - low_mod + 1;
#endif
		goto rescan;
	}

	/* link new free entries onto the rest of the freelist */
	assert(table[free_index].ie_next == 0 &&
	       table[free_index].ie_object == IO_NULL);
	table[free_index].ie_next = osize;

	assert(space->is_table == otable);
	assert((space->is_table_next == its) ||
	       (target_size != ITS_SIZE_NONE));
	assert(space->is_table_size == osize);

	/* publish the new, larger table */
	space->is_table = table;
	space->is_table_size = size;
	space->is_table_next = nits;
	space->is_table_free += size - osize;

	is_done_growing(space);
	is_write_unlock(space);

	thread_wakeup((event_t) space);

	/*
	 *	Now we need to free the old table.
	 */
	it_entries_free(oits, otable);
	is_write_lock(space);

	return KERN_SUCCESS;
}
753
754
755/*
756 * Routine: ipc_entry_name_mask
757 * Purpose:
758 * Ensure a mach port name has the default ipc entry
759 * generation bits set. This can be used to ensure that
760 * a name passed in by user space matches names generated
761 * by the kernel.
762 * Conditions:
763 * None.
764 * Returns:
765 * 'name' input with default generation bits masked or added
766 * as appropriate.
767 */
768mach_port_name_t
769ipc_entry_name_mask(mach_port_name_t name)
770{
771#ifndef NO_PORT_GEN
772 static mach_port_name_t null_name = MACH_PORT_MAKE(0, IE_BITS_GEN_MASK + IE_BITS_GEN_ONE);
773 return name | null_name;
774#else
775 static mach_port_name_t null_name = MACH_PORT_MAKE(0, ~(IE_BITS_GEN_MASK + IE_BITS_GEN_ONE));
776 return name & ~null_name;
777#endif
778}