2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * File: ipc/ipc_entry.c
57 * Primitive functions to manipulate translation entries.
61 #include <mach_debug.h>
63 #include <mach/kern_return.h>
64 #include <mach/port.h>
65 #include <kern/assert.h>
66 #include <kern/sched_prim.h>
67 #include <kern/zalloc.h>
68 #include <kern/misc_protos.h>
70 #include <kern/task.h>
73 #include <ipc/ipc_entry.h>
74 #include <ipc/ipc_space.h>
75 #include <ipc/ipc_splay.h>
76 #include <ipc/ipc_object.h>
77 #include <ipc/ipc_hash.h>
78 #include <ipc/ipc_table.h>
79 #include <ipc/ipc_port.h>
82 zone_t ipc_tree_entry_zone
;
87 * Forward declarations
89 boolean_t
ipc_entry_tree_collision(
91 mach_port_name_t name
);
94 * Routine: ipc_entry_tree_collision
96 * Checks if "name" collides with an allocated name
97 * in the space's tree. That is, returns TRUE
98 * if the splay tree contains a name with the same
101 * The space is locked (read or write) and active.
105 ipc_entry_tree_collision(
107 mach_port_name_t name
)
109 mach_port_index_t index
;
110 mach_port_name_t lower
, upper
;
112 assert(space
->is_active
);
115 * Check if we collide with the next smaller name
116 * or the next larger name.
119 ipc_splay_tree_bounds(&space
->is_tree
, name
, &lower
, &upper
);
121 index
= MACH_PORT_INDEX(name
);
122 return (((lower
!= ~0) && (MACH_PORT_INDEX(lower
) == index
)) ||
123 ((upper
!= 0) && (MACH_PORT_INDEX(upper
) == index
)));
127 * Routine: ipc_entry_lookup
129 * Searches for an entry, given its name.
131 * The space must be read or write locked throughout.
132 * The space must be active.
138 mach_port_name_t name
)
140 mach_port_index_t index
;
143 assert(space
->is_active
);
146 index
= MACH_PORT_INDEX(name
);
148 * If space is fast, we assume no splay tree and name within table
149 * bounds, but still check generation numbers (if enabled) and
150 * look for null entries.
152 if (is_fast_space(space
)) {
153 entry
= &space
->is_table
[index
];
154 if (IE_BITS_GEN(entry
->ie_bits
) != MACH_PORT_GEN(name
) ||
155 IE_BITS_TYPE(entry
->ie_bits
) == MACH_PORT_TYPE_NONE
)
159 if (index
< space
->is_table_size
) {
160 entry
= &space
->is_table
[index
];
161 if (IE_BITS_GEN(entry
->ie_bits
) != MACH_PORT_GEN(name
))
162 if (entry
->ie_bits
& IE_BITS_COLLISION
) {
163 assert(space
->is_tree_total
> 0);
167 else if (IE_BITS_TYPE(entry
->ie_bits
) == MACH_PORT_TYPE_NONE
)
169 } else if (space
->is_tree_total
== 0)
173 entry
= (ipc_entry_t
)
174 ipc_splay_tree_lookup(&space
->is_tree
, name
);
175 /* with sub-space introduction, an entry may appear in */
176 /* the splay tree and yet not show rights for this subspace */
177 if(entry
!= IE_NULL
) {
178 if(!(IE_BITS_TYPE(entry
->ie_bits
)))
183 assert((entry
== IE_NULL
) || IE_BITS_TYPE(entry
->ie_bits
));
188 * Routine: ipc_entry_get
190 * Tries to allocate an entry out of the space.
192 * The space is write-locked and active throughout.
193 * An object may be locked. Will not allocate memory.
195 * KERN_SUCCESS A free entry was found.
196 * KERN_NO_SPACE No entry allocated.
202 mach_port_name_t
*namep
,
206 mach_port_index_t first_free
;
207 ipc_entry_t free_entry
;
209 assert(space
->is_active
);
212 table
= space
->is_table
;
213 first_free
= table
->ie_next
;
216 return KERN_NO_SPACE
;
218 free_entry
= &table
[first_free
];
219 table
->ie_next
= free_entry
->ie_next
;
223 * Initialize the new entry. We need only
224 * increment the generation number and clear ie_request.
227 mach_port_name_t new_name
;
230 gen
= IE_BITS_NEW_GEN(free_entry
->ie_bits
);
231 free_entry
->ie_bits
= gen
;
232 free_entry
->ie_request
= 0;
235 * The new name can't be MACH_PORT_NULL because index
236 * is non-zero. It can't be MACH_PORT_DEAD because
237 * the table isn't allowed to grow big enough.
238 * (See comment in ipc/ipc_table.h.)
240 new_name
= MACH_PORT_MAKE(first_free
, gen
);
241 assert(MACH_PORT_VALID(new_name
));
245 assert(free_entry
->ie_object
== IO_NULL
);
247 *entryp
= free_entry
;
252 * Routine: ipc_entry_alloc
254 * Allocate an entry out of the space.
256 * The space is not locked before, but it is write-locked after
257 * if the call is successful. May allocate memory.
259 * KERN_SUCCESS An entry was allocated.
260 * KERN_INVALID_TASK The space is dead.
261 * KERN_NO_SPACE No room for an entry in the space.
262 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
268 mach_port_name_t
*namep
,
273 is_write_lock(space
);
276 if (!space
->is_active
) {
277 is_write_unlock(space
);
278 return KERN_INVALID_TASK
;
281 kr
= ipc_entry_get(space
, namep
, entryp
);
282 if (kr
== KERN_SUCCESS
)
285 kr
= ipc_entry_grow_table(space
, ITS_SIZE_NONE
);
286 if (kr
!= KERN_SUCCESS
)
287 return kr
; /* space is unlocked */
292 * Routine: ipc_entry_alloc_name
294 * Allocates/finds an entry with a specific name.
295 * If an existing entry is returned, its type will be nonzero.
297 * The space is not locked before, but it is write-locked after
298 * if the call is successful. May allocate memory.
300 * KERN_SUCCESS Found existing entry with same name.
301 * KERN_SUCCESS Allocated a new entry.
302 * KERN_INVALID_TASK The space is dead.
303 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
307 ipc_entry_alloc_name(
309 mach_port_name_t name
,
312 mach_port_index_t index
= MACH_PORT_INDEX(name
);
313 mach_port_gen_t gen
= MACH_PORT_GEN(name
);
314 ipc_tree_entry_t tentry
= ITE_NULL
;
316 assert(MACH_PORT_VALID(name
));
319 is_write_lock(space
);
323 ipc_tree_entry_t tentry2
;
324 ipc_table_size_t its
;
326 if (!space
->is_active
) {
327 is_write_unlock(space
);
328 if (tentry
) ite_free(tentry
);
329 return KERN_INVALID_TASK
;
333 * If we are under the table cutoff,
334 * there are usually four cases:
335 * 1) The entry is reserved (index 0)
336 * 2) The entry is inuse, for the same name
337 * 3) The entry is inuse, for a different name
338 * 4) The entry is free
339 * For a task with a "fast" IPC space, we disallow
340 * cases 1) and 3), because ports cannot be renamed.
342 if (index
< space
->is_table_size
) {
343 ipc_entry_t table
= space
->is_table
;
345 entry
= &table
[index
];
348 assert(!IE_BITS_TYPE(entry
->ie_bits
));
349 assert(!IE_BITS_GEN(entry
->ie_bits
));
350 } else if (IE_BITS_TYPE(entry
->ie_bits
)) {
351 if (IE_BITS_GEN(entry
->ie_bits
) == gen
) {
357 mach_port_index_t free_index
, next_index
;
360 * Rip the entry out of the free list.
364 (next_index
= table
[free_index
].ie_next
)
366 free_index
= next_index
)
369 table
[free_index
].ie_next
=
370 table
[next_index
].ie_next
;
372 entry
->ie_bits
= gen
;
373 entry
->ie_request
= 0;
376 assert(entry
->ie_object
== IO_NULL
);
377 if (is_fast_space(space
))
386 * In a fast space, ipc_entry_alloc_name may be
387 * used only to add a right to a port name already
388 * known in this space.
390 if (is_fast_space(space
)) {
391 is_write_unlock(space
);
397 * Before trying to allocate any memory,
398 * check if the entry already exists in the tree.
399 * This avoids spurious resource errors.
400 * The splay tree makes a subsequent lookup/insert
401 * of the same name cheap, so this costs little.
404 if ((space
->is_tree_total
> 0) &&
405 ((tentry2
= ipc_splay_tree_lookup(&space
->is_tree
, name
))
407 assert(tentry2
->ite_space
== space
);
408 assert(IE_BITS_TYPE(tentry2
->ite_bits
));
410 *entryp
= &tentry2
->ite_entry
;
411 if (tentry
) ite_free(tentry
);
415 its
= space
->is_table_next
;
418 * Check if the table should be grown.
420 * Note that if space->is_table_size == its->its_size,
421 * then we won't ever try to grow the table.
423 * Note that we are optimistically assuming that name
424 * doesn't collide with any existing names. (So if
425 * it were entered into the tree, is_tree_small would
426 * be incremented.) This is OK, because even in that
427 * case, we don't lose memory by growing the table.
429 if ((space
->is_table_size
<= index
) &&
430 (index
< its
->its_size
) &&
431 (((its
->its_size
- space
->is_table_size
) *
432 sizeof(struct ipc_entry
)) <
433 ((space
->is_tree_small
+ 1) *
434 sizeof(struct ipc_tree_entry
)))) {
438 * Can save space by growing the table.
439 * Because the space will be unlocked,
443 kr
= ipc_entry_grow_table(space
, ITS_SIZE_NONE
);
444 assert(kr
!= KERN_NO_SPACE
);
445 if (kr
!= KERN_SUCCESS
) {
446 /* space is unlocked */
447 if (tentry
) ite_free(tentry
);
455 * If a splay-tree entry was allocated previously,
456 * go ahead and insert it into the tree.
459 if (tentry
!= ITE_NULL
) {
461 space
->is_tree_total
++;
463 if (index
< space
->is_table_size
) {
464 entry
= &space
->is_table
[index
];
465 entry
->ie_bits
|= IE_BITS_COLLISION
;
466 } else if ((index
< its
->its_size
) &&
467 !ipc_entry_tree_collision(space
, name
))
468 space
->is_tree_small
++;
470 ipc_splay_tree_insert(&space
->is_tree
, name
, tentry
);
471 tentry
->ite_bits
= 0;
472 tentry
->ite_request
= 0;
473 tentry
->ite_object
= IO_NULL
;
474 tentry
->ite_space
= space
;
475 *entryp
= &tentry
->ite_entry
;
480 * Allocate a tree entry and try again.
483 is_write_unlock(space
);
484 tentry
= ite_alloc();
485 if (tentry
== ITE_NULL
)
486 return KERN_RESOURCE_SHORTAGE
;
487 is_write_lock(space
);
492 * Routine: ipc_entry_dealloc
494 * Deallocates an entry from a space.
496 * The space must be write-locked throughout.
497 * The space must be active.
503 mach_port_name_t name
,
507 ipc_entry_num_t size
;
508 mach_port_index_t index
;
510 assert(space
->is_active
);
511 assert(entry
->ie_object
== IO_NULL
);
512 assert(entry
->ie_request
== 0);
514 index
= MACH_PORT_INDEX(name
);
515 table
= space
->is_table
;
516 size
= space
->is_table_size
;
518 if (is_fast_space(space
)) {
519 assert(index
< size
);
520 assert(entry
== &table
[index
]);
521 assert(IE_BITS_GEN(entry
->ie_bits
) == MACH_PORT_GEN(name
));
522 assert(!(entry
->ie_bits
& IE_BITS_COLLISION
));
523 entry
->ie_bits
&= IE_BITS_GEN_MASK
;
524 entry
->ie_next
= table
->ie_next
;
525 table
->ie_next
= index
;
530 if ((index
< size
) && (entry
== &table
[index
])) {
531 assert(IE_BITS_GEN(entry
->ie_bits
) == MACH_PORT_GEN(name
));
533 if (entry
->ie_bits
& IE_BITS_COLLISION
) {
534 struct ipc_splay_tree small
, collisions
;
535 ipc_tree_entry_t tentry
;
536 mach_port_name_t tname
;
538 ipc_entry_bits_t bits
;
541 /* must move an entry from tree to table */
543 ipc_splay_tree_split(&space
->is_tree
,
544 MACH_PORT_MAKE(index
+1, 0),
546 ipc_splay_tree_split(&collisions
,
547 MACH_PORT_MAKE(index
, 0),
550 pick
= ipc_splay_tree_pick(&collisions
,
553 assert(MACH_PORT_INDEX(tname
) == index
);
555 entry
->ie_object
= obj
= tentry
->ite_object
;
556 entry
->ie_bits
= tentry
->ite_bits
|MACH_PORT_GEN(tname
);
557 entry
->ie_request
= tentry
->ite_request
;
559 assert(tentry
->ite_space
== space
);
561 if (IE_BITS_TYPE(tentry
->ite_bits
)==MACH_PORT_TYPE_SEND
) {
562 ipc_hash_global_delete(space
, obj
,
564 ipc_hash_local_insert(space
, obj
,
568 ipc_splay_tree_delete(&collisions
, tname
, tentry
);
570 assert(space
->is_tree_total
> 0);
571 space
->is_tree_total
--;
573 /* check if collision bit should still be on */
575 pick
= ipc_splay_tree_pick(&collisions
,
578 entry
->ie_bits
|= IE_BITS_COLLISION
;
579 ipc_splay_tree_join(&space
->is_tree
,
583 ipc_splay_tree_join(&space
->is_tree
, &small
);
586 entry
->ie_bits
&= IE_BITS_GEN_MASK
;
587 entry
->ie_next
= table
->ie_next
;
588 table
->ie_next
= index
;
592 ipc_tree_entry_t tentry
= (ipc_tree_entry_t
) entry
;
594 assert(tentry
->ite_space
== space
);
596 ipc_splay_tree_delete(&space
->is_tree
, name
, tentry
);
598 assert(space
->is_tree_total
> 0);
599 space
->is_tree_total
--;
602 ipc_entry_t ientry
= &table
[index
];
604 assert(ientry
->ie_bits
& IE_BITS_COLLISION
);
606 if (!ipc_entry_tree_collision(space
, name
))
607 ientry
->ie_bits
&= ~IE_BITS_COLLISION
;
609 } else if ((index
< space
->is_table_next
->its_size
) &&
610 !ipc_entry_tree_collision(space
, name
)) {
612 assert(space
->is_tree_small
> 0);
614 space
->is_tree_small
--;
620 * Routine: ipc_entry_grow_table
622 * Grows the table in a space.
624 * The space must be write-locked and active before.
625 * If successful, it is also returned locked.
628 * KERN_SUCCESS Grew the table.
629 * KERN_SUCCESS Somebody else grew the table.
630 * KERN_SUCCESS The space died.
631 * KERN_NO_SPACE Table has maximum size already.
632 * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
636 ipc_entry_grow_table(
640 ipc_entry_num_t osize
, size
, nsize
, psize
;
643 boolean_t reallocated
=FALSE
;
645 ipc_entry_t otable
, table
;
646 ipc_table_size_t oits
, its
, nits
;
647 mach_port_index_t i
, free_index
;
649 assert(space
->is_active
);
651 if (space
->is_growing
) {
653 * Somebody else is growing the table.
654 * We just wait for them to finish.
657 is_write_sleep(space
);
661 otable
= space
->is_table
;
663 its
= space
->is_table_next
;
664 size
= its
->its_size
;
667 * Since is_table_next points to the next natural size
668 * we can identify the current size entry.
671 osize
= oits
->its_size
;
674 * If there is no target size, then the new size is simply
675 * specified by is_table_next. If there is a target
676 * size, then search for the next entry.
678 if (target_size
!= ITS_SIZE_NONE
) {
679 if (target_size
<= osize
) {
680 is_write_unlock(space
);
685 while ((psize
!= size
) && (target_size
> size
)) {
688 size
= its
->its_size
;
691 is_write_unlock(space
);
692 return KERN_NO_SPACE
;
697 is_write_unlock(space
);
698 return KERN_NO_SPACE
;
702 nsize
= nits
->its_size
;
704 assert((osize
< size
) && (size
<= nsize
));
707 * OK, we'll attempt to grow the table.
708 * The realloc requires that the old table
709 * remain in existence.
712 space
->is_growing
= TRUE
;
713 is_write_unlock(space
);
715 if (it_entries_reallocable(oits
)) {
716 table
= it_entries_realloc(oits
, otable
, its
);
720 table
= it_entries_alloc(its
);
723 is_write_lock(space
);
724 space
->is_growing
= FALSE
;
727 * We need to do a wakeup on the space,
728 * to rouse waiting threads. We defer
729 * this until the space is unlocked,
730 * because we don't want them to spin.
733 if (table
== IE_NULL
) {
734 is_write_unlock(space
);
735 thread_wakeup((event_t
) space
);
736 return KERN_RESOURCE_SHORTAGE
;
739 if (!space
->is_active
) {
741 * The space died while it was unlocked.
744 is_write_unlock(space
);
745 thread_wakeup((event_t
) space
);
746 it_entries_free(its
, table
);
747 is_write_lock(space
);
751 assert(space
->is_table
== otable
);
752 assert((space
->is_table_next
== its
) ||
753 (target_size
!= ITS_SIZE_NONE
));
754 assert(space
->is_table_size
== osize
);
756 space
->is_table
= table
;
757 space
->is_table_size
= size
;
758 space
->is_table_next
= nits
;
761 * If we did a realloc, it remapped the data.
762 * Otherwise we copy by hand first. Then we have
763 * to zero the new part and the old local hash
767 (void) memcpy((void *) table
, (const void *) otable
,
768 osize
* (sizeof(struct ipc_entry
)));
770 for (i
= 0; i
< osize
; i
++)
771 table
[i
].ie_index
= 0;
773 (void) memset((void *) (table
+ osize
) , 0,
774 ((size
- osize
) * (sizeof(struct ipc_entry
))));
777 * Put old entries into the reverse hash table.
779 for (i
= 0; i
< osize
; i
++) {
780 ipc_entry_t entry
= &table
[i
];
782 if (IE_BITS_TYPE(entry
->ie_bits
)==MACH_PORT_TYPE_SEND
) {
783 ipc_hash_local_insert(space
, entry
->ie_object
,
789 * If there are entries in the splay tree,
790 * then we have work to do:
791 * 1) transfer entries to the table
792 * 2) update is_tree_small
794 assert(!is_fast_space(space
) || space
->is_tree_total
== 0);
795 if (space
->is_tree_total
> 0) {
796 mach_port_index_t index
;
798 struct ipc_splay_tree ignore
;
799 struct ipc_splay_tree move
;
800 struct ipc_splay_tree small
;
801 ipc_entry_num_t nosmall
;
802 ipc_tree_entry_t tentry
;
805 * The splay tree divides into four regions,
806 * based on the index of the entries:
807 * 1) 0 <= index < osize
808 * 2) osize <= index < size
809 * 3) size <= index < nsize
812 * Entries in the first part are ignored.
813 * Entries in the second part, that don't
814 * collide, are moved into the table.
815 * Entries in the third part, that don't
816 * collide, are counted for is_tree_small.
817 * Entries in the fourth part are ignored.
820 ipc_splay_tree_split(&space
->is_tree
,
821 MACH_PORT_MAKE(nsize
, 0),
823 ipc_splay_tree_split(&small
,
824 MACH_PORT_MAKE(size
, 0),
826 ipc_splay_tree_split(&move
,
827 MACH_PORT_MAKE(osize
, 0),
830 /* move entries into the table */
832 for (tentry
= ipc_splay_traverse_start(&move
);
834 tentry
= ipc_splay_traverse_next(&move
, delete)) {
836 mach_port_name_t name
;
838 mach_port_type_t type
;
839 ipc_entry_bits_t bits
;
843 name
= tentry
->ite_name
;
844 gen
= MACH_PORT_GEN(name
);
845 index
= MACH_PORT_INDEX(name
);
847 assert(tentry
->ite_space
== space
);
848 assert((osize
<= index
) && (index
< size
));
850 entry
= &table
[index
];
851 bits
= entry
->ie_bits
;
852 if (IE_BITS_TYPE(bits
)) {
853 assert(IE_BITS_GEN(bits
) != gen
);
854 entry
->ie_bits
|= IE_BITS_COLLISION
;
859 bits
= tentry
->ite_bits
;
860 type
= IE_BITS_TYPE(bits
);
861 assert(type
!= MACH_PORT_TYPE_NONE
);
863 entry
->ie_bits
= bits
| gen
;
864 entry
->ie_request
= tentry
->ite_request
;
865 entry
->ie_object
= obj
= tentry
->ite_object
;
867 if (type
== MACH_PORT_TYPE_SEND
) {
868 ipc_hash_global_delete(space
, obj
,
870 ipc_hash_local_insert(space
, obj
,
873 space
->is_tree_total
--;
876 ipc_splay_traverse_finish(&move
);
878 /* count entries for is_tree_small */
880 nosmall
= 0; index
= 0;
881 for (tentry
= ipc_splay_traverse_start(&small
);
883 tentry
= ipc_splay_traverse_next(&small
, FALSE
)) {
884 mach_port_index_t nindex
;
886 nindex
= MACH_PORT_INDEX(tentry
->ite_name
);
888 if (nindex
!= index
) {
893 ipc_splay_traverse_finish(&small
);
895 assert(nosmall
<= (nsize
- size
));
896 assert(nosmall
<= space
->is_tree_total
);
897 space
->is_tree_small
= nosmall
;
899 /* put the splay tree back together */
901 ipc_splay_tree_join(&space
->is_tree
, &small
);
902 ipc_splay_tree_join(&space
->is_tree
, &move
);
903 ipc_splay_tree_join(&space
->is_tree
, &ignore
);
907 * Add entries in the new part which still aren't used
908 * to the free list. Add them in reverse order,
909 * and set the generation number to -1, so that
910 * early allocations produce "natural" names.
913 free_index
= table
[0].ie_next
;
914 for (i
= size
-1; i
>= osize
; --i
) {
915 ipc_entry_t entry
= &table
[i
];
917 if (entry
->ie_bits
== 0) {
918 entry
->ie_bits
= IE_BITS_GEN_MASK
;
919 entry
->ie_next
= free_index
;
923 table
[0].ie_next
= free_index
;
926 * Now we need to free the old table.
927 * If the space dies or grows while unlocked,
928 * then we can quit here.
930 is_write_unlock(space
);
931 thread_wakeup((event_t
) space
);
933 it_entries_free(oits
, otable
);
934 is_write_lock(space
);
935 if (!space
->is_active
|| (space
->is_table_next
!= nits
))
939 * We might have moved enough entries from
940 * the splay tree into the table that
941 * the table can be profitably grown again.
943 * Note that if size == nsize, then
944 * space->is_tree_small == 0.
946 } while ((space
->is_tree_small
> 0) &&
947 (((nsize
- size
) * sizeof(struct ipc_entry
)) <
948 (space
->is_tree_small
* sizeof(struct ipc_tree_entry
))));
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

ipc_entry_t	db_ipc_object_by_name(
			task_t		task,
			mach_port_name_t name);

/*
 *	Kernel-debugger helper: look up "name" in the task's IPC space and
 *	print/return the underlying object pointer (IE_NULL if not found).
 */
ipc_entry_t
db_ipc_object_by_name(
	task_t		task,
	mach_port_name_t name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */