2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * File: ipc/ipc_entry.c
57 * Primitive functions to manipulate translation entries.
61 #include <mach_debug.h>
63 #include <mach/kern_return.h>
64 #include <mach/port.h>
65 #include <kern/assert.h>
66 #include <kern/sched_prim.h>
67 #include <kern/zalloc.h>
68 #include <kern/misc_protos.h>
70 #include <kern/task.h>
73 #include <ipc/ipc_entry.h>
74 #include <ipc/ipc_space.h>
75 #include <ipc/ipc_splay.h>
76 #include <ipc/ipc_object.h>
77 #include <ipc/ipc_hash.h>
78 #include <ipc/ipc_table.h>
79 #include <ipc/ipc_port.h>
82 zone_t ipc_tree_entry_zone
;
87 * Forward declarations
89 boolean_t
ipc_entry_tree_collision(
91 mach_port_name_t name
);
94 * Routine: ipc_entry_tree_collision
96 * Checks if "name" collides with an allocated name
97 * in the space's tree. That is, returns TRUE
98 * if the splay tree contains a name with the same
101 * The space is locked (read or write) and active.
105 ipc_entry_tree_collision(
107 mach_port_name_t name
)
109 mach_port_index_t index
;
110 mach_port_name_t lower
, upper
;
112 assert(space
->is_active
);
115 * Check if we collide with the next smaller name
116 * or the next larger name.
119 ipc_splay_tree_bounds(&space
->is_tree
, name
, &lower
, &upper
);
121 index
= MACH_PORT_INDEX(name
);
122 return (((lower
!= (mach_port_name_t
)~0) &&
123 (MACH_PORT_INDEX(lower
) == index
)) ||
124 ((upper
!= 0) && (MACH_PORT_INDEX(upper
) == index
)));
128 * Routine: ipc_entry_lookup
130 * Searches for an entry, given its name.
132 * The space must be read or write locked throughout.
133 * The space must be active.
139 mach_port_name_t name
)
141 mach_port_index_t index
;
144 assert(space
->is_active
);
147 index
= MACH_PORT_INDEX(name
);
149 * If space is fast, we assume no splay tree and name within table
150 * bounds, but still check generation numbers (if enabled) and
151 * look for null entries.
153 if (is_fast_space(space
)) {
154 entry
= &space
->is_table
[index
];
155 if (IE_BITS_GEN(entry
->ie_bits
) != MACH_PORT_GEN(name
) ||
156 IE_BITS_TYPE(entry
->ie_bits
) == MACH_PORT_TYPE_NONE
)
160 if (index
< space
->is_table_size
) {
161 entry
= &space
->is_table
[index
];
162 if (IE_BITS_GEN(entry
->ie_bits
) != MACH_PORT_GEN(name
))
163 if (entry
->ie_bits
& IE_BITS_COLLISION
) {
164 assert(space
->is_tree_total
> 0);
168 else if (IE_BITS_TYPE(entry
->ie_bits
) == MACH_PORT_TYPE_NONE
)
170 } else if (space
->is_tree_total
== 0)
174 entry
= (ipc_entry_t
)
175 ipc_splay_tree_lookup(&space
->is_tree
, name
);
176 /* with sub-space introduction, an entry may appear in */
177 /* the splay tree and yet not show rights for this subspace */
178 if(entry
!= IE_NULL
) {
179 if(!(IE_BITS_TYPE(entry
->ie_bits
)))
184 assert((entry
== IE_NULL
) || IE_BITS_TYPE(entry
->ie_bits
));
189 * Routine: ipc_entry_get
191 * Tries to allocate an entry out of the space.
193 * The space is write-locked and active throughout.
194 * An object may be locked. Will not allocate memory.
196 * KERN_SUCCESS A free entry was found.
197 * KERN_NO_SPACE No entry allocated.
203 mach_port_name_t
*namep
,
207 mach_port_index_t first_free
;
208 ipc_entry_t free_entry
;
210 assert(space
->is_active
);
213 table
= space
->is_table
;
214 first_free
= table
->ie_next
;
217 return KERN_NO_SPACE
;
219 free_entry
= &table
[first_free
];
220 table
->ie_next
= free_entry
->ie_next
;
224 * Initialize the new entry. We need only
225 * increment the generation number and clear ie_request.
228 mach_port_name_t new_name
;
231 gen
= IE_BITS_NEW_GEN(free_entry
->ie_bits
);
232 free_entry
->ie_bits
= gen
;
233 free_entry
->ie_request
= 0;
236 * The new name can't be MACH_PORT_NULL because index
237 * is non-zero. It can't be MACH_PORT_DEAD because
238 * the table isn't allowed to grow big enough.
239 * (See comment in ipc/ipc_table.h.)
241 new_name
= MACH_PORT_MAKE(first_free
, gen
);
242 assert(MACH_PORT_VALID(new_name
));
246 assert(free_entry
->ie_object
== IO_NULL
);
248 *entryp
= free_entry
;
253 * Routine: ipc_entry_alloc
255 * Allocate an entry out of the space.
257 * The space is not locked before, but it is write-locked after
258 * if the call is successful. May allocate memory.
260 * KERN_SUCCESS An entry was allocated.
261 * KERN_INVALID_TASK The space is dead.
262 * KERN_NO_SPACE No room for an entry in the space.
263 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
269 mach_port_name_t
*namep
,
274 is_write_lock(space
);
277 if (!space
->is_active
) {
278 is_write_unlock(space
);
279 return KERN_INVALID_TASK
;
282 kr
= ipc_entry_get(space
, namep
, entryp
);
283 if (kr
== KERN_SUCCESS
)
286 kr
= ipc_entry_grow_table(space
, ITS_SIZE_NONE
);
287 if (kr
!= KERN_SUCCESS
)
288 return kr
; /* space is unlocked */
293 * Routine: ipc_entry_alloc_name
295 * Allocates/finds an entry with a specific name.
296 * If an existing entry is returned, its type will be nonzero.
298 * The space is not locked before, but it is write-locked after
299 * if the call is successful. May allocate memory.
301 * KERN_SUCCESS Found existing entry with same name.
302 * KERN_SUCCESS Allocated a new entry.
303 * KERN_INVALID_TASK The space is dead.
304 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
308 ipc_entry_alloc_name(
310 mach_port_name_t name
,
313 mach_port_index_t index
= MACH_PORT_INDEX(name
);
314 mach_port_gen_t gen
= MACH_PORT_GEN(name
);
315 ipc_tree_entry_t tentry
= ITE_NULL
;
317 assert(MACH_PORT_VALID(name
));
320 is_write_lock(space
);
324 ipc_tree_entry_t tentry2
;
325 ipc_table_size_t its
;
327 if (!space
->is_active
) {
328 is_write_unlock(space
);
329 if (tentry
) ite_free(tentry
);
330 return KERN_INVALID_TASK
;
334 * If we are under the table cutoff,
335 * there are usually four cases:
336 * 1) The entry is reserved (index 0)
337 * 2) The entry is inuse, for the same name
338 * 3) The entry is inuse, for a different name
339 * 4) The entry is free
340 * For a task with a "fast" IPC space, we disallow
341 * cases 1) and 3), because ports cannot be renamed.
343 if (index
< space
->is_table_size
) {
344 ipc_entry_t table
= space
->is_table
;
346 entry
= &table
[index
];
349 assert(!IE_BITS_TYPE(entry
->ie_bits
));
350 assert(!IE_BITS_GEN(entry
->ie_bits
));
351 } else if (IE_BITS_TYPE(entry
->ie_bits
)) {
352 if (IE_BITS_GEN(entry
->ie_bits
) == gen
) {
358 mach_port_index_t free_index
, next_index
;
361 * Rip the entry out of the free list.
365 (next_index
= table
[free_index
].ie_next
)
367 free_index
= next_index
)
370 table
[free_index
].ie_next
=
371 table
[next_index
].ie_next
;
373 entry
->ie_bits
= gen
;
374 entry
->ie_request
= 0;
377 assert(entry
->ie_object
== IO_NULL
);
378 if (is_fast_space(space
))
387 * In a fast space, ipc_entry_alloc_name may be
388 * used only to add a right to a port name already
389 * known in this space.
391 if (is_fast_space(space
)) {
392 is_write_unlock(space
);
398 * Before trying to allocate any memory,
399 * check if the entry already exists in the tree.
400 * This avoids spurious resource errors.
401 * The splay tree makes a subsequent lookup/insert
402 * of the same name cheap, so this costs little.
405 if ((space
->is_tree_total
> 0) &&
406 ((tentry2
= ipc_splay_tree_lookup(&space
->is_tree
, name
))
408 assert(tentry2
->ite_space
== space
);
409 assert(IE_BITS_TYPE(tentry2
->ite_bits
));
411 *entryp
= &tentry2
->ite_entry
;
412 if (tentry
) ite_free(tentry
);
416 its
= space
->is_table_next
;
419 * Check if the table should be grown.
421 * Note that if space->is_table_size == its->its_size,
422 * then we won't ever try to grow the table.
424 * Note that we are optimistically assuming that name
425 * doesn't collide with any existing names. (So if
426 * it were entered into the tree, is_tree_small would
427 * be incremented.) This is OK, because even in that
428 * case, we don't lose memory by growing the table.
430 if ((space
->is_table_size
<= index
) &&
431 (index
< its
->its_size
) &&
432 (((its
->its_size
- space
->is_table_size
) *
433 sizeof(struct ipc_entry
)) <
434 ((space
->is_tree_small
+ 1) *
435 sizeof(struct ipc_tree_entry
)))) {
439 * Can save space by growing the table.
440 * Because the space will be unlocked,
444 kr
= ipc_entry_grow_table(space
, ITS_SIZE_NONE
);
445 assert(kr
!= KERN_NO_SPACE
);
446 if (kr
!= KERN_SUCCESS
) {
447 /* space is unlocked */
448 if (tentry
) ite_free(tentry
);
456 * If a splay-tree entry was allocated previously,
457 * go ahead and insert it into the tree.
460 if (tentry
!= ITE_NULL
) {
462 space
->is_tree_total
++;
464 if (index
< space
->is_table_size
) {
465 entry
= &space
->is_table
[index
];
466 entry
->ie_bits
|= IE_BITS_COLLISION
;
467 } else if ((index
< its
->its_size
) &&
468 !ipc_entry_tree_collision(space
, name
))
469 space
->is_tree_small
++;
471 ipc_splay_tree_insert(&space
->is_tree
, name
, tentry
);
472 tentry
->ite_bits
= 0;
473 tentry
->ite_request
= 0;
474 tentry
->ite_object
= IO_NULL
;
475 tentry
->ite_space
= space
;
476 *entryp
= &tentry
->ite_entry
;
481 * Allocate a tree entry and try again.
484 is_write_unlock(space
);
485 tentry
= ite_alloc();
486 if (tentry
== ITE_NULL
)
487 return KERN_RESOURCE_SHORTAGE
;
488 is_write_lock(space
);
493 * Routine: ipc_entry_dealloc
495 * Deallocates an entry from a space.
497 * The space must be write-locked throughout.
498 * The space must be active.
504 mach_port_name_t name
,
508 ipc_entry_num_t size
;
509 mach_port_index_t index
;
511 assert(space
->is_active
);
512 assert(entry
->ie_object
== IO_NULL
);
513 assert(entry
->ie_request
== 0);
515 index
= MACH_PORT_INDEX(name
);
516 table
= space
->is_table
;
517 size
= space
->is_table_size
;
519 if (is_fast_space(space
)) {
520 assert(index
< size
);
521 assert(entry
== &table
[index
]);
522 assert(IE_BITS_GEN(entry
->ie_bits
) == MACH_PORT_GEN(name
));
523 assert(!(entry
->ie_bits
& IE_BITS_COLLISION
));
524 entry
->ie_bits
&= IE_BITS_GEN_MASK
;
525 entry
->ie_next
= table
->ie_next
;
526 table
->ie_next
= index
;
531 if ((index
< size
) && (entry
== &table
[index
])) {
532 assert(IE_BITS_GEN(entry
->ie_bits
) == MACH_PORT_GEN(name
));
534 if (entry
->ie_bits
& IE_BITS_COLLISION
) {
535 struct ipc_splay_tree small
, collisions
;
536 ipc_tree_entry_t tentry
;
537 mach_port_name_t tname
;
541 /* must move an entry from tree to table */
543 ipc_splay_tree_split(&space
->is_tree
,
544 MACH_PORT_MAKE(index
+1, 0),
546 ipc_splay_tree_split(&collisions
,
547 MACH_PORT_MAKE(index
, 0),
550 pick
= ipc_splay_tree_pick(&collisions
,
553 assert(MACH_PORT_INDEX(tname
) == index
);
555 entry
->ie_object
= obj
= tentry
->ite_object
;
556 entry
->ie_bits
= tentry
->ite_bits
|MACH_PORT_GEN(tname
);
557 entry
->ie_request
= tentry
->ite_request
;
559 assert(tentry
->ite_space
== space
);
561 if (IE_BITS_TYPE(tentry
->ite_bits
)==MACH_PORT_TYPE_SEND
) {
562 ipc_hash_global_delete(space
, obj
,
564 ipc_hash_local_insert(space
, obj
,
568 ipc_splay_tree_delete(&collisions
, tname
, tentry
);
570 assert(space
->is_tree_total
> 0);
571 space
->is_tree_total
--;
573 /* check if collision bit should still be on */
575 pick
= ipc_splay_tree_pick(&collisions
,
578 entry
->ie_bits
|= IE_BITS_COLLISION
;
579 ipc_splay_tree_join(&space
->is_tree
,
583 ipc_splay_tree_join(&space
->is_tree
, &small
);
586 entry
->ie_bits
&= IE_BITS_GEN_MASK
;
587 entry
->ie_next
= table
->ie_next
;
588 table
->ie_next
= index
;
592 ipc_tree_entry_t tentry
= (ipc_tree_entry_t
) entry
;
594 assert(tentry
->ite_space
== space
);
596 ipc_splay_tree_delete(&space
->is_tree
, name
, tentry
);
598 assert(space
->is_tree_total
> 0);
599 space
->is_tree_total
--;
602 ipc_entry_t ientry
= &table
[index
];
604 assert(ientry
->ie_bits
& IE_BITS_COLLISION
);
606 if (!ipc_entry_tree_collision(space
, name
))
607 ientry
->ie_bits
&= ~IE_BITS_COLLISION
;
609 } else if ((index
< space
->is_table_next
->its_size
) &&
610 !ipc_entry_tree_collision(space
, name
)) {
612 assert(space
->is_tree_small
> 0);
614 space
->is_tree_small
--;
620 * Routine: ipc_entry_grow_table
622 * Grows the table in a space.
624 * The space must be write-locked and active before.
625 * If successful, it is also returned locked.
628 * KERN_SUCCESS Grew the table.
629 * KERN_SUCCESS Somebody else grew the table.
630 * KERN_SUCCESS The space died.
631 * KERN_NO_SPACE Table has maximum size already.
632 * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
636 ipc_entry_grow_table(
638 ipc_table_elems_t target_size
)
640 ipc_entry_num_t osize
, size
, nsize
, psize
;
643 boolean_t reallocated
=FALSE
;
645 ipc_entry_t otable
, table
;
646 ipc_table_size_t oits
, its
, nits
;
647 mach_port_index_t i
, free_index
;
649 assert(space
->is_active
);
651 if (space
->is_growing
) {
653 * Somebody else is growing the table.
654 * We just wait for them to finish.
657 is_write_sleep(space
);
661 otable
= space
->is_table
;
663 its
= space
->is_table_next
;
664 size
= its
->its_size
;
667 * Since is_table_next points to the next natural size
668 * we can identify the current size entry.
671 osize
= oits
->its_size
;
674 * If there is no target size, then the new size is simply
675 * specified by is_table_next. If there is a target
676 * size, then search for the next entry.
678 if (target_size
!= ITS_SIZE_NONE
) {
679 if (target_size
<= osize
) {
680 is_write_unlock(space
);
685 while ((psize
!= size
) && (target_size
> size
)) {
688 size
= its
->its_size
;
691 is_write_unlock(space
);
692 return KERN_NO_SPACE
;
697 is_write_unlock(space
);
698 return KERN_NO_SPACE
;
702 nsize
= nits
->its_size
;
704 assert((osize
< size
) && (size
<= nsize
));
707 * OK, we'll attempt to grow the table.
708 * The realloc requires that the old table
709 * remain in existence.
712 space
->is_growing
= TRUE
;
713 is_write_unlock(space
);
715 if (it_entries_reallocable(oits
)) {
716 table
= it_entries_realloc(oits
, otable
, its
);
720 table
= it_entries_alloc(its
);
723 is_write_lock(space
);
724 space
->is_growing
= FALSE
;
727 * We need to do a wakeup on the space,
728 * to rouse waiting threads. We defer
729 * this until the space is unlocked,
730 * because we don't want them to spin.
733 if (table
== IE_NULL
) {
734 is_write_unlock(space
);
735 thread_wakeup((event_t
) space
);
736 return KERN_RESOURCE_SHORTAGE
;
739 if (!space
->is_active
) {
741 * The space died while it was unlocked.
744 is_write_unlock(space
);
745 thread_wakeup((event_t
) space
);
746 it_entries_free(its
, table
);
747 is_write_lock(space
);
751 assert(space
->is_table
== otable
);
752 assert((space
->is_table_next
== its
) ||
753 (target_size
!= ITS_SIZE_NONE
));
754 assert(space
->is_table_size
== osize
);
756 space
->is_table
= table
;
757 space
->is_table_size
= size
;
758 space
->is_table_next
= nits
;
761 * If we did a realloc, it remapped the data.
762 * Otherwise we copy by hand first. Then we have
763 * to zero the new part and the old local hash
767 (void) memcpy((void *) table
, (const void *) otable
,
768 osize
* (sizeof(struct ipc_entry
)));
770 for (i
= 0; i
< osize
; i
++)
771 table
[i
].ie_index
= 0;
773 (void) memset((void *) (table
+ osize
) , 0,
774 ((size
- osize
) * (sizeof(struct ipc_entry
))));
777 * Put old entries into the reverse hash table.
779 for (i
= 0; i
< osize
; i
++) {
780 ipc_entry_t entry
= &table
[i
];
782 if (IE_BITS_TYPE(entry
->ie_bits
)==MACH_PORT_TYPE_SEND
) {
783 ipc_hash_local_insert(space
, entry
->ie_object
,
789 * If there are entries in the splay tree,
790 * then we have work to do:
791 * 1) transfer entries to the table
792 * 2) update is_tree_small
794 assert(!is_fast_space(space
) || space
->is_tree_total
== 0);
795 if (space
->is_tree_total
> 0) {
796 mach_port_index_t index
;
798 struct ipc_splay_tree ignore
;
799 struct ipc_splay_tree move
;
800 struct ipc_splay_tree small
;
801 ipc_entry_num_t nosmall
;
802 ipc_tree_entry_t tentry
;
805 * The splay tree divides into four regions,
806 * based on the index of the entries:
807 * 1) 0 <= index < osize
808 * 2) osize <= index < size
809 * 3) size <= index < nsize
812 * Entries in the first part are ignored.
813 * Entries in the second part, that don't
814 * collide, are moved into the table.
815 * Entries in the third part, that don't
816 * collide, are counted for is_tree_small.
817 * Entries in the fourth part are ignored.
820 ipc_splay_tree_split(&space
->is_tree
,
821 MACH_PORT_MAKE(nsize
, 0),
823 ipc_splay_tree_split(&small
,
824 MACH_PORT_MAKE(size
, 0),
826 ipc_splay_tree_split(&move
,
827 MACH_PORT_MAKE(osize
, 0),
830 /* move entries into the table */
832 for (tentry
= ipc_splay_traverse_start(&move
);
834 tentry
= ipc_splay_traverse_next(&move
, delete)) {
836 mach_port_name_t name
;
838 mach_port_type_t type
;
839 ipc_entry_bits_t bits
;
843 name
= tentry
->ite_name
;
844 gen
= MACH_PORT_GEN(name
);
845 index
= MACH_PORT_INDEX(name
);
847 assert(tentry
->ite_space
== space
);
848 assert((osize
<= index
) && (index
< size
));
850 entry
= &table
[index
];
851 bits
= entry
->ie_bits
;
852 if (IE_BITS_TYPE(bits
)) {
853 assert(IE_BITS_GEN(bits
) != gen
);
854 entry
->ie_bits
|= IE_BITS_COLLISION
;
859 bits
= tentry
->ite_bits
;
860 type
= IE_BITS_TYPE(bits
);
861 assert(type
!= MACH_PORT_TYPE_NONE
);
863 entry
->ie_bits
= bits
| gen
;
864 entry
->ie_request
= tentry
->ite_request
;
865 entry
->ie_object
= obj
= tentry
->ite_object
;
867 if (type
== MACH_PORT_TYPE_SEND
) {
868 ipc_hash_global_delete(space
, obj
,
870 ipc_hash_local_insert(space
, obj
,
873 space
->is_tree_total
--;
876 ipc_splay_traverse_finish(&move
);
878 /* count entries for is_tree_small */
880 nosmall
= 0; index
= 0;
881 for (tentry
= ipc_splay_traverse_start(&small
);
883 tentry
= ipc_splay_traverse_next(&small
, FALSE
)) {
884 mach_port_index_t nindex
;
886 nindex
= MACH_PORT_INDEX(tentry
->ite_name
);
888 if (nindex
!= index
) {
893 ipc_splay_traverse_finish(&small
);
895 assert(nosmall
<= (nsize
- size
));
896 assert(nosmall
<= space
->is_tree_total
);
897 space
->is_tree_small
= nosmall
;
899 /* put the splay tree back together */
901 ipc_splay_tree_join(&space
->is_tree
, &small
);
902 ipc_splay_tree_join(&space
->is_tree
, &move
);
903 ipc_splay_tree_join(&space
->is_tree
, &ignore
);
907 * Add entries in the new part which still aren't used
908 * to the free list. Add them in reverse order,
909 * and set the generation number to -1, so that
910 * early allocations produce "natural" names.
913 free_index
= table
[0].ie_next
;
914 for (i
= size
-1; i
>= osize
; --i
) {
915 ipc_entry_t entry
= &table
[i
];
917 if (entry
->ie_bits
== 0) {
918 entry
->ie_bits
= IE_BITS_GEN_MASK
;
919 entry
->ie_next
= free_index
;
923 table
[0].ie_next
= free_index
;
926 * Now we need to free the old table.
927 * If the space dies or grows while unlocked,
928 * then we can quit here.
930 is_write_unlock(space
);
931 thread_wakeup((event_t
) space
);
933 it_entries_free(oits
, otable
);
934 is_write_lock(space
);
935 if (!space
->is_active
|| (space
->is_table_next
!= nits
))
939 * We might have moved enough entries from
940 * the splay tree into the table that
941 * the table can be profitably grown again.
943 * Note that if size == nsize, then
944 * space->is_tree_small == 0.
946 } while ((space
->is_tree_small
> 0) &&
947 (((nsize
- size
) * sizeof(struct ipc_entry
)) <
948 (space
->is_tree_small
* sizeof(struct ipc_tree_entry
))));
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

/* Forward declaration for the kernel debugger helper below. */
ipc_entry_t db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name);

/*
 *	Routine:	db_ipc_object_by_name
 *	Purpose:
 *		Kernel-debugger helper: looks up "name" in the task's IPC
 *		space, prints the translation, and returns the underlying
 *		object pointer (cast to ipc_entry_t), or IE_NULL.
 */
ipc_entry_t
db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */