/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	ipc/ipc_entry.c
 *
 *	Primitive functions to manipulate translation entries.
 */
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
zone_t ipc_tree_entry_zone;
/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name);
/*
 *	Routine:	ipc_entry_tree_collision
 *	Purpose:
 *		Checks if "name" collides with an allocated name
 *		in the space's tree.  That is, returns TRUE
 *		if the splay tree contains a name with the same
 *		index as "name".
 *	Conditions:
 *		The space is locked (read or write) and active.
 */

boolean_t
ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 *	Check if we collide with the next smaller name
	 *	or the next larger name.
	 */

	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);

	index = MACH_PORT_INDEX(name);
	return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}
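
/*
 * Illustrative sketch (not part of the original source or the build): a
 * user-space model of how a port name decomposes into a table index plus a
 * generation number, which is why two distinct names can "collide" on one
 * index. The bit layout below (low 8 bits for the generation) is an
 * assumption made only for this sketch; the authoritative macros are
 * MACH_PORT_INDEX, MACH_PORT_GEN and MACH_PORT_MAKE in <mach/port.h>.
 */
#if 0
#include <assert.h>
#include <stdint.h>

typedef uint32_t model_port_name_t;

#define MODEL_PORT_INDEX(name)      ((name) >> 8)	/* assumed layout */
#define MODEL_PORT_GEN(name)        ((name) & 0xff)	/* assumed layout */
#define MODEL_PORT_MAKE(index, gen) (((index) << 8) | ((gen) & 0xff))

static void
model_collision_demo(void)
{
	/* Two names with different generations share one table index;
	 * that shared index is exactly what this routine detects. */
	model_port_name_t a = MODEL_PORT_MAKE(7, 1);
	model_port_name_t b = MODEL_PORT_MAKE(7, 2);

	assert(MODEL_PORT_INDEX(a) == MODEL_PORT_INDEX(b));
	assert(MODEL_PORT_GEN(a) != MODEL_PORT_GEN(b));
}
#endif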
/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 *	If space is fast, we assume no splay tree and name within table
	 *	bounds, but still check generation numbers (if enabled) and
	 *	look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	}
	else
	if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	    tree_lookup:
		entry = (ipc_entry_t)
				ipc_splay_tree_lookup(&space->is_tree, name);
		/* with sub-space introduction, an entry may appear in */
		/* the splay tree and yet not show rights for this subspace */
		if (entry != IE_NULL) {
			if (!(IE_BITS_TYPE(entry->ie_bits)))
				entry = IE_NULL;
		}
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
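
/*
 * Illustrative sketch (hypothetical caller, not from this file): the
 * locking discipline a caller of ipc_entry_lookup is expected to follow.
 * The returned entry pointer is only valid while the space lock is held.
 * is_read_lock/is_read_unlock are the space locking macros from
 * ipc/ipc_space.h; the function name and its body are stand-ins.
 */
#if 0
static ipc_object_t
example_name_to_object(ipc_space_t space, mach_port_name_t name)
{
	ipc_entry_t entry;
	ipc_object_t object = IO_NULL;

	is_read_lock(space);
	if (space->is_active) {
		/* lookup is valid only while the lock is held */
		entry = ipc_entry_lookup(space, name);
		if (entry != IE_NULL)
			object = entry->ie_object;
	}
	is_read_unlock(space);
	return object;
}
#endif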
/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

	table = space->is_table;
	first_free = table->ie_next;

	if (first_free == 0)
		return KERN_NO_SPACE;

	free_entry = &table[first_free];
	table->ie_next = free_entry->ie_next;

	/*
	 *	Initialize the new entry.  We need only
	 *	increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = 0;

		/*
		 *	The new name can't be MACH_PORT_NULL because index
		 *	is non-zero.  It can't be MACH_PORT_DEAD because
		 *	the table isn't allowed to grow big enough.
		 *	(See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr; /* space is unlocked */
	}
}
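
/*
 * Illustrative sketch (hypothetical caller, not from this file): on
 * success, ipc_entry_alloc returns with the space still write-locked, so
 * the caller installs its object in the entry and then unlocks the space
 * itself. The function name and the bare object installation below are
 * stand-ins for what real callers such as the port-allocation path do.
 */
#if 0
static kern_return_t
example_allocate_name(
	ipc_space_t		space,
	ipc_object_t		object,
	mach_port_name_t	*namep)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc(space, namep, &entry);
	if (kr != KERN_SUCCESS)
		return kr;		/* space is unlocked on failure */

	entry->ie_object = object;	/* space is still write-locked */
	is_write_unlock(space);
	return KERN_SUCCESS;
}
#endif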
/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry) ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *		1) The entry is reserved (index 0)
		 *		2) The entry is inuse, for the same name
		 *		3) The entry is inuse, for a different name
		 *		4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					if (tentry) ite_free(tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	Rip the entry out of the free list.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = 0;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				else if (tentry)
					ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 *	In a fast space, ipc_entry_alloc_name may be
		 *	used only to add a right to a port name already
		 *	known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 *	Before trying to allocate any memory,
		 *	check if the entry already exists in the tree.
		 *	This avoids spurious resource errors.
		 *	The splay tree makes a subsequent lookup/insert
		 *	of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
							!= ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry) ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 *	Check if the table should be grown.
		 *
		 *	Note that if space->is_table_size == its->its_size,
		 *	then we won't ever try to grow the table.
		 *
		 *	Note that we are optimistically assuming that name
		 *	doesn't collide with any existing names.  (So if
		 *	it were entered into the tree, is_tree_small would
		 *	be incremented.)  This is OK, because even in that
		 *	case, we don't lose memory by growing the table.
		 */
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 *	Can save space by growing the table.
			 *	Because the space will be unlocked,
			 *	we must restart.
			 */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry) ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 *	If a splay-tree entry was allocated previously,
		 *	go ahead and insert it into the tree.
		 */

		if (tentry != ITE_NULL) {

			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;

			return KERN_SUCCESS;
		}

		/*
		 *	Allocate a tree entry and try again.
		 */

		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
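
/*
 * Illustrative sketch (user-space model, not kernel code): unlike
 * ipc_entry_get, which always pops the head of the free list,
 * ipc_entry_alloc_name must claim one specific index, so it walks the list
 * from entry 0 to find that index's predecessor and unlinks it from the
 * middle. This standalone snippet models just that unlink step.
 */
#if 0
struct model_entry {
	unsigned next;	/* models ie_next */
};

/* Unlink a specific free index; the caller guarantees it is on the list. */
static void
model_free_list_unlink(struct model_entry *table, unsigned index)
{
	unsigned free_index, next_index;

	for (free_index = 0;
	     (next_index = table[free_index].next) != index;
	     free_index = next_index)
		continue;

	table[free_index].next = table[next_index].next;
}
#endif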
/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_entry_bits_t bits;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits|MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);

		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}

	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;

		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {

			assert(space->is_tree_small > 0);

			space->is_tree_small--;
		}
	}
}
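
/*
 * Illustrative sketch (user-space model, not kernel code): the common
 * table case of ipc_entry_dealloc. Only the generation bits survive in
 * ie_bits, and the slot is pushed on the front of the free list, so the
 * next allocation of this index bumps the generation and yields a fresh
 * name. The 0xff mask here is an assumed stand-in for IE_BITS_GEN_MASK.
 */
#if 0
struct model_entry {
	unsigned bits;	/* models ie_bits; low bits hold the generation */
	unsigned next;	/* models ie_next */
};

#define MODEL_GEN_MASK 0xff	/* assumed generation mask, illustration only */

static void
model_free_list_push(struct model_entry *table, unsigned index)
{
	table[index].bits &= MODEL_GEN_MASK;	/* keep the generation only */
	table[index].next = table[0].next;	/* push on the free list head */
	table[0].next = index;
}
#endif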
/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, it is also returned locked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t	space,
	int		target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated = FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 *	Somebody else is growing the table.
			 *	We just wait for them to finish.
			 */

			is_write_sleep(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 *	Since is_table_next points to the next natural size
		 *	we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 *	If there is no target size, then the new size is simply
		 *	specified by is_table_next.  If there is a target
		 *	size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		nits = its + 1;
		nsize = nits->its_size;

		assert((osize < size) && (size <= nsize));

		/*
		 *	OK, we'll attempt to grow the table.
		 *	The realloc requires that the old table
		 *	remain in existence.
		 */

		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated = TRUE;
		} else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 *	We need to do a wakeup on the space,
		 *	to rouse waiting threads.  We defer
		 *	this until the space is unlocked,
		 *	because we don't want them to spin.
		 */

		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 *	The space died while it was unlocked.
			 */

			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 *	If we did a realloc, it remapped the data.
		 *	Otherwise we copy by hand first.  Then we have
		 *	to zero the new part and the old local hash
		 *	values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 *	Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 *	If there are entries in the splay tree,
		 *	then we have work to do:
		 *		1) transfer entries to the table
		 *		2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 *	The splay tree divides into four regions,
			 *	based on the index of the entries:
			 *		1) 0 <= index < osize
			 *		2) osize <= index < size
			 *		3) size <= index < nsize
			 *		4) nsize <= index
			 *
			 *	Entries in the first part are ignored.
			 *	Entries in the second part, that don't
			 *	collide, are moved into the table.
			 *	Entries in the third part, that don't
			 *	collide, are counted for is_tree_small.
			 *	Entries in the fourth part are ignored.
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {
				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}

				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0; index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}

		/*
		 *	Add entries in the new part which still aren't used
		 *	to the free list.  Add them in reverse order,
		 *	and set the generation number to -1, so that
		 *	early allocations produce "natural" names.
		 */

		free_index = table[0].ie_next;
		for (i = size - 1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;

		/*
		 *	Now we need to free the old table.
		 *	If the space dies or grows while unlocked,
		 *	then we can quit here.
		 */
		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 *	We might have moved enough entries from
		 *	the splay tree into the table that
		 *	the table can be profitably grown again.
		 *
		 *	Note that if size == nsize, then
		 *	space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

ipc_entry_t	db_ipc_object_by_name(
			task_t			task,
			mach_port_name_t	name);


ipc_entry_t
db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */