/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	ipc/ipc_entry.c
 *
 *	Primitive functions to manipulate translation entries.
 */
#include <mach_kdb.h>
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#if	MACH_KDB
#include <kern/task.h>
#endif	/* MACH_KDB */
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>
zone_t ipc_tree_entry_zone;
/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name);
/*
 *	Routine:	ipc_entry_tree_collision
 *	Purpose:
 *		Checks if "name" collides with an allocated name
 *		in the space's tree.  That is, returns TRUE
 *		if the splay tree contains a name with the same
 *		index as "name".
 *	Conditions:
 *		The space is locked (read or write) and active.
 */
boolean_t
ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 * Check if we collide with the next smaller name
	 * or the next larger name.
	 */
	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);

	index = MACH_PORT_INDEX(name);
	return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}
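
#if 0	/* Illustrative sketch; not part of the original source. */
/*
 * The collision test above depends only on the index half of a name:
 * two names denote the same table slot exactly when their indices
 * match, even if their generation numbers differ.  A hypothetical
 * helper, for illustration only, showing that relation:
 */
static boolean_t
example_same_slot(
	mach_port_name_t	a,
	mach_port_name_t	b)
{
	/* generations are ignored; only the slot index matters */
	return MACH_PORT_INDEX(a) == MACH_PORT_INDEX(b);
}
#endif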
/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */
ipc_entry_t
ipc_entry_lookup(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 * If space is fast, we assume no splay tree and name within table
	 * bounds, but still check generation numbers (if enabled) and
	 * look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	}
	else
	if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	    tree_lookup:
		entry = (ipc_entry_t)
			ipc_splay_tree_lookup(&space->is_tree, name);
		/* with sub-space introduction, an entry may appear in */
		/* the splay tree and yet not show rights for this subspace */
		if (entry != IE_NULL) {
			if (!(IE_BITS_TYPE(entry->ie_bits)))
				entry = IE_NULL;
		}
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
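
#if 0	/* Illustrative sketch; not part of the original source. */
/*
 * A minimal sketch of a typical caller, holding the space read-locked
 * and active as the Conditions above require.  The read-lock macro
 * names follow ipc_space.h conventions and are an assumption here.
 */
static ipc_object_t
example_lookup_object(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	ipc_entry_t entry;
	ipc_object_t object = IO_NULL;

	is_read_lock(space);
	if (space->is_active) {
		entry = ipc_entry_lookup(space, name);
		if (entry != IE_NULL)
			object = entry->ie_object;
	}
	is_read_unlock(space);
	return object;
}
#endif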
/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */
kern_return_t
ipc_entry_get(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

	table = space->is_table;
	first_free = table->ie_next;

	if (first_free == 0)
		return KERN_NO_SPACE;

	free_entry = &table[first_free];
	table->ie_next = free_entry->ie_next;

	/*
	 *	Initialize the new entry.  We need only
	 *	increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = 0;

		/*
		 *	The new name can't be MACH_PORT_NULL because index
		 *	is non-zero.  It can't be MACH_PORT_DEAD because
		 *	the table isn't allowed to grow big enough.
		 *	(See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}
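
#if 0	/* Illustrative sketch; not part of the original source. */
/*
 * Sketch of the generation-number property established above: the name
 * handed back by ipc_entry_get decodes to the chosen table slot and to
 * the freshly bumped generation, so a stale name for the same slot will
 * fail the IE_BITS_GEN check in ipc_entry_lookup.  The caller is
 * assumed to hold the space write-locked and active.
 */
static void
example_get_property(ipc_space_t space)
{
	mach_port_name_t name;
	ipc_entry_t entry;

	if (ipc_entry_get(space, &name, &entry) == KERN_SUCCESS) {
		assert(entry == &space->is_table[MACH_PORT_INDEX(name)]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
	}
}
#endif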
/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */
kern_return_t
ipc_entry_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr;	/* space is unlocked */
	}
}
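
#if 0	/* Illustrative sketch; not part of the original source. */
/*
 * Typical use: on success the space comes back write-locked, so the
 * caller initializes the entry and then unlocks.  A minimal sketch;
 * a real caller would also set the right's type bits in ie_bits.
 */
static kern_return_t
example_alloc(
	ipc_space_t		space,
	ipc_object_t		object,
	mach_port_name_t	*namep)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc(space, namep, &entry);
	if (kr != KERN_SUCCESS)
		return kr;	/* space is unlocked */

	entry->ie_object = object;	/* bind the right's object */
	is_write_unlock(space);
	return KERN_SUCCESS;
}
#endif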
/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry) ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *		1) The entry is reserved (index 0)
		 *		2) The entry is inuse, for the same name
		 *		3) The entry is inuse, for a different name
		 *		4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					if (tentry) ite_free(tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	Rip the entry out of the free list.
				 */
				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = 0;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				else if (tentry)
					ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 * In a fast space, ipc_entry_alloc_name may be
		 * used only to add a right to a port name already
		 * known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 *	Before trying to allocate any memory,
		 *	check if the entry already exists in the tree.
		 *	This avoids spurious resource errors.
		 *	The splay tree makes a subsequent lookup/insert
		 *	of the same name cheap, so this costs little.
		 */
		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
							!= ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry) ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 *	Check if the table should be grown.
		 *
		 *	Note that if space->is_table_size == its->its_size,
		 *	then we won't ever try to grow the table.
		 *
		 *	Note that we are optimistically assuming that name
		 *	doesn't collide with any existing names.  (So if
		 *	it were entered into the tree, is_tree_small would
		 *	be incremented.)  This is OK, because even in that
		 *	case, we don't lose memory by growing the table.
		 */
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 *	Can save space by growing the table.
			 *	Because the space will be unlocked,
			 *	we must restart.
			 */
			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry) ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 *	If a splay-tree entry was allocated previously,
		 *	go ahead and insert it into the tree.
		 */
		if (tentry != ITE_NULL) {

			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 *	Allocate a tree entry and try again.
		 */
		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
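
#if 0	/* Illustrative sketch; not part of the original source. */
/*
 * Sketch of the alloc_name contract: on KERN_SUCCESS the space is left
 * write-locked and the returned entry either already carries rights
 * (nonzero type bits) or is freshly zeroed for the caller to fill in.
 */
static kern_return_t
example_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;

	if (IE_BITS_TYPE(entry->ie_bits)) {
		/* existing entry: caller decides whether that is an error */
	} else {
		/* new entry: initialize type bits and object here */
	}
	is_write_unlock(space);
	return KERN_SUCCESS;
}
#endif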
/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */
void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_entry_bits_t bits;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits|MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);

		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}

	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;

		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {

			assert(space->is_tree_small > 0);
			space->is_tree_small--;
		}
	}
}
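
#if 0	/* Illustrative sketch; not part of the original source. */
/*
 * Deallocation pairs with a prior lookup under the same write lock, and
 * the entry must already be stripped of its object and request (see the
 * asserts above).  A minimal sketch, assuming the right's object has
 * already been released by the caller:
 */
static void
example_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	ipc_entry_t entry;

	is_write_lock(space);
	assert(space->is_active);

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		entry->ie_object = IO_NULL;	/* reference released earlier */
		entry->ie_request = 0;
		ipc_entry_dealloc(space, name, entry);
	}
	is_write_unlock(space);
}
#endif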
/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, it is also returned locked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */
kern_return_t
ipc_entry_grow_table(
	ipc_space_t	space,
	int		target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated=FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 *	Somebody else is growing the table.
			 *	We just wait for them to finish.
			 */
			assert_wait((event_t) space, THREAD_UNINT);
			is_write_unlock(space);
			thread_block((void (*)(void)) 0);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 * Since is_table_next points to the next natural size
		 * we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 * If there is no target size, then the new size is simply
		 * specified by is_table_next.  If there is a target
		 * size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}

		nits = its + 1;
		nsize = nits->its_size;

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		assert((osize < size) && (size <= nsize));
		/*
		 * OK, we'll attempt to grow the table.
		 * The realloc requires that the old table
		 * remain in existence.
		 */
		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated=TRUE;
		}
		else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 *	We need to do a wakeup on the space,
		 *	to rouse waiting threads.  We defer
		 *	this until the space is unlocked,
		 *	because we don't want them to spin.
		 */
		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 *	The space died while it was unlocked.
			 */
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 *	If we did a realloc, it remapped the data.
		 *	Otherwise we copy by hand first.  Then we have
		 *	to zero the new part and the old local hash
		 *	values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 *	Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 *	If there are entries in the splay tree,
		 *	then we have work to do:
		 *		1) transfer entries to the table
		 *		2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 *	The splay tree divides into four regions,
			 *	based on the index of the entries:
			 *		1) 0 <= index < osize
			 *		2) osize <= index < size
			 *		3) size <= index < nsize
			 *		4) nsize <= index
			 *
			 *	Entries in the first part are ignored.
			 *	Entries in the second part, that don't
			 *	collide, are moved into the table.
			 *	Entries in the third part, that don't
			 *	collide, are counted for is_tree_small.
			 *	Entries in the fourth part are ignored.
			 */
			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {
				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}
				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0; index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}
		/*
		 *	Add entries in the new part which still aren't used
		 *	to the free list.  Add them in reverse order,
		 *	and set the generation number to -1, so that
		 *	early allocations produce "natural" names.
		 */
		free_index = table[0].ie_next;
		for (i = size-1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;

		/*
		 *	Now we need to free the old table.
		 *	If the space dies or grows while unlocked,
		 *	then we can quit here.
		 */
		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 *	We might have moved enough entries from
		 *	the splay tree into the table that
		 *	the table can be profitably grown again.
		 *
		 *	Note that if size == nsize, then
		 *	space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}
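
#if 0	/* Illustrative sketch; not part of the original source. */
/*
 * The free list built above threads through ie_next starting at the
 * reserved entry table[0] and ends at index 0.  A sketch of how a
 * debugger might count the free slots, assuming the space is locked:
 */
static ipc_entry_num_t
example_count_free(ipc_space_t space)
{
	ipc_entry_t table = space->is_table;
	mach_port_index_t index;
	ipc_entry_num_t count = 0;

	/* walk the chain of free slots until it terminates at 0 */
	for (index = table->ie_next; index != 0; index = table[index].ie_next)
		count++;
	return count;
}
#endif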
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf
ipc_entry_t	db_ipc_object_by_name(
			task_t			task,
			mach_port_name_t	name);

ipc_entry_t
db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */