/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	ipc/ipc_entry.c
 *
 *	Primitive functions to manipulate translation entries.
 */
#include <mach_kdb.h>
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#if	MACH_KDB
#include <kern/task.h>
#endif
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>
zone_t ipc_tree_entry_zone;
/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t	space,
	mach_port_name_t name);
/*
 *	Routine:	ipc_entry_tree_collision
 *	Purpose:
 *		Checks if "name" collides with an allocated name
 *		in the space's tree.  That is, returns TRUE
 *		if the splay tree contains a name with the same
 *		index as "name".
 *	Conditions:
 *		The space is locked (read or write) and active.
 */
boolean_t
ipc_entry_tree_collision(
	ipc_space_t	space,
	mach_port_name_t name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 * Check if we collide with the next smaller name
	 * or the next larger name.
	 */

	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);

	index = MACH_PORT_INDEX(name);
	return (((lower != (mach_port_name_t)~0) &&
		 (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}
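/*
 * Worked example (illustrative, not part of the build): two names that
 * "collide" in the sense checked above differ only in generation, so
 * they share a table index.  The EX_* macros are simplified stand-ins
 * for the real MACH_PORT_MAKE/MACH_PORT_INDEX layout in mach/port.h;
 * only the index/generation split matters here.
 */
#if 0
#define EX_PORT_MAKE(index, gen)	(((index) << 8) | (gen))	/* assumed layout */
#define EX_PORT_INDEX(name)		((name) >> 8)

	mach_port_name_t a = EX_PORT_MAKE(5, 1);	/* index 5, generation 1 */
	mach_port_name_t b = EX_PORT_MAKE(5, 2);	/* index 5, generation 2 */

	/*
	 * a != b as names, yet EX_PORT_INDEX(a) == EX_PORT_INDEX(b) == 5;
	 * if either name is allocated in the splay tree,
	 * ipc_entry_tree_collision() reports TRUE for the other.
	 */
#endif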
/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t	space,
	mach_port_name_t name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 * If space is fast, we assume no splay tree and name within table
	 * bounds, but still check generation numbers (if enabled) and
	 * look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	}
	else
	if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	    tree_lookup:
		entry = (ipc_entry_t)
			ipc_splay_tree_lookup(&space->is_tree, name);
		/* with sub-space introduction, an entry may appear in	    */
		/* the splay tree and yet not show rights for this subspace */
		if (entry != IE_NULL) {
			if (!(IE_BITS_TYPE(entry->ie_bits)))
				entry = IE_NULL;
		}
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
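/*
 * Illustrative caller pattern (not part of the build): per the
 * conditions above, the space must stay locked across the lookup and
 * for as long as the returned entry is used.  A sketch, assuming the
 * is_read_lock/is_read_unlock macros from ipc/ipc_space.h:
 */
#if 0
	ipc_entry_t entry;

	is_read_lock(space);
	if (!space->is_active) {
		is_read_unlock(space);
		return KERN_INVALID_TASK;	/* space is dead */
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return KERN_INVALID_NAME;	/* no entry under this name */
	}

	/* ... examine entry->ie_bits / entry->ie_object here ... */
	is_read_unlock(space);
#endif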
/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t	space,
	mach_port_name_t *namep,
	ipc_entry_t	*entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

	table = space->is_table;
	first_free = table->ie_next;

	if (first_free == 0)
		return KERN_NO_SPACE;

	free_entry = &table[first_free];
	table->ie_next = free_entry->ie_next;

	/*
	 *	Initialize the new entry.  We need only
	 *	increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = IE_REQ_NONE;

		/*
		 *	The new name can't be MACH_PORT_NULL because index
		 *	is non-zero.  It can't be MACH_PORT_DEAD because
		 *	the table isn't allowed to grow big enough.
		 *	(See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}
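/*
 * Worked example (illustrative, not part of the build): what the
 * generation bump above buys us.  The EX_* macros are simplified
 * stand-ins for the real macros in mach/port.h and ipc/ipc_entry.h
 * (assumed layouts, for illustration only).
 */
#if 0
#define EX_GEN_MASK		0xff			/* assumed: 8 generation bits */
#define EX_NEW_GEN(gen)		(((gen) + 1) & EX_GEN_MASK)
#define EX_PORT_MAKE(index, gen)	(((index) << 8) | (gen))

	mach_port_index_t first_free = 7;	/* slot popped off the free list */
	mach_port_gen_t gen = EX_NEW_GEN(0);	/* slot reused: generation 0 -> 1 */
	mach_port_name_t new_name = EX_PORT_MAKE(first_free, gen);

	/*
	 * Since first_free is never 0 (slot 0 is reserved), new_name can't
	 * be MACH_PORT_NULL.  A stale name EX_PORT_MAKE(7, 0) still held by
	 * a client now fails the IE_BITS_GEN check in ipc_entry_lookup().
	 */
#endif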
/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t	space,
	mach_port_name_t *namep,
	ipc_entry_t	*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr;	/* space is unlocked */
	}
}
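/*
 * Illustrative caller pattern (not part of the build): ipc_entry_alloc
 * is called with the space unlocked; on KERN_SUCCESS it returns a fresh
 * name/entry pair with the space write-locked, and the caller installs
 * the capability before unlocking.  A sketch:
 */
#if 0
	mach_port_name_t name;
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc(space, &name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;			/* space is unlocked */

	/* space is write-locked: set entry->ie_object and the
	 * MACH_PORT_TYPE_* bits in entry->ie_bits, then release */
	is_write_unlock(space);
#endif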
/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t	space,
	mach_port_name_t name,
	ipc_entry_t	*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry) ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *		1) The entry is reserved (index 0)
		 *		2) The entry is inuse, for the same name
		 *		3) The entry is inuse, for a different name
		 *		4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					if (tentry) ite_free(tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	Rip the entry out of the free list.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = IE_REQ_NONE;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				if (tentry) ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 * In a fast space, ipc_entry_alloc_name may be
		 * used only to add a right to a port name already
		 * known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 *	Before trying to allocate any memory,
		 *	check if the entry already exists in the tree.
		 *	This avoids spurious resource errors.
		 *	The splay tree makes a subsequent lookup/insert
		 *	of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
							!= ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry) ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 *	Check if the table should be grown.
		 *
		 *	Note that if space->is_table_size == its->its_size,
		 *	then we won't ever try to grow the table.
		 *
		 *	Note that we are optimistically assuming that name
		 *	doesn't collide with any existing names.  (So if
		 *	it were entered into the tree, is_tree_small would
		 *	be incremented.)  This is OK, because even in that
		 *	case, we don't lose memory by growing the table.
		 */
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 *	Can save space by growing the table.
			 *	Because the space will be unlocked,
			 *	we must restart.
			 */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry) ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 *	If a splay-tree entry was allocated previously,
		 *	go ahead and insert it into the tree.
		 */

		if (tentry != ITE_NULL) {

			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;

			return KERN_SUCCESS;
		}

		/*
		 *	Allocate a tree entry and try again.
		 */

		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
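/*
 * Standalone miniature (illustrative, not part of the build): the free
 * list that ipc_entry_get pops, ipc_entry_alloc_name rips entries out
 * of, and ipc_entry_dealloc pushes onto.  Free slots are threaded
 * through ie_next with table[0].ie_next as the head; here the same
 * structure is modeled with a plain array of indexes.
 */
#if 0
#include <assert.h>

#define SLOTS 8

int
main(void)
{
	unsigned next[SLOTS] = { 0 };	/* next[0] is the free-list head */
	unsigned i, first_free, prev, cur;

	/* thread slots 1..7 in reverse order, as ipc_entry_grow_table
	 * does, so allocation hands out 1, 2, 3, ... */
	for (i = SLOTS - 1; i >= 1; --i) {
		next[i] = next[0];
		next[0] = i;
	}

	/* pop-front allocation, as in ipc_entry_get */
	first_free = next[0];
	next[0] = next[first_free];
	assert(first_free == 1);

	/* rip an interior slot (5) out, as in ipc_entry_alloc_name:
	 * walk to its predecessor, then unlink */
	for (prev = 0; (cur = next[prev]) != 5; prev = cur)
		continue;
	next[prev] = next[cur];

	/* push-front on free, as in ipc_entry_dealloc */
	next[5] = next[0];
	next[0] = 5;
	return 0;
}
#endif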
/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t	space,
	mach_port_name_t name,
	ipc_entry_t	entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == IE_REQ_NONE);

#if 1
	if (entry->ie_request != IE_REQ_NONE)
		panic("ipc_entry_dealloc()\n");
#endif

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits|MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);

		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}

	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;

		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {

			assert(space->is_tree_small > 0);

			space->is_tree_small--;
		}
	}
}
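/*
 * Slow-motion sketch (illustrative, not part of the build) of the splay
 * surgery in the collision path above, assuming ipc_splay_tree_split
 * moves every entry smaller than the given name into its third argument
 * (as in Mach's ipc_splay.c):
 */
#if 0
	struct ipc_splay_tree small, collisions;

	/* collisions := entries with name < MACH_PORT_MAKE(index+1, 0) */
	ipc_splay_tree_split(&space->is_tree,
			     MACH_PORT_MAKE(index+1, 0), &collisions);

	/* small := entries with name < MACH_PORT_MAKE(index, 0), leaving
	 * collisions holding exactly the names whose index == index */
	ipc_splay_tree_split(&collisions,
			     MACH_PORT_MAKE(index, 0), &small);

	/* pick any survivor, move it into table[index], then reassemble */
	ipc_splay_tree_join(&space->is_tree, &collisions);
	ipc_splay_tree_join(&space->is_tree, &small);
#endif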
/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, it is also returned locked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t	space,
	ipc_table_elems_t target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated = FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 *	Somebody else is growing the table.
			 *	We just wait for them to finish.
			 */

			is_write_sleep(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 * Since is_table_next points to the next natural size
		 * we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 * If there is no target size, then the new size is simply
		 * specified by is_table_next.  If there is a target
		 * size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		nits = its + 1;
		nsize = nits->its_size;

		assert((osize < size) && (size <= nsize));

		/*
		 * OK, we'll attempt to grow the table.
		 * The realloc requires that the old table
		 * remain in existence.
		 */

		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated = TRUE;
		}
		else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 * We need to do a wakeup on the space,
		 * to rouse waiting threads.  We defer
		 * this until the space is unlocked,
		 * because we don't want them to spin.
		 */

		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 *	The space died while it was unlocked.
			 */

			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 * If we did a realloc, it remapped the data.
		 * Otherwise we copy by hand first.  Then we have
		 * to zero the new part and the old local hash
		 * values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 * Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 * If there are entries in the splay tree,
		 * then we have work to do:
		 *	1) transfer entries to the table
		 *	2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 * The splay tree divides into four regions,
			 * based on the index of the entries:
			 *	1) 0 <= index < osize
			 *	2) osize <= index < size
			 *	3) size <= index < nsize
			 *	4) nsize <= index
			 *
			 * Entries in the first part are ignored.
			 * Entries in the second part, that don't
			 * collide, are moved into the table.
			 * Entries in the third part, that don't
			 * collide, are counted for is_tree_small.
			 * Entries in the fourth part are ignored.
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {
				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}

				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0; index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}

		/*
		 *	Add entries in the new part which still aren't used
		 *	to the free list.  Add them in reverse order,
		 *	and set the generation number to -1, so that
		 *	early allocations produce "natural" names.
		 */

		free_index = table[0].ie_next;
		for (i = size - 1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;

		/*
		 *	Now we need to free the old table.
		 *	If the space dies or grows while unlocked,
		 *	then we can quit here.
		 */

		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 *	We might have moved enough entries from
		 *	the splay tree into the table that
		 *	the table can be profitably grown again.
		 *
		 *	Note that if size == nsize, then
		 *	space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}
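/*
 * Standalone miniature (illustrative, not part of the build): the
 * growth scheme above reduced to its skeleton -- take the next size
 * from a static size table, copy the old entries, and thread the new
 * tail onto the free list in reverse order.  All names and sizes here
 * are local to the example.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct ex_entry { unsigned bits, next; };

static const unsigned ex_sizes[] = { 64, 256, 1024, 4096 };	/* assumed steps */

static struct ex_entry *
ex_grow(struct ex_entry *otable, unsigned cur)	/* cur indexes ex_sizes */
{
	unsigned osize = ex_sizes[cur];
	unsigned size = ex_sizes[cur + 1];
	unsigned i, free_index;
	struct ex_entry *table;

	table = calloc(size, sizeof(struct ex_entry));
	if (table == NULL)
		return NULL;		/* cf. KERN_RESOURCE_SHORTAGE */

	memcpy(table, otable, osize * sizeof(struct ex_entry));

	/* thread the unused tail in reverse, so early allocations
	 * hand out low ("natural") indexes first */
	free_index = table[0].next;
	for (i = size - 1; i >= osize; --i) {
		if (table[i].bits == 0) {
			table[i].next = free_index;
			free_index = i;
		}
	}
	table[0].next = free_index;

	free(otable);
	return table;
}
#endif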
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

ipc_entry_t db_ipc_object_by_name(
	task_t		task,
	mach_port_name_t name);

ipc_entry_t
db_ipc_object_by_name(
	task_t		task,
	mach_port_name_t name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */