/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_entry.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Primitive functions to manipulate translation entries.
 */
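
/*
 * Note on port names: a mach_port_name_t packs a table index and a
 * generation number.  MACH_PORT_INDEX(name) and MACH_PORT_GEN(name)
 * extract the two halves, and MACH_PORT_MAKE(index, gen) combines them.
 * The index selects a slot in the space's entry table; names that fall
 * outside the table, or that collide with a table slot of a different
 * generation, live in the space's splay tree instead.
 */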

#include <mach_kdb.h>
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#if MACH_KDB
#include <kern/task.h>
#endif
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>

zone_t ipc_tree_entry_zone;

/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name);

/*
 *	Routine:	ipc_entry_tree_collision
 *	Purpose:
 *		Checks if "name" collides with an allocated name
 *		in the space's tree.  That is, returns TRUE
 *		if the splay tree contains a name with the same
 *		index as "name".
 *	Conditions:
 *		The space is locked (read or write) and active.
 */

boolean_t
ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 * Check if we collide with the next smaller name
	 * or the next larger name.
	 */

	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
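
	/*
	 * (Descriptive note: lower and upper are the names bounding "name"
	 * in the tree; the sentinel values ~0 and 0 indicate that no
	 * smaller or larger name exists, respectively, which is why they
	 * are excluded from the index comparison below.)
	 */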

	index = MACH_PORT_INDEX(name);
	return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}

/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 * If space is fast, we assume no splay tree and name within table
	 * bounds, but still check generation numbers (if enabled) and
	 * look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	}
	else
	if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	    tree_lookup:
		entry = (ipc_entry_t)
			ipc_splay_tree_lookup(&space->is_tree, name);
		/* with sub-space introduction, an entry may appear in */
		/* the splay tree and yet not show rights for this subspace */
		if (entry != IE_NULL) {
			if (!(IE_BITS_TYPE(entry->ie_bits)))
				entry = IE_NULL;
		}
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
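
/*
 * Illustrative call pattern (a sketch only, not a verbatim caller):
 *
 *	is_read_lock(space);
 *	entry = ipc_entry_lookup(space, name);
 *	if (entry == IE_NULL) {
 *		is_read_unlock(space);
 *		return KERN_INVALID_NAME;
 *	}
 *	... use entry->ie_object while the space stays locked ...
 */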

/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

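	/*
	 * (Descriptive note: the table's free entries form a singly
	 * linked list threaded through ie_next.  table[0].ie_next is
	 * the head of the list; index 0 is reserved and never handed
	 * out, so a head of 0 means no free entry is available.)
	 */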
	{
		table = space->is_table;
		first_free = table->ie_next;

		if (first_free == 0)
			return KERN_NO_SPACE;

		free_entry = &table[first_free];
		table->ie_next = free_entry->ie_next;
	}

	/*
	 * Initialize the new entry.  We need only
	 * increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = 0;

		/*
		 * The new name can't be MACH_PORT_NULL because index
		 * is non-zero.  It can't be MACH_PORT_DEAD because
		 * the table isn't allowed to grow big enough.
		 * (See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr; /* space is unlocked */
	}
}
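
/*
 * Illustrative usage (a sketch only, not a verbatim caller):
 *
 *	mach_port_name_t name;
 *	ipc_entry_t entry;
 *	kern_return_t kr;
 *
 *	kr = ipc_entry_alloc(space, &name, &entry);
 *	if (kr != KERN_SUCCESS)
 *		return kr;		(the space is unlocked on failure)
 *	... fill in entry->ie_object and entry->ie_bits ...
 *	is_write_unlock(space);
 */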

/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry) ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 * If we are under the table cutoff,
		 * there are usually four cases:
		 *	1) The entry is reserved (index 0)
		 *	2) The entry is in use, for the same name
		 *	3) The entry is in use, for a different name
		 *	4) The entry is free
		 * For a task with a "fast" IPC space, we disallow
		 * cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					assert(!tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 * Rip the entry out of the free list.
				 */
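				/*
				 * (Descriptive note: the free list is singly
				 * linked through ie_next starting at table[0],
				 * so the loop below walks it from the head to
				 * find the predecessor of "index" and unlink
				 * the target entry.)
				 */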

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = 0;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				else if (tentry)
					ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 * In a fast space, ipc_entry_alloc_name may be
		 * used only to add a right to a port name already
		 * known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 * Before trying to allocate any memory,
		 * check if the entry already exists in the tree.
		 * This avoids spurious resource errors.
		 * The splay tree makes a subsequent lookup/insert
		 * of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
		     != ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry) ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 * Check if the table should be grown.
		 *
		 * Note that if space->is_table_size == its->its_size,
		 * then we won't ever try to grow the table.
		 *
		 * Note that we are optimistically assuming that name
		 * doesn't collide with any existing names.  (So if
		 * it were entered into the tree, is_tree_small would
		 * be incremented.)  This is OK, because even in that
		 * case, we don't lose memory by growing the table.
		 */
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 * Can save space by growing the table.
			 * Because the space will be unlocked,
			 * we must restart.
			 */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry) ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 * If a splay-tree entry was allocated previously,
		 * go ahead and insert it into the tree.
		 */

		if (tentry != ITE_NULL) {

			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 * Allocate a tree entry and try again.
		 */

		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}

/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
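 *		The entry must have no object and no request
 *		registered (asserted below).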
 */

void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_entry_bits_t bits;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);
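
			/*
			 * (Descriptive note: after the two splits,
			 * "collisions" holds exactly the tree entries whose
			 * names share table index "index", "small" holds the
			 * entries with smaller indices, and entries with
			 * larger indices remain in space->is_tree.  The
			 * pieces are joined back together below.)
			 */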

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits|MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);

		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}

	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;

		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {

			assert(space->is_tree_small > 0);

			space->is_tree_small--;
		}
	}
}

/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, it is also returned locked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t	space,
	int		target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated = FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 * Somebody else is growing the table.
			 * We just wait for them to finish.
			 */

			assert_wait((event_t) space, THREAD_UNINT);
			is_write_unlock(space);
			thread_block((void (*)(void)) 0);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 * Since is_table_next points to the next natural size
		 * we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 * If there is no target size, then the new size is simply
		 * specified by is_table_next.  If there is a target
		 * size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}
		nits = its + 1;
		nsize = nits->its_size;

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		assert((osize < size) && (size <= nsize));

		/*
		 * OK, we'll attempt to grow the table.
		 * The realloc requires that the old table
		 * remain in existence.
		 */

		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated = TRUE;
		}
		else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 * We need to do a wakeup on the space,
		 * to rouse waiting threads.  We defer
		 * this until the space is unlocked,
		 * because we don't want them to spin.
		 */

		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 * The space died while it was unlocked.
			 */

			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 * If we did a realloc, it remapped the data.
		 * Otherwise we copy by hand first.  Then we have
		 * to zero the new part and the old local hash
		 * values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;
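
		/*
		 * (Descriptive note: the loop above clears the stale
		 * "local hash values" carried in ie_index, which were
		 * copied from the old table; the send-right entries are
		 * re-inserted into the local reverse hash below.)
		 */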

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 * Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits)==MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 * If there are entries in the splay tree,
		 * then we have work to do:
		 *	1) transfer entries to the table
		 *	2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 * The splay tree divides into four regions,
			 * based on the index of the entries:
			 *	1) 0 <= index < osize
			 *	2) osize <= index < size
			 *	3) size <= index < nsize
			 *	4) nsize <= index
			 *
			 * Entries in the first part are ignored.
			 * Entries in the second part, that don't
			 * collide, are moved into the table.
			 * Entries in the third part, that don't
			 * collide, are counted for is_tree_small.
			 * Entries in the fourth part are ignored.
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);
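
			/*
			 * (Descriptive note: after the three splits, "ignore"
			 * holds region 1, "move" holds region 2, "small"
			 * holds region 3, and region 4 remains in
			 * space->is_tree; the pieces are rejoined below.)
			 */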

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {

				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}
				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0; index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}

		/*
		 * Add entries in the new part which still aren't used
		 * to the free list.  Add them in reverse order,
		 * and set the generation number to -1, so that
		 * early allocations produce "natural" names.
		 */

		free_index = table[0].ie_next;
		for (i = size-1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;

		/*
		 * Now we need to free the old table.
		 * If the space dies or grows while unlocked,
		 * then we can quit here.
		 */
		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 * We might have moved enough entries from
		 * the splay tree into the table that
		 * the table can be profitably grown again.
		 *
		 * Note that if size == nsize, then
		 * space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}


#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

ipc_entry_t db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name);


ipc_entry_t
db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */