/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:	ipc/ipc_entry.c
 * Author:	Rich Draves
 * Date:	1989
 *
 * Primitive functions to manipulate translation entries.
 */

#include <mach_kdb.h>
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#if MACH_KDB
#include <kern/task.h>
#endif
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>

zone_t ipc_tree_entry_zone;

/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name);

/*
 * Routine:	ipc_entry_tree_collision
 * Purpose:
 *	Checks if "name" collides with an allocated name
 *	in the space's tree.  That is, returns TRUE
 *	if the splay tree contains a name with the same
 *	index as "name".
 * Conditions:
 *	The space is locked (read or write) and active.
 */

boolean_t
ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 * Check if we collide with the next smaller name
	 * or the next larger name.
	 */

	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);

	index = MACH_PORT_INDEX(name);
	return (((lower != (mach_port_name_t)~0) &&
		 (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}
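
/*
 * A note on what "collision" means here:  a Mach port name packs a
 * table index together with a generation number (MACH_PORT_INDEX()
 * extracts the former, MACH_PORT_GEN() the latter; the exact bit
 * layout is defined in mach/port.h).  Two names collide exactly when
 * their index fields match, i.e. when they would claim the same table
 * slot even though the full names differ.
 */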

/*
 * Routine:	ipc_entry_lookup
 * Purpose:
 *	Searches for an entry, given its name.
 * Conditions:
 *	The space must be read or write locked throughout.
 *	The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 * If space is fast, we assume no splay tree and name within table
	 * bounds, but still check generation numbers (if enabled) and
	 * look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	    tree_lookup:
		entry = (ipc_entry_t)
			ipc_splay_tree_lookup(&space->is_tree, name);
		/*
		 * With the introduction of sub-spaces, an entry may appear
		 * in the splay tree and yet carry no rights for this space.
		 */
		if (entry != IE_NULL) {
			if (!(IE_BITS_TYPE(entry->ie_bits)))
				entry = IE_NULL;
		}
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
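
/*
 * Illustrative caller pattern for ipc_entry_lookup (a sketch only, not
 * lifted from this file):  the space must stay locked and active across
 * both the lookup and any use of the returned entry.
 *
 *	is_read_lock(space);
 *	if (!space->is_active) {
 *		is_read_unlock(space);
 *		return KERN_INVALID_TASK;
 *	}
 *	entry = ipc_entry_lookup(space, name);
 *	if (entry == IE_NULL) {
 *		is_read_unlock(space);
 *		return KERN_INVALID_NAME;
 *	}
 *	... examine entry->ie_bits / entry->ie_object ...
 *	is_read_unlock(space);
 */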

/*
 * Routine:	ipc_entry_get
 * Purpose:
 *	Tries to allocate an entry out of the space.
 * Conditions:
 *	The space is write-locked and active throughout.
 *	An object may be locked.  Will not allocate memory.
 * Returns:
 *	KERN_SUCCESS		A free entry was found.
 *	KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

	{
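		/*
		 * The free list is threaded through the ie_next fields,
		 * with the reserved entry at index 0 serving as its head;
		 * a first_free of zero therefore means the list is empty.
		 */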
		table = space->is_table;
		first_free = table->ie_next;

		if (first_free == 0)
			return KERN_NO_SPACE;

		free_entry = &table[first_free];
		table->ie_next = free_entry->ie_next;
	}

	/*
	 * Initialize the new entry.  We need only
	 * increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = 0;

		/*
		 * The new name can't be MACH_PORT_NULL because index
		 * is non-zero.  It can't be MACH_PORT_DEAD because
		 * the table isn't allowed to grow big enough.
		 * (See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}

/*
 * Routine:	ipc_entry_alloc
 * Purpose:
 *	Allocate an entry out of the space.
 * Conditions:
 *	The space is not locked before, but it is write-locked after
 *	if the call is successful.  May allocate memory.
 * Returns:
 *	KERN_SUCCESS		An entry was allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NO_SPACE		No room for an entry in the space.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr;	/* space is unlocked */
	}
}
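
/*
 * Illustrative caller pattern for ipc_entry_alloc (a sketch only; the
 * receive right and the "port" object below are hypothetical):  on
 * KERN_SUCCESS the space comes back write-locked, so the caller fills
 * in the new entry and then drops the lock itself.  On failure the
 * space is already unlocked.
 *
 *	kr = ipc_entry_alloc(space, &name, &entry);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	entry->ie_bits |= MACH_PORT_TYPE_RECEIVE;
 *	entry->ie_object = (ipc_object_t) port;
 *	is_write_unlock(space);
 */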

/*
 * Routine:	ipc_entry_alloc_name
 * Purpose:
 *	Allocates/finds an entry with a specific name.
 *	If an existing entry is returned, its type will be nonzero.
 * Conditions:
 *	The space is not locked before, but it is write-locked after
 *	if the call is successful.  May allocate memory.
 * Returns:
 *	KERN_SUCCESS		Found existing entry with same name.
 *	KERN_SUCCESS		Allocated a new entry.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry) ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 * If we are under the table cutoff,
		 * there are usually four cases:
		 *	1) The entry is reserved (index 0)
		 *	2) The entry is in use, for the same name
		 *	3) The entry is in use, for a different name
		 *	4) The entry is free
		 * For a task with a "fast" IPC space, we disallow
		 * cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					assert(!tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 * Rip the entry out of the free list.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = 0;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				else if (tentry)
					ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 * In a fast space, ipc_entry_alloc_name may be
		 * used only to add a right to a port name already
		 * known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 * Before trying to allocate any memory,
		 * check if the entry already exists in the tree.
		 * This avoids spurious resource errors.
		 * The splay tree makes a subsequent lookup/insert
		 * of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
		     != ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry) ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 * Check if the table should be grown.
		 *
		 * Note that if space->is_table_size == its->its_size,
		 * then we won't ever try to grow the table.
		 *
		 * Note that we are optimistically assuming that name
		 * doesn't collide with any existing names.  (So if
		 * it were entered into the tree, is_tree_small would
		 * be incremented.)  This is OK, because even in that
		 * case, we don't lose memory by growing the table.
		 */
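		/*
		 * In other words: grow only if the extra table slots,
		 * (its_size - is_table_size) * sizeof(struct ipc_entry)
		 * bytes, cost less than keeping (is_tree_small + 1) tree
		 * entries around at sizeof(struct ipc_tree_entry) bytes
		 * apiece.  As a purely hypothetical illustration, with a
		 * 16-byte ipc_entry and a 32-byte ipc_tree_entry, growing
		 * by 64 table slots (1 KB) pays for itself once roughly
		 * 32 small tree entries (1 KB worth) could be avoided.
		 */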
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 * Can save space by growing the table.
			 * Because the space will be unlocked,
			 * we must restart.
			 */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry) ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 * If a splay-tree entry was allocated previously,
		 * go ahead and insert it into the tree.
		 */

		if (tentry != ITE_NULL) {

			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 * Allocate a tree entry and try again.
		 */

		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}

/*
 * Routine:	ipc_entry_dealloc
 * Purpose:
 *	Deallocates an entry from a space.
 * Conditions:
 *	The space must be write-locked throughout.
 *	The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_object_t obj;

			/* must move an entry from tree to table */
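			/*
			 * The two splits below carve the splay tree into
			 * three pieces:  names with a larger index (left in
			 * the space's tree), names with a smaller index
			 * (in "small"), and, in "collisions", exactly the
			 * names sharing this table index.  One of the
			 * colliding entries is then promoted into the table
			 * slot being vacated.
			 */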

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits | MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);

		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}

	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;

		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {

			assert(space->is_tree_small > 0);

			space->is_tree_small--;
		}
	}
}
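
/*
 * Illustrative caller pattern for ipc_entry_dealloc (a sketch only):
 * per the asserts above, the caller has already cut the entry loose
 * from its object and any outstanding request before releasing the
 * slot, roughly:
 *
 *	entry->ie_object = IO_NULL;
 *	entry->ie_request = 0;
 *	ipc_entry_dealloc(space, name, entry);
 *	is_write_unlock(space);
 */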

/*
 * Routine:	ipc_entry_grow_table
 * Purpose:
 *	Grows the table in a space.
 * Conditions:
 *	The space must be write-locked and active before.
 *	If successful, it is also returned locked.
 *	Allocates memory.
 * Returns:
 *	KERN_SUCCESS		Grew the table.
 *	KERN_SUCCESS		Somebody else grew the table.
 *	KERN_SUCCESS		The space died.
 *	KERN_NO_SPACE		Table has maximum size already.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t		space,
	ipc_table_elems_t	target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated = FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 * Somebody else is growing the table.
			 * We just wait for them to finish.
			 */

			is_write_sleep(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 * Since is_table_next points to the next natural size
		 * we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 * If there is no target size, then the new size is simply
		 * specified by is_table_next.  If there is a target
		 * size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		nits = its + 1;
		nsize = nits->its_size;

		assert((osize < size) && (size <= nsize));

		/*
		 * OK, we'll attempt to grow the table.
		 * The realloc requires that the old table
		 * remain in existence.
		 */

		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated = TRUE;
		}
		else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 * We need to do a wakeup on the space,
		 * to rouse waiting threads.  We defer
		 * this until the space is unlocked,
		 * because we don't want them to spin.
		 */

		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 * The space died while it was unlocked.
			 */

			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 * If we did a realloc, it remapped the data.
		 * Otherwise we copy by hand first.  Then we have
		 * to zero the new part and the old local hash
		 * values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 * Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 * If there are entries in the splay tree,
		 * then we have work to do:
		 *	1) transfer entries to the table
		 *	2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 * The splay tree divides into four regions,
			 * based on the index of the entries:
			 *	1) 0 <= index < osize
			 *	2) osize <= index < size
			 *	3) size <= index < nsize
			 *	4) nsize <= index
			 *
			 * Entries in the first part are ignored.
			 * Entries in the second part, that don't
			 * collide, are moved into the table.
			 * Entries in the third part, that don't
			 * collide, are counted for is_tree_small.
			 * Entries in the fourth part are ignored.
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {

				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}
				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0; index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}

		/*
		 * Add entries in the new part which still aren't used
		 * to the free list.  Add them in reverse order,
		 * and set the generation number to -1, so that
		 * early allocations produce "natural" names.
		 */
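		/*
		 * (Starting from the all-ones generation works because
		 * IE_BITS_NEW_GEN() adds one and masks, so the first
		 * allocation of such an entry wraps the generation around
		 * to zero, yielding the "natural" name for that index.)
		 */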

		free_index = table[0].ie_next;
		for (i = size - 1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;

		/*
		 * Now we need to free the old table.
		 * If the space dies or grows while unlocked,
		 * then we can quit here.
		 */
		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 * We might have moved enough entries from
		 * the splay tree into the table that
		 * the table can be profitably grown again.
		 *
		 * Note that if size == nsize, then
		 * space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}


#if MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

ipc_entry_t db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name);

ipc_entry_t
db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */