/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_entry.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Primitive functions to manipulate translation entries.
 */

#include <mach_kdb.h>
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#if	MACH_KDB
#include <kern/task.h>
#endif
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>

zone_t ipc_tree_entry_zone;

/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name);

/*
 *	Routine:	ipc_entry_tree_collision
 *	Purpose:
 *		Checks if "name" collides with an allocated name
 *		in the space's tree.  That is, returns TRUE
 *		if the splay tree contains a name with the same
 *		index as "name".
 *	Conditions:
 *		The space is locked (read or write) and active.
 */

boolean_t
ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 * Check if we collide with the next smaller name
	 * or the next larger name.
	 */

	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);

	index = MACH_PORT_INDEX(name);
	return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}
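
/*
 *	Illustration (hypothetical values, not part of the build):
 *	a name decomposes into a table index and a generation via
 *	MACH_PORT_INDEX and MACH_PORT_GEN, so two distinct names
 *	built from the same index but different generations gen1
 *	and gen2 "collide" in the sense checked above:
 *
 *		mach_port_name_t one = MACH_PORT_MAKE(index, gen1);
 *		mach_port_name_t two = MACH_PORT_MAKE(index, gen2);
 *
 *		assert(one != two);
 *		assert(MACH_PORT_INDEX(one) == MACH_PORT_INDEX(two));
 */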

/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 * If space is fast, we assume no splay tree and name within table
	 * bounds, but still check generation numbers (if enabled) and
	 * look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name)) {
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		} else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	    tree_lookup:
		entry = (ipc_entry_t)
			ipc_splay_tree_lookup(&space->is_tree, name);
		/*
		 * With sub-space introduction, an entry may appear in
		 * the splay tree and yet not show rights for this subspace.
		 */
		if ((entry != IE_NULL) && !IE_BITS_TYPE(entry->ie_bits))
			entry = IE_NULL;
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
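
/*
 *	Caller-side sketch for ipc_entry_lookup (hypothetical caller;
 *	assumes the is_read_lock/is_read_unlock macros declared in
 *	ipc/ipc_space.h), showing the locking discipline the
 *	conditions above require:
 *
 *		ipc_entry_t entry;
 *
 *		is_read_lock(space);
 *		if (!space->is_active) {
 *			is_read_unlock(space);
 *			return KERN_INVALID_TASK;
 *		}
 *		entry = ipc_entry_lookup(space, name);
 *		if (entry == IE_NULL) {
 *			is_read_unlock(space);
 *			return KERN_INVALID_NAME;
 *		}
 *		(use entry, then drop the lock)
 */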

/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

	{
		table = space->is_table;
		first_free = table->ie_next;

		if (first_free == 0)
			return KERN_NO_SPACE;

		free_entry = &table[first_free];
		table->ie_next = free_entry->ie_next;
	}

	/*
	 * Initialize the new entry.  We need only
	 * increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = 0;

		/*
		 * The new name can't be MACH_PORT_NULL because index
		 * is non-zero.  It can't be MACH_PORT_DEAD because
		 * the table isn't allowed to grow big enough.
		 * (See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}
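
/*
 *	Caller-side sketch for ipc_entry_get (hypothetical): because
 *	it never allocates memory, a caller already holding the write
 *	lock can fall back to growing the table when the free list is
 *	empty, which is exactly the loop ipc_entry_alloc (below) runs:
 *
 *		mach_port_name_t name;
 *		ipc_entry_t entry;
 *		kern_return_t kr;
 *
 *		kr = ipc_entry_get(space, &name, &entry);
 *		if (kr == KERN_NO_SPACE) {
 *			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
 *			(retry on KERN_SUCCESS; on failure the
 *			 space comes back unlocked)
 *		}
 */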

/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr; /* space is unlocked */
	}
}
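
/*
 *	Caller-side sketch for ipc_entry_alloc (hypothetical): on
 *	success the space is returned write-locked, so the caller
 *	initializes the entry and then unlocks:
 *
 *		kr = ipc_entry_alloc(space, &name, &entry);
 *		if (kr != KERN_SUCCESS)
 *			return kr;	(space is unlocked)
 *
 *		entry->ie_object = object;
 *		is_write_unlock(space);
 */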

/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry) ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 * If we are under the table cutoff,
		 * there are usually four cases:
		 *	1) The entry is reserved (index 0)
		 *	2) The entry is in use, for the same name
		 *	3) The entry is in use, for a different name
		 *	4) The entry is free
		 * For a task with a "fast" IPC space, we disallow
		 * cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					assert(!tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 * Rip the entry out of the free list.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = 0;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				else if (tentry)
					ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 * In a fast space, ipc_entry_alloc_name may be
		 * used only to add a right to a port name already
		 * known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 * Before trying to allocate any memory,
		 * check if the entry already exists in the tree.
		 * This avoids spurious resource errors.
		 * The splay tree makes a subsequent lookup/insert
		 * of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
							!= ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry) ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 * Check if the table should be grown.
		 *
		 * Note that if space->is_table_size == its->its_size,
		 * then we won't ever try to grow the table.
		 *
		 * Note that we are optimistically assuming that name
		 * doesn't collide with any existing names.  (So if
		 * it were entered into the tree, is_tree_small would
		 * be incremented.)  This is OK, because even in that
		 * case, we don't lose memory by growing the table.
		 */
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 * Can save space by growing the table.
			 * Because the space will be unlocked,
			 * we must restart.
			 */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry) ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 * If a splay-tree entry was allocated previously,
		 * go ahead and insert it into the tree.
		 */

		if (tentry != ITE_NULL) {
			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 * Allocate a tree entry and try again.
		 */

		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
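
/*
 *	Caller-side sketch for ipc_entry_alloc_name (hypothetical):
 *	since an existing entry comes back with a nonzero type, a
 *	caller that needs the name to be fresh checks for that, as
 *	mach_port_allocate_name-style callers do:
 *
 *		kr = ipc_entry_alloc_name(space, name, &entry);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		if (IE_BITS_TYPE(entry->ie_bits)) {
 *			is_write_unlock(space);
 *			return KERN_NAME_EXISTS;
 *		}
 */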

/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits|MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);

		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}

	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;

		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {

			assert(space->is_tree_small > 0);

			space->is_tree_small--;
		}
	}
}
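
/*
 *	Caller-side sketch for ipc_entry_dealloc (hypothetical): the
 *	asserts above require the object and request fields to be
 *	cleared before the entry is released, so a typical caller
 *	strips them first while still holding the write lock:
 *
 *		entry->ie_object = IO_NULL;
 *		entry->ie_request = 0;
 *		ipc_entry_dealloc(space, name, entry);
 *		is_write_unlock(space);
 */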

/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, it is also returned locked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t	space,
	int		target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated = FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 * Somebody else is growing the table.
			 * We just wait for them to finish.
			 */

			is_write_sleep(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 * Since is_table_next points to the next natural size
		 * we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 * If there is no target size, then the new size is simply
		 * specified by is_table_next.  If there is a target
		 * size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		nits = its + 1;
		nsize = nits->its_size;

		assert((osize < size) && (size <= nsize));

		/*
		 * OK, we'll attempt to grow the table.
		 * The realloc requires that the old table
		 * remain in existence.
		 */

		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated = TRUE;
		} else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 * We need to do a wakeup on the space,
		 * to rouse waiting threads.  We defer
		 * this until the space is unlocked,
		 * because we don't want them to spin.
		 */

		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 * The space died while it was unlocked.
			 */

			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 * If we did a realloc, it remapped the data.
		 * Otherwise we copy by hand first.  Then we have
		 * to zero the new part and the old local hash
		 * values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 * Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 * If there are entries in the splay tree,
		 * then we have work to do:
		 *	1) transfer entries to the table
		 *	2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 * The splay tree divides into four regions,
			 * based on the index of the entries:
			 *	1) 0 <= index < osize
			 *	2) osize <= index < size
			 *	3) size <= index < nsize
			 *	4) nsize <= index
			 *
			 * Entries in the first part are ignored.
			 * Entries in the second part, that don't
			 * collide, are moved into the table.
			 * Entries in the third part, that don't
			 * collide, are counted for is_tree_small.
			 * Entries in the fourth part are ignored.
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {
				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}
				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0; index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}

		/*
		 * Add entries in the new part which still aren't used
		 * to the free list.  Add them in reverse order,
		 * and set the generation number to -1, so that
		 * early allocations produce "natural" names.
		 */

		free_index = table[0].ie_next;
		for (i = size-1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;

		/*
		 * Now we need to free the old table.
		 * If the space dies or grows while unlocked,
		 * then we can quit here.
		 */
		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 * We might have moved enough entries from
		 * the splay tree into the table that
		 * the table can be profitably grown again.
		 *
		 * Note that if size == nsize, then
		 * space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}
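
/*
 *	Caller-side sketch for ipc_entry_grow_table (hypothetical):
 *	a caller may pass an explicit target size to pre-size the
 *	table to at least that many entries, instead of stepping to
 *	the next natural size with ITS_SIZE_NONE:
 *
 *		is_write_lock(space);
 *		kr = ipc_entry_grow_table(space, 1024);
 *		if (kr != KERN_SUCCESS)
 *			return kr;	(space is unlocked)
 *		(on success the space is write-locked again here)
 *		is_write_unlock(space);
 */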


#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

ipc_entry_t db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name);


ipc_entry_t
db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */