/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_entry.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Primitive functions to manipulate translation entries.
 */

#include <mach_kdb.h>
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#if MACH_KDB
#include <kern/task.h>
#endif
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>

zone_t ipc_tree_entry_zone;

/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name);

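/*
 * A mach_port_name_t packs a table index and a generation number;
 * MACH_PORT_INDEX() and MACH_PORT_GEN() extract the two halves and
 * MACH_PORT_MAKE() recombines them (the exact bit layout is defined
 * in mach/port.h, not here).  A minimal sketch of the invariant the
 * routines below rely on, for any index/generation pair that is
 * representable in a name:
 *
 *	name = MACH_PORT_MAKE(index, gen);
 *	assert(MACH_PORT_INDEX(name) == index);
 *	assert(MACH_PORT_GEN(name) == gen);
 */
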
/*
 *	Routine:	ipc_entry_tree_collision
 *	Purpose:
 *		Checks if "name" collides with an allocated name
 *		in the space's tree.  That is, returns TRUE
 *		if the splay tree contains a name with the same
 *		index as "name".
 *	Conditions:
 *		The space is locked (read or write) and active.
 */

boolean_t
ipc_entry_tree_collision(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 *	Check if we collide with the next smaller name
	 *	or the next larger name.
	 */

	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
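
	/*
	 * ipc_splay_tree_bounds reports ~0 for "lower" when no smaller
	 * name exists and 0 for "upper" when no larger one does; those
	 * sentinel values are excluded from the index comparison below.
	 */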

	index = MACH_PORT_INDEX(name);
	return (((lower != (mach_port_name_t)~0) &&
		 (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}

/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t		space,
	mach_port_name_t	name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 * If space is fast, we assume no splay tree and name within table
	 * bounds, but still check generation numbers (if enabled) and
	 * look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name)) {
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		} else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	tree_lookup:
		entry = (ipc_entry_t)
			ipc_splay_tree_lookup(&space->is_tree, name);
		/*
		 * With sub-space introduction, an entry may appear in
		 * the splay tree and yet not show rights for this subspace.
		 */
		if (entry != IE_NULL) {
			if (!IE_BITS_TYPE(entry->ie_bits))
				entry = IE_NULL;
		}
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
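
/*
 * Typical caller pattern, as a sketch (the caller, not this routine,
 * takes and releases the space lock per the conditions above):
 *
 *	is_read_lock(space);
 *	if (space->is_active) {
 *		entry = ipc_entry_lookup(space, name);
 *		if (entry != IE_NULL)
 *			...use entry->ie_object...
 *	}
 *	is_read_unlock(space);
 */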

/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

	{
		table = space->is_table;
		first_free = table->ie_next;

		if (first_free == 0)
			return KERN_NO_SPACE;

		free_entry = &table[first_free];
		table->ie_next = free_entry->ie_next;
	}

	/*
	 *	Initialize the new entry.  We need only
	 *	increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = 0;

		/*
		 *	The new name can't be MACH_PORT_NULL because index
		 *	is non-zero.  It can't be MACH_PORT_DEAD because
		 *	the table isn't allowed to grow big enough.
		 *	(See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}
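
/*
 * Free-list layout, as a sketch: table[0] is reserved (index 0 is
 * never a valid name index), and its ie_next field heads a singly
 * linked list of free indices threaded through the entries' own
 * ie_next fields and terminated by 0:
 *
 *	table[0].ie_next -> i1,  table[i1].ie_next -> i2,  ... -> 0
 *
 * ipc_entry_get above pops the head of this list; ipc_entry_dealloc
 * and ipc_entry_grow_table below push free slots back on.
 */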

/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr;	/* space is unlocked */
	}
}
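
/*
 * Example caller pattern, as a sketch: on success the space is left
 * write-locked, so the caller initializes the entry and then unlocks.
 * The "object" initialization below is hypothetical; callers fill in
 * whatever right they are creating.
 *
 *	kr = ipc_entry_alloc(space, &name, &entry);
 *	if (kr != KERN_SUCCESS)
 *		return kr;		(space is unlocked on failure)
 *	entry->ie_object = object;	(hypothetical initialization)
 *	is_write_unlock(space);
 */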

/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_FAILURE		Couldn't grab the name (fast space
 *					only: the name is not already known).
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry)
				ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *	1) The entry is reserved (index 0)
		 *	2) The entry is in use, for the same name
		 *	3) The entry is in use, for a different name
		 *	4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					assert(!tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	Rip the entry out of the free list.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
				      != index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = 0;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				else if (tentry)
					ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 *	In a fast space, ipc_entry_alloc_name may be
		 *	used only to add a right to a port name already
		 *	known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 *	Before trying to allocate any memory,
		 *	check if the entry already exists in the tree.
		 *	This avoids spurious resource errors.
		 *	The splay tree makes a subsequent lookup/insert
		 *	of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
		     != ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry)
				ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 *	Check if the table should be grown.
		 *
		 *	Note that if space->is_table_size == its->its_size,
		 *	then we won't ever try to grow the table.
		 *
		 *	Note that we are optimistically assuming that name
		 *	doesn't collide with any existing names.  (So if
		 *	it were entered into the tree, is_tree_small would
		 *	be incremented.)  This is OK, because even in that
		 *	case, we don't lose memory by growing the table.
		 */
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 *	Can save space by growing the table.
			 *	Because the space will be unlocked,
			 *	we must restart.
			 */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry)
					ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 *	If a splay-tree entry was allocated previously,
		 *	go ahead and insert it into the tree.
		 */

		if (tentry != ITE_NULL) {
			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 *	Allocate a tree entry and try again.
		 */

		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
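
/*
 * Note on the loop above, as a sketch of the control flow: both the
 * grow-table path and the ite_alloc() path drop the space lock, so
 * each iteration re-validates is_active and re-probes the table and
 * splay tree; nothing observed in a previous iteration is trusted
 * once the lock has been dropped.
 */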

/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index + 1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits | MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);
		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}
	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;
		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {
			assert(space->is_tree_small > 0);
			space->is_tree_small--;
		}
	}
}
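
/*
 * Caller contract, as a sketch of what the asserts at the top of
 * ipc_entry_dealloc require: the entry's object and request slot must
 * already have been released before the slot itself is deallocated,
 * e.g.
 *
 *	entry->ie_object = IO_NULL;	(reference dropped elsewhere)
 *	entry->ie_request = 0;
 *	ipc_entry_dealloc(space, name, entry);
 */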

/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, it is also returned locked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t		space,
	ipc_table_elems_t	target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated = FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 *	Somebody else is growing the table.
			 *	We just wait for them to finish.
			 */

			is_write_sleep(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 *	Since is_table_next points to the next natural size
		 *	we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 *	If there is no target size, then the new size is simply
		 *	specified by is_table_next.  If there is a target
		 *	size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		nits = its + 1;
		nsize = nits->its_size;

		assert((osize < size) && (size <= nsize));

		/*
		 *	OK, we'll attempt to grow the table.
		 *	The realloc requires that the old table
		 *	remain in existence.
		 */

		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated = TRUE;
		} else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 *	We need to do a wakeup on the space,
		 *	to rouse waiting threads.  We defer
		 *	this until the space is unlocked,
		 *	because we don't want them to spin.
		 */

		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 *	The space died while it was unlocked.
			 */

			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 *	If we did a realloc, it remapped the data.
		 *	Otherwise we copy by hand first.  Then we have
		 *	to zero the new part and the old local hash
		 *	values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 *	Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 *	If there are entries in the splay tree,
		 *	then we have work to do:
		 *	1) transfer entries to the table
		 *	2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 *	The splay tree divides into four regions,
			 *	based on the index of the entries:
			 *	1) 0 <= index < osize
			 *	2) osize <= index < size
			 *	3) size <= index < nsize
			 *	4) nsize <= index
			 *
			 *	Entries in the first part are ignored.
			 *	Entries in the second part, that don't
			 *	collide, are moved into the table.
			 *	Entries in the third part, that don't
			 *	collide, are counted for is_tree_small.
			 *	Entries in the fourth part are ignored.
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {
				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}
				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0;
			index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}

		/*
		 *	Add entries in the new part which still aren't used
		 *	to the free list.  Add them in reverse order,
		 *	and set the generation number to -1, so that
		 *	early allocations produce "natural" names.
		 */

		free_index = table[0].ie_next;
		for (i = size - 1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;
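
		/*
		 * A sketch of why "generation -1" gives natural names:
		 * IE_BITS_GEN_MASK is the all-ones generation, so the
		 * increment inside IE_BITS_NEW_GEN wraps it around to
		 * the first generation value when ipc_entry_get first
		 * hands the slot out.  Freshly grown slots therefore
		 * start their generation sequence from the beginning.
		 */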

		/*
		 *	Now we need to free the old table.
		 *	If the space dies or grows while unlocked,
		 *	then we can quit here.
		 */
		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 *	We might have moved enough entries from
		 *	the splay tree into the table that
		 *	the table can be profitably grown again.
		 *
		 *	Note that if size == nsize, then
		 *	space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}
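
/*
 * End-to-end sketch of one grow step, in terms of the locals of
 * ipc_entry_grow_table above:
 *
 *	osize = oits->its_size;		current table size
 *	size  = its->its_size;		size being grown to
 *	nsize = nits->its_size;		next size after that
 *
 * The space is unlocked only around the allocation itself, with
 * is_growing set so that concurrent growers sleep in is_write_sleep();
 * afterwards the do-while may repeat if enough splay-tree entries were
 * absorbed that another grow step would still save memory.
 */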

#if MACH_KDB
#include <ddb/db_output.h>
#define printf kdbprintf

ipc_entry_t db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name);

ipc_entry_t
db_ipc_object_by_name(
	task_t			task,
	mach_port_name_t	name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */