/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_entry.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Primitive functions to manipulate translation entries.
 */

#include <mach_kdb.h>
#include <mach_debug.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#if	MACH_KDB
#include <kern/task.h>
#endif
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <string.h>

zone_t ipc_tree_entry_zone;


/*
 * Forward declarations
 */
boolean_t ipc_entry_tree_collision(
	ipc_space_t	space,
	mach_port_name_t name);

/*
 *	Routine:	ipc_entry_tree_collision
 *	Purpose:
 *		Checks if "name" collides with an allocated name
 *		in the space's tree.  That is, returns TRUE
 *		if the splay tree contains a name with the same
 *		index as "name".
 *	Conditions:
 *		The space is locked (read or write) and active.
 */

boolean_t
ipc_entry_tree_collision(
	ipc_space_t	space,
	mach_port_name_t name)
{
	mach_port_index_t index;
	mach_port_name_t lower, upper;

	assert(space->is_active);

	/*
	 *	Check if we collide with the next smaller name
	 *	or the next larger name.
	 */

	ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);

	index = MACH_PORT_INDEX(name);
	return (((lower != (mach_port_name_t)~0) &&
		 (MACH_PORT_INDEX(lower) == index)) ||
		((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
}
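
/*
 * Illustrative sketch (not part of the original file): a Mach port
 * name packs a table index and a generation number, and two names
 * "collide" exactly when MACH_PORT_INDEX() agrees even though the
 * full names differ.  The helper below is hypothetical and only
 * demonstrates the decomposition that ipc_entry_tree_collision
 * relies on.
 */
#if 0	/* example only */
static boolean_t
example_same_index(
	mach_port_name_t a,
	mach_port_name_t b)
{
	/* MACH_PORT_MAKE(index, gen) inverts the decomposition */
	assert(MACH_PORT_MAKE(MACH_PORT_INDEX(a), MACH_PORT_GEN(a)) == a);

	return MACH_PORT_INDEX(a) == MACH_PORT_INDEX(b);
}
#endif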

/*
 *	Routine:	ipc_entry_lookup
 *	Purpose:
 *		Searches for an entry, given its name.
 *	Conditions:
 *		The space must be read or write locked throughout.
 *		The space must be active.
 */

ipc_entry_t
ipc_entry_lookup(
	ipc_space_t space,
	mach_port_name_t name)
{
	mach_port_index_t index;
	ipc_entry_t entry;

	assert(space->is_active);

	index = MACH_PORT_INDEX(name);
	/*
	 * If space is fast, we assume no splay tree and name within table
	 * bounds, but still check generation numbers (if enabled) and
	 * look for null entries.
	 */
	if (is_fast_space(space)) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	}
	else
	if (index < space->is_table_size) {
		entry = &space->is_table[index];
		if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
			if (entry->ie_bits & IE_BITS_COLLISION) {
				assert(space->is_tree_total > 0);
				goto tree_lookup;
			} else
				entry = IE_NULL;
		else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;
	} else if (space->is_tree_total == 0)
		entry = IE_NULL;
	else {
	    tree_lookup:
		entry = (ipc_entry_t)
				ipc_splay_tree_lookup(&space->is_tree, name);
		/* with sub-space introduction, an entry may appear in	*/
		/* the splay tree and yet not show rights for this subspace */
		if (entry != IE_NULL) {
			if (!(IE_BITS_TYPE(entry->ie_bits)))
				entry = IE_NULL;
		}
	}

	assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
	return entry;
}
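
/*
 * Usage sketch (illustrative, not from the original file): callers
 * hold the space lock across the lookup and across any use of the
 * returned entry, since the pointer is only stable while locked.
 * The is_read_lock()/is_read_unlock() names are assumed from the
 * "read or write locked" condition above.
 */
#if 0	/* example only */
static ipc_object_t
example_object_for_name(
	ipc_space_t space,
	mach_port_name_t name)
{
	ipc_entry_t entry;
	ipc_object_t object = IO_NULL;

	is_read_lock(space);
	if (space->is_active) {
		entry = ipc_entry_lookup(space, name);
		if (entry != IE_NULL)
			object = entry->ie_object;
	}
	is_read_unlock(space);
	return object;
}
#endif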

/*
 *	Routine:	ipc_entry_get
 *	Purpose:
 *		Tries to allocate an entry out of the space.
 *	Conditions:
 *		The space is write-locked and active throughout.
 *		An object may be locked.  Will not allocate memory.
 *	Returns:
 *		KERN_SUCCESS		A free entry was found.
 *		KERN_NO_SPACE		No entry allocated.
 */

kern_return_t
ipc_entry_get(
	ipc_space_t space,
	mach_port_name_t *namep,
	ipc_entry_t *entryp)
{
	ipc_entry_t table;
	mach_port_index_t first_free;
	ipc_entry_t free_entry;

	assert(space->is_active);

	{
		table = space->is_table;
		first_free = table->ie_next;

		if (first_free == 0)
			return KERN_NO_SPACE;

		free_entry = &table[first_free];
		table->ie_next = free_entry->ie_next;
	}

	/*
	 *	Initialize the new entry.  We need only
	 *	increment the generation number and clear ie_request.
	 */
	{
		mach_port_name_t new_name;
		mach_port_gen_t gen;

		gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
		free_entry->ie_bits = gen;
		free_entry->ie_request = IE_REQ_NONE;

		/*
		 *	The new name can't be MACH_PORT_NULL because index
		 *	is non-zero.  It can't be MACH_PORT_DEAD because
		 *	the table isn't allowed to grow big enough.
		 *	(See comment in ipc/ipc_table.h.)
		 */
		new_name = MACH_PORT_MAKE(first_free, gen);
		assert(MACH_PORT_VALID(new_name));
		*namep = new_name;
	}

	assert(free_entry->ie_object == IO_NULL);

	*entryp = free_entry;
	return KERN_SUCCESS;
}
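
/*
 * Illustrative sketch (not part of the original file): free table
 * slots form a singly linked list threaded through ie_next, headed
 * at table[0].ie_next and terminated by index 0, which is how
 * ipc_entry_get pops its first_free slot.  The hypothetical helper
 * below just counts the free slots to show the traversal.
 */
#if 0	/* example only */
static ipc_entry_num_t
example_count_free_entries(
	ipc_entry_t table)
{
	ipc_entry_num_t count = 0;
	mach_port_index_t i;

	for (i = table->ie_next; i != 0; i = table[i].ie_next)
		count++;
	return count;
}
#endif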

/*
 *	Routine:	ipc_entry_alloc
 *	Purpose:
 *		Allocate an entry out of the space.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		An entry was allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory for an entry.
 */

kern_return_t
ipc_entry_alloc(
	ipc_space_t space,
	mach_port_name_t *namep,
	ipc_entry_t *entryp)
{
	kern_return_t kr;

	is_write_lock(space);

	for (;;) {
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		kr = ipc_entry_get(space, namep, entryp);
		if (kr == KERN_SUCCESS)
			return kr;

		kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr; /* space is unlocked */
	}
}
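
/*
 * Usage sketch (illustrative, not from the original file): a caller
 * allocates an entry and fills it in.  On success the space comes
 * back write-locked, so the caller must drop the lock itself; on
 * failure the space is already unlocked.
 */
#if 0	/* example only */
static kern_return_t
example_alloc_entry(
	ipc_space_t space,
	mach_port_name_t *namep)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc(space, namep, &entry);
	if (kr != KERN_SUCCESS)
		return kr;	/* space is unlocked */

	/* ... install an object and rights in the entry here ... */

	is_write_unlock(space);
	return KERN_SUCCESS;
}
#endif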

/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_FAILURE		The name is not already known
 *					(fast spaces only).
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t space,
	mach_port_name_t name,
	ipc_entry_t *entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	ipc_tree_entry_t tentry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry2;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tentry) ite_free(tentry);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are usually four cases:
		 *		1) The entry is reserved (index 0)
		 *		2) The entry is inuse, for the same name
		 *		3) The entry is inuse, for a different name
		 *		4) The entry is free
		 *	For a task with a "fast" IPC space, we disallow
		 *	cases 1) and 3), because ports cannot be renamed.
		 */
		if (index < space->is_table_size) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (index == 0) {
				assert(!IE_BITS_TYPE(entry->ie_bits));
				assert(!IE_BITS_GEN(entry->ie_bits));
			} else if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					*entryp = entry;
					assert(!tentry);
					return KERN_SUCCESS;
				}
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	Rip the entry out of the free list.
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
							!= index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				entry->ie_request = IE_REQ_NONE;
				*entryp = entry;

				assert(entry->ie_object == IO_NULL);
				if (is_fast_space(space))
					assert(!tentry);
				else if (tentry)
					ite_free(tentry);
				return KERN_SUCCESS;
			}
		}

		/*
		 *	In a fast space, ipc_entry_alloc_name may be
		 *	used only to add a right to a port name already
		 *	known in this space.
		 */
		if (is_fast_space(space)) {
			is_write_unlock(space);
			assert(!tentry);
			return KERN_FAILURE;
		}

		/*
		 *	Before trying to allocate any memory,
		 *	check if the entry already exists in the tree.
		 *	This avoids spurious resource errors.
		 *	The splay tree makes a subsequent lookup/insert
		 *	of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name))
							!= ITE_NULL)) {
			assert(tentry2->ite_space == space);
			assert(IE_BITS_TYPE(tentry2->ite_bits));

			*entryp = &tentry2->ite_entry;
			if (tentry) ite_free(tentry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 *	Check if the table should be grown.
		 *
		 *	Note that if space->is_table_size == its->its_size,
		 *	then we won't ever try to grow the table.
		 *
		 *	Note that we are optimistically assuming that name
		 *	doesn't collide with any existing names.  (So if
		 *	it were entered into the tree, is_tree_small would
		 *	be incremented.)  This is OK, because even in that
		 *	case, we don't lose memory by growing the table.
		 */
		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 *	Can save space by growing the table.
			 *	Because the space will be unlocked,
			 *	we must restart.
			 */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tentry) ite_free(tentry);
				return kr;
			}

			continue;
		}

		/*
		 *	If a splay-tree entry was allocated previously,
		 *	go ahead and insert it into the tree.
		 */

		if (tentry != ITE_NULL) {

			space->is_tree_total++;

			if (index < space->is_table_size) {
				entry = &space->is_table[index];
				entry->ie_bits |= IE_BITS_COLLISION;
			} else if ((index < its->its_size) &&
				   !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree, name, tentry);
			tentry->ite_bits = 0;
			tentry->ite_request = 0;
			tentry->ite_object = IO_NULL;
			tentry->ite_space = space;
			*entryp = &tentry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 *	Allocate a tree entry and try again.
		 */

		is_write_unlock(space);
		tentry = ite_alloc();
		if (tentry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
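
/*
 * Usage sketch (illustrative, not from the original file): claiming
 * a caller-chosen name.  A nonzero type in the returned entry means
 * the name is already in use, which a caller might report with
 * KERN_NAME_EXISTS.
 */
#if 0	/* example only */
static kern_return_t
example_claim_name(
	ipc_space_t space,
	mach_port_name_t name)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;	/* space is unlocked */

	if (IE_BITS_TYPE(entry->ie_bits)) {
		/* existing entry: the name is taken */
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	}

	/* ... install the new right here ... */
	is_write_unlock(space);
	return KERN_SUCCESS;
}
#endif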

/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t space,
	mach_port_name_t name,
	ipc_entry_t entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == IE_REQ_NONE);

#if 1
	if (entry->ie_request != IE_REQ_NONE)
		panic("ipc_entry_dealloc()\n");
#endif

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if (is_fast_space(space)) {
		assert(index < size);
		assert(entry == &table[index]);
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
		assert(!(entry->ie_bits & IE_BITS_COLLISION));
		entry->ie_bits &= IE_BITS_GEN_MASK;
		entry->ie_next = table->ie_next;
		table->ie_next = index;
		return;
	}

	if ((index < size) && (entry == &table[index])) {
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_name_t tname;
			boolean_t pick;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index + 1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			entry->ie_object = obj = tentry->ite_object;
			entry->ie_bits = tentry->ite_bits | MACH_PORT_GEN(tname);
			entry->ie_request = tentry->ite_request;

			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(tentry->ite_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);

		} else {
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}

	} else {
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;

		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {

			assert(space->is_tree_small > 0);

			space->is_tree_small--;
		}
	}
}
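
/*
 * Usage sketch (illustrative, not from the original file): the
 * typical error path after ipc_entry_alloc, releasing an entry
 * while the space is still write-locked and before any object has
 * been installed in it.
 */
#if 0	/* example only */
static void
example_release_unused_entry(
	ipc_space_t space,
	mach_port_name_t name,
	ipc_entry_t entry)
{
	assert(entry->ie_object == IO_NULL);
	ipc_entry_dealloc(space, name, entry);	/* space stays locked */
	is_write_unlock(space);
}
#endif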

/*
 *	Routine:	ipc_entry_grow_table
 *	Purpose:
 *		Grows the table in a space.
 *	Conditions:
 *		The space must be write-locked and active before.
 *		If successful, it is also returned locked.
 *		Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The space died.
 *		KERN_NO_SPACE		Table has maximum size already.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
 */

kern_return_t
ipc_entry_grow_table(
	ipc_space_t space,
	ipc_table_elems_t target_size)
{
	ipc_entry_num_t osize, size, nsize, psize;

	do {
		boolean_t reallocated = FALSE;

		ipc_entry_t otable, table;
		ipc_table_size_t oits, its, nits;
		mach_port_index_t i, free_index;

		assert(space->is_active);

		if (space->is_growing) {
			/*
			 *	Somebody else is growing the table.
			 *	We just wait for them to finish.
			 */

			is_write_sleep(space);
			return KERN_SUCCESS;
		}

		otable = space->is_table;

		its = space->is_table_next;
		size = its->its_size;

		/*
		 *	Since is_table_next points to the next natural size
		 *	we can identify the current size entry.
		 */
		oits = its - 1;
		osize = oits->its_size;

		/*
		 *	If there is no target size, then the new size is simply
		 *	specified by is_table_next.  If there is a target
		 *	size, then search for the next entry.
		 */
		if (target_size != ITS_SIZE_NONE) {
			if (target_size <= osize) {
				is_write_unlock(space);
				return KERN_SUCCESS;
			}

			psize = osize;
			while ((psize != size) && (target_size > size)) {
				psize = size;
				its++;
				size = its->its_size;
			}
			if (psize == size) {
				is_write_unlock(space);
				return KERN_NO_SPACE;
			}
		}

		if (osize == size) {
			is_write_unlock(space);
			return KERN_NO_SPACE;
		}

		nits = its + 1;
		nsize = nits->its_size;

		assert((osize < size) && (size <= nsize));

		/*
		 *	OK, we'll attempt to grow the table.
		 *	The realloc requires that the old table
		 *	remain in existence.
		 */

		space->is_growing = TRUE;
		is_write_unlock(space);

		if (it_entries_reallocable(oits)) {
			table = it_entries_realloc(oits, otable, its);
			reallocated = TRUE;
		}
		else {
			table = it_entries_alloc(its);
		}

		is_write_lock(space);
		space->is_growing = FALSE;

		/*
		 *	We need to do a wakeup on the space,
		 *	to rouse waiting threads.  We defer
		 *	this until the space is unlocked,
		 *	because we don't want them to spin.
		 */

		if (table == IE_NULL) {
			is_write_unlock(space);
			thread_wakeup((event_t) space);
			return KERN_RESOURCE_SHORTAGE;
		}

		if (!space->is_active) {
			/*
			 *	The space died while it was unlocked.
			 */

			is_write_unlock(space);
			thread_wakeup((event_t) space);
			it_entries_free(its, table);
			is_write_lock(space);
			return KERN_SUCCESS;
		}

		assert(space->is_table == otable);
		assert((space->is_table_next == its) ||
		       (target_size != ITS_SIZE_NONE));
		assert(space->is_table_size == osize);

		space->is_table = table;
		space->is_table_size = size;
		space->is_table_next = nits;

		/*
		 *	If we did a realloc, it remapped the data.
		 *	Otherwise we copy by hand first.  Then we have
		 *	to zero the new part and the old local hash
		 *	values.
		 */
		if (!reallocated)
			(void) memcpy((void *) table, (const void *) otable,
				      osize * (sizeof(struct ipc_entry)));

		for (i = 0; i < osize; i++)
			table[i].ie_index = 0;

		(void) memset((void *) (table + osize), 0,
			      ((size - osize) * (sizeof(struct ipc_entry))));

		/*
		 *	Put old entries into the reverse hash table.
		 */
		for (i = 0; i < osize; i++) {
			ipc_entry_t entry = &table[i];

			if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) {
				ipc_hash_local_insert(space, entry->ie_object,
						      i, entry);
			}
		}

		/*
		 *	If there are entries in the splay tree,
		 *	then we have work to do:
		 *		1) transfer entries to the table
		 *		2) update is_tree_small
		 */
		assert(!is_fast_space(space) || space->is_tree_total == 0);
		if (space->is_tree_total > 0) {
			mach_port_index_t index;
			boolean_t delete;
			struct ipc_splay_tree ignore;
			struct ipc_splay_tree move;
			struct ipc_splay_tree small;
			ipc_entry_num_t nosmall;
			ipc_tree_entry_t tentry;

			/*
			 *	The splay tree divides into four regions,
			 *	based on the index of the entries:
			 *		1) 0 <= index < osize
			 *		2) osize <= index < size
			 *		3) size <= index < nsize
			 *		4) nsize <= index
			 *
			 *	Entries in the first part are ignored.
			 *	Entries in the second part, that don't
			 *	collide, are moved into the table.
			 *	Entries in the third part, that don't
			 *	collide, are counted for is_tree_small.
			 *	Entries in the fourth part are ignored.
			 */

			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(nsize, 0),
					     &small);
			ipc_splay_tree_split(&small,
					     MACH_PORT_MAKE(size, 0),
					     &move);
			ipc_splay_tree_split(&move,
					     MACH_PORT_MAKE(osize, 0),
					     &ignore);

			/* move entries into the table */

			for (tentry = ipc_splay_traverse_start(&move);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&move, delete)) {

				mach_port_name_t name;
				mach_port_gen_t gen;
				mach_port_type_t type;
				ipc_entry_bits_t bits;
				ipc_object_t obj;
				ipc_entry_t entry;

				name = tentry->ite_name;
				gen = MACH_PORT_GEN(name);
				index = MACH_PORT_INDEX(name);

				assert(tentry->ite_space == space);
				assert((osize <= index) && (index < size));

				entry = &table[index];
				bits = entry->ie_bits;
				if (IE_BITS_TYPE(bits)) {
					assert(IE_BITS_GEN(bits) != gen);
					entry->ie_bits |= IE_BITS_COLLISION;
					delete = FALSE;
					continue;
				}

				bits = tentry->ite_bits;
				type = IE_BITS_TYPE(bits);
				assert(type != MACH_PORT_TYPE_NONE);

				entry->ie_bits = bits | gen;
				entry->ie_request = tentry->ite_request;
				entry->ie_object = obj = tentry->ite_object;

				if (type == MACH_PORT_TYPE_SEND) {
					ipc_hash_global_delete(space, obj,
							       name, tentry);
					ipc_hash_local_insert(space, obj,
							      index, entry);
				}
				space->is_tree_total--;
				delete = TRUE;
			}
			ipc_splay_traverse_finish(&move);

			/* count entries for is_tree_small */

			nosmall = 0; index = 0;
			for (tentry = ipc_splay_traverse_start(&small);
			     tentry != ITE_NULL;
			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
				mach_port_index_t nindex;

				nindex = MACH_PORT_INDEX(tentry->ite_name);

				if (nindex != index) {
					nosmall++;
					index = nindex;
				}
			}
			ipc_splay_traverse_finish(&small);

			assert(nosmall <= (nsize - size));
			assert(nosmall <= space->is_tree_total);
			space->is_tree_small = nosmall;

			/* put the splay tree back together */

			ipc_splay_tree_join(&space->is_tree, &small);
			ipc_splay_tree_join(&space->is_tree, &move);
			ipc_splay_tree_join(&space->is_tree, &ignore);
		}

		/*
		 *	Add entries in the new part which still aren't used
		 *	to the free list.  Add them in reverse order,
		 *	and set the generation number to -1, so that
		 *	early allocations produce "natural" names.
		 */

		free_index = table[0].ie_next;
		for (i = size - 1; i >= osize; --i) {
			ipc_entry_t entry = &table[i];

			if (entry->ie_bits == 0) {
				entry->ie_bits = IE_BITS_GEN_MASK;
				entry->ie_next = free_index;
				free_index = i;
			}
		}
		table[0].ie_next = free_index;

		/*
		 *	Now we need to free the old table.
		 *	If the space dies or grows while unlocked,
		 *	then we can quit here.
		 */
		is_write_unlock(space);
		thread_wakeup((event_t) space);

		it_entries_free(oits, otable);
		is_write_lock(space);
		if (!space->is_active || (space->is_table_next != nits))
			return KERN_SUCCESS;

		/*
		 *	We might have moved enough entries from
		 *	the splay tree into the table that
		 *	the table can be profitably grown again.
		 *
		 *	Note that if size == nsize, then
		 *	space->is_tree_small == 0.
		 */
	} while ((space->is_tree_small > 0) &&
		 (((nsize - size) * sizeof(struct ipc_entry)) <
		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));

	return KERN_SUCCESS;
}
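
/*
 * Illustrative sketch (not part of the original file): the
 * profitability test from the loop above, factored out as a
 * hypothetical predicate.  Growing again pays off when the memory
 * for the new table slots costs less than the tree entries those
 * slots would absorb.
 */
#if 0	/* example only */
static boolean_t
example_growth_profitable(
	ipc_entry_num_t cur_size,	/* current table size */
	ipc_entry_num_t next_size,	/* next natural size */
	ipc_entry_num_t tree_small)	/* space->is_tree_small */
{
	return (tree_small > 0) &&
	       (((next_size - cur_size) * sizeof(struct ipc_entry)) <
		(tree_small * sizeof(struct ipc_tree_entry)));
}
#endif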


#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf	kdbprintf

ipc_entry_t db_ipc_object_by_name(
	task_t task,
	mach_port_name_t name);


ipc_entry_t
db_ipc_object_by_name(
	task_t task,
	mach_port_name_t name)
{
	ipc_space_t space = task->itk_space;
	ipc_entry_t entry;

	entry = ipc_entry_lookup(space, name);
	if (entry != IE_NULL) {
		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n",
			task, name, entry->ie_object);
		return (ipc_entry_t) entry->ie_object;
	}
	return entry;
}
#endif	/* MACH_KDB */