/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  File:   ipc/ipc_hash.c
 *
 *  Entry hash table operations.
 */
66 #include <mach/boolean.h>
67 #include <mach/port.h>
68 #include <kern/lock.h>
69 #include <kern/kalloc.h>
71 #include <ipc/ipc_space.h>
72 #include <ipc/ipc_object.h>
73 #include <ipc/ipc_entry.h>
74 #include <ipc/ipc_hash.h>
75 #include <ipc/ipc_init.h>
77 #include <mach_ipc_debug.h>
80 #include <mach/kern_return.h>
81 #include <mach_debug/hash_info.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_kern.h>
84 #endif /* MACH_IPC_DEBUG */
87 * Forward declarations
90 /* Lookup (space, obj) in global hash table */
91 boolean_t
ipc_hash_global_lookup(
94 mach_port_name_t
*namep
,
95 ipc_tree_entry_t
*entryp
);
97 /* Insert an entry into the global reverse hash table */
98 void ipc_hash_global_insert(
101 mach_port_name_t name
,
102 ipc_tree_entry_t entry
);
104 /* Delete an entry from the local reverse hash table */
105 void ipc_hash_local_delete(
108 mach_port_index_t index
,
112 * Routine: ipc_hash_lookup
114 * Converts (space, obj) -> (name, entry).
115 * Returns TRUE if an entry was found.
117 * The space must be locked (read or write) throughout.
124 mach_port_name_t
*namep
,
129 rv
= ipc_hash_local_lookup(space
, obj
, namep
, entryp
);
131 assert(!is_fast_space(space
) || space
->is_tree_hash
== 0);
132 if (space
->is_tree_hash
> 0)
133 rv
= ipc_hash_global_lookup(space
, obj
, namep
,
134 (ipc_tree_entry_t
*) entryp
);
140 * Routine: ipc_hash_insert
142 * Inserts an entry into the appropriate reverse hash table,
143 * so that ipc_hash_lookup will find it.
145 * The space must be write-locked.
152 mach_port_name_t name
,
155 mach_port_index_t index
;
157 index
= MACH_PORT_INDEX(name
);
158 if ((index
< space
->is_table_size
) &&
159 (entry
== &space
->is_table
[index
]))
160 ipc_hash_local_insert(space
, obj
, index
, entry
);
162 assert(!is_fast_space(space
));
163 ipc_hash_global_insert(space
, obj
, name
,
164 (ipc_tree_entry_t
) entry
);
169 * Routine: ipc_hash_delete
171 * Deletes an entry from the appropriate reverse hash table.
173 * The space must be write-locked.
180 mach_port_name_t name
,
183 mach_port_index_t index
;
185 index
= MACH_PORT_INDEX(name
);
186 if ((index
< space
->is_table_size
) &&
187 (entry
== &space
->is_table
[index
]))
188 ipc_hash_local_delete(space
, obj
, index
, entry
);
190 assert(!is_fast_space(space
));
191 ipc_hash_global_delete(space
, obj
, name
,
192 (ipc_tree_entry_t
) entry
);
197 * The global reverse hash table holds splay tree entries.
198 * It is a simple open-chaining hash table with singly-linked buckets.
199 * Each bucket is locked separately, with an exclusive lock.
200 * Within each bucket, move-to-front is used.
203 typedef natural_t ipc_hash_index_t
;
205 ipc_hash_index_t ipc_hash_global_size
;
206 ipc_hash_index_t ipc_hash_global_mask
;
208 #define IH_GLOBAL_HASH(space, obj) \
209 (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) + \
210 (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) & \
211 ipc_hash_global_mask)
213 typedef struct ipc_hash_global_bucket
{
214 decl_lck_mtx_data(, ihgb_lock_data
)
215 ipc_tree_entry_t ihgb_head
;
216 } *ipc_hash_global_bucket_t
;
218 #define IHGB_NULL ((ipc_hash_global_bucket_t) 0)
220 #define ihgb_lock_init(ihgb) lck_mtx_init(&(ihgb)->ihgb_lock_data, &ipc_lck_grp, &ipc_lck_attr)
221 #define ihgb_lock(ihgb) lck_mtx_lock(&(ihgb)->ihgb_lock_data)
222 #define ihgb_unlock(ihgb) lck_mtx_unlock(&(ihgb)->ihgb_lock_data)
224 ipc_hash_global_bucket_t ipc_hash_global_table
;
227 * Routine: ipc_hash_global_lookup
229 * Converts (space, obj) -> (name, entry).
230 * Looks in the global table, for splay tree entries.
231 * Returns TRUE if an entry was found.
233 * The space must be locked (read or write) throughout.
237 ipc_hash_global_lookup(
240 mach_port_name_t
*namep
,
241 ipc_tree_entry_t
*entryp
)
243 ipc_hash_global_bucket_t bucket
;
244 ipc_tree_entry_t
this, *last
;
246 assert(space
!= IS_NULL
);
247 assert(obj
!= IO_NULL
);
249 assert(!is_fast_space(space
));
250 bucket
= &ipc_hash_global_table
[IH_GLOBAL_HASH(space
, obj
)];
253 if ((this = bucket
->ihgb_head
) != ITE_NULL
) {
254 if ((this->ite_object
== obj
) &&
255 (this->ite_space
== space
)) {
256 /* found it at front; no need to move */
258 *namep
= this->ite_name
;
260 } else for (last
= &this->ite_next
;
261 (this = *last
) != ITE_NULL
;
262 last
= &this->ite_next
) {
263 if ((this->ite_object
== obj
) &&
264 (this->ite_space
== space
)) {
265 /* found it; move to front */
267 *last
= this->ite_next
;
268 this->ite_next
= bucket
->ihgb_head
;
269 bucket
->ihgb_head
= this;
271 *namep
= this->ite_name
;
279 return this != ITE_NULL
;
283 * Routine: ipc_hash_global_insert
285 * Inserts an entry into the global reverse hash table.
287 * The space must be write-locked.
291 ipc_hash_global_insert(
294 __assert_only mach_port_name_t name
,
295 ipc_tree_entry_t entry
)
297 ipc_hash_global_bucket_t bucket
;
299 assert(!is_fast_space(space
));
300 assert(entry
->ite_name
== name
);
301 assert(space
!= IS_NULL
);
302 assert(entry
->ite_space
== space
);
303 assert(obj
!= IO_NULL
);
304 assert(entry
->ite_object
== obj
);
306 space
->is_tree_hash
++;
307 assert(space
->is_tree_hash
<= space
->is_tree_total
);
309 bucket
= &ipc_hash_global_table
[IH_GLOBAL_HASH(space
, obj
)];
312 /* insert at front of bucket */
314 entry
->ite_next
= bucket
->ihgb_head
;
315 bucket
->ihgb_head
= entry
;
321 * Routine: ipc_hash_global_delete
323 * Deletes an entry from the global reverse hash table.
325 * The space must be write-locked.
329 ipc_hash_global_delete(
332 __assert_only mach_port_name_t name
,
333 ipc_tree_entry_t entry
)
335 ipc_hash_global_bucket_t bucket
;
336 ipc_tree_entry_t
this, *last
;
338 assert(!is_fast_space(space
));
339 assert(entry
->ite_name
== name
);
340 assert(space
!= IS_NULL
);
341 assert(entry
->ite_space
== space
);
342 assert(obj
!= IO_NULL
);
343 assert(entry
->ite_object
== obj
);
345 assert(space
->is_tree_hash
> 0);
346 space
->is_tree_hash
--;
348 bucket
= &ipc_hash_global_table
[IH_GLOBAL_HASH(space
, obj
)];
351 for (last
= &bucket
->ihgb_head
;
352 (this = *last
) != ITE_NULL
;
353 last
= &this->ite_next
) {
355 /* found it; remove from bucket */
357 *last
= this->ite_next
;
361 assert(this != ITE_NULL
);
/*
 *  Each space has a local reverse hash table, which holds
 *  entries from the space's table.  In fact, the hash table
 *  just uses a field (ie_index) in the table itself.
 *
 *  The local hash table is an open-addressing hash table,
 *  which means that when a collision occurs, instead of
 *  throwing the entry into a bucket, the entry is rehashed
 *  to another position in the table.  In this case the rehash
 *  is very simple: linear probing (ie, just increment the position).
 *  This simple rehash makes deletions tractable (they're still a pain),
 *  but it means that collisions tend to build up into clumps.
 *
 *  Because at least one entry in the table (index 0) is always unused,
 *  there will always be room in the reverse hash table.  If a table
 *  with n slots gets completely full, the reverse hash table will
 *  have one giant clump of n-1 slots and one free slot somewhere.
 *  Because entries are only entered into the reverse table if they
 *  are pure send rights (not receive, send-once, port-set,
 *  or dead-name rights), and free entries of course aren't entered,
 *  I expect the reverse hash table won't get unreasonably full.
 *
 *  Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
 *  pp. 135-142.) may be desirable here.  They can dramatically help
 *  unsuccessful lookups.  But unsuccessful lookups are almost always
 *  followed by insertions, and those slow down somewhat.  They
 *  also can help deletions somewhat.  Successful lookups aren't affected.
 *  So possibly a small win; probably nothing significant.
 */

/* Map an object pointer to a starting probe position in [0, size). */
#define IH_LOCAL_HASH(obj, size)                                \
    ((mach_port_index_t)((((uintptr_t) (obj)) >> 6) % (size)))
400 * Routine: ipc_hash_local_lookup
402 * Converts (space, obj) -> (name, entry).
403 * Looks in the space's local table, for table entries.
404 * Returns TRUE if an entry was found.
406 * The space must be locked (read or write) throughout.
410 ipc_hash_local_lookup(
413 mach_port_name_t
*namep
,
417 ipc_entry_num_t size
;
418 mach_port_index_t hindex
, index
;
420 assert(space
!= IS_NULL
);
421 assert(obj
!= IO_NULL
);
423 table
= space
->is_table
;
424 size
= space
->is_table_size
;
425 hindex
= IH_LOCAL_HASH(obj
, size
);
428 * Ideally, table[hindex].ie_index is the name we want.
429 * However, must check ie_object to verify this,
430 * because collisions can happen. In case of a collision,
431 * search farther along in the clump.
434 while ((index
= table
[hindex
].ie_index
) != 0) {
435 ipc_entry_t entry
= &table
[index
];
437 if (entry
->ie_object
== obj
) {
439 *namep
= MACH_PORT_MAKE(index
,
440 IE_BITS_GEN(entry
->ie_bits
));
444 if (++hindex
== size
)
452 * Routine: ipc_hash_local_insert
454 * Inserts an entry into the space's reverse hash table.
456 * The space must be write-locked.
460 ipc_hash_local_insert(
463 mach_port_index_t index
,
464 __assert_only ipc_entry_t entry
)
467 ipc_entry_num_t size
;
468 mach_port_index_t hindex
;
471 assert(space
!= IS_NULL
);
472 assert(obj
!= IO_NULL
);
474 table
= space
->is_table
;
475 size
= space
->is_table_size
;
476 hindex
= IH_LOCAL_HASH(obj
, size
);
478 assert(entry
== &table
[index
]);
479 assert(entry
->ie_object
== obj
);
482 * We want to insert at hindex, but there may be collisions.
483 * If a collision occurs, search for the end of the clump
487 while (table
[hindex
].ie_index
!= 0) {
488 if (++hindex
== size
)
492 table
[hindex
].ie_index
= index
;
496 * Routine: ipc_hash_local_delete
498 * Deletes an entry from the space's reverse hash table.
500 * The space must be write-locked.
504 ipc_hash_local_delete(
507 mach_port_index_t index
,
508 __assert_only ipc_entry_t entry
)
511 ipc_entry_num_t size
;
512 mach_port_index_t hindex
, dindex
;
514 assert(index
!= MACH_PORT_NULL
);
515 assert(space
!= IS_NULL
);
516 assert(obj
!= IO_NULL
);
518 table
= space
->is_table
;
519 size
= space
->is_table_size
;
520 hindex
= IH_LOCAL_HASH(obj
, size
);
522 assert(entry
== &table
[index
]);
523 assert(entry
->ie_object
== obj
);
526 * First check we have the right hindex for this index.
527 * In case of collision, we have to search farther
528 * along in this clump.
531 while (table
[hindex
].ie_index
!= index
) {
532 if (++hindex
== size
)
537 * Now we want to set table[hindex].ie_index = 0.
538 * But if we aren't the last index in a clump,
539 * this might cause problems for lookups of objects
540 * farther along in the clump that are displaced
541 * due to collisions. Searches for them would fail
542 * at hindex instead of succeeding.
544 * So we must check the clump after hindex for objects
545 * that are so displaced, and move one up to the new hole.
547 * hindex - index of new hole in the clump
548 * dindex - index we are checking for a displaced object
550 * When we move a displaced object up into the hole,
551 * it creates a new hole, and we have to repeat the process
552 * until we get to the end of the clump.
555 for (dindex
= hindex
; index
!= 0; hindex
= dindex
) {
557 mach_port_index_t tindex
;
560 if (++dindex
== size
)
562 assert(dindex
!= hindex
);
564 /* are we at the end of the clump? */
566 index
= table
[dindex
].ie_index
;
570 /* is this a displaced object? */
572 tobj
= table
[index
].ie_object
;
573 assert(tobj
!= IO_NULL
);
574 tindex
= IH_LOCAL_HASH(tobj
, size
);
576 if ((dindex
< hindex
) ?
577 ((dindex
< tindex
) && (tindex
<= hindex
)) :
578 ((dindex
< tindex
) || (tindex
<= hindex
)))
582 table
[hindex
].ie_index
= index
;
587 * Routine: ipc_hash_init
589 * Initialize the reverse hash table implementation.
597 /* if not configured, initialize ipc_hash_global_size */
599 if (ipc_hash_global_size
== 0) {
600 ipc_hash_global_size
= ipc_tree_entry_max
>> 8;
601 if (ipc_hash_global_size
< 32)
602 ipc_hash_global_size
= 32;
605 /* make sure it is a power of two */
607 ipc_hash_global_mask
= ipc_hash_global_size
- 1;
608 if ((ipc_hash_global_size
& ipc_hash_global_mask
) != 0) {
611 /* round up to closest power of two */
613 for (bit
= 1;; bit
<<= 1) {
614 ipc_hash_global_mask
|= bit
;
615 ipc_hash_global_size
= ipc_hash_global_mask
+ 1;
617 if ((ipc_hash_global_size
& ipc_hash_global_mask
) == 0)
622 /* allocate ipc_hash_global_table */
624 ipc_hash_global_table
= (ipc_hash_global_bucket_t
)
625 kalloc((vm_size_t
) (ipc_hash_global_size
*
626 sizeof(struct ipc_hash_global_bucket
)));
627 assert(ipc_hash_global_table
!= IHGB_NULL
);
629 /* and initialize it */
631 for (i
= 0; i
< ipc_hash_global_size
; i
++) {
632 ipc_hash_global_bucket_t bucket
;
634 bucket
= &ipc_hash_global_table
[i
];
635 ihgb_lock_init(bucket
);
636 bucket
->ihgb_head
= ITE_NULL
;
643 * Routine: ipc_hash_size
645 * Return the size of the global reverse hash table.
650 return ipc_hash_global_size
;
654 * Routine: ipc_hash_info
656 * Return information about the global reverse hash table.
657 * Fills the buffer with as much information as possible
658 * and returns the desired size of the buffer.
660 * Nothing locked. The caller should provide
661 * possibly-pageable memory.
667 hash_info_bucket_t
*info
,
672 if (ipc_hash_global_size
< count
)
673 count
= ipc_hash_global_size
;
675 for (i
= 0; i
< count
; i
++) {
676 ipc_hash_global_bucket_t bucket
= &ipc_hash_global_table
[i
];
677 unsigned int bucket_count
= 0;
678 ipc_tree_entry_t entry
;
681 for (entry
= bucket
->ihgb_head
;
683 entry
= entry
->ite_next
)
687 /* don't touch pageable memory while holding locks */
688 info
[i
].hib_count
= bucket_count
;
691 return ipc_hash_global_size
;
694 #endif /* MACH_IPC_DEBUG */