/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	ipc/ipc_hash.c
 *
 * Entry hash table operations.
 */
#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>
#include <mach_ipc_debug.h>

#if	MACH_IPC_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif	/* MACH_IPC_DEBUG */
89 * Forward declarations
92 /* Lookup (space, obj) in global hash table */
93 boolean_t
ipc_hash_global_lookup(
96 mach_port_name_t
*namep
,
97 ipc_tree_entry_t
*entryp
);
99 /* Insert an entry into the global reverse hash table */
100 void ipc_hash_global_insert(
103 mach_port_name_t name
,
104 ipc_tree_entry_t entry
);
106 /* Delete an entry from the local reverse hash table */
107 void ipc_hash_local_delete(
110 mach_port_index_t index
,
114 * Routine: ipc_hash_lookup
116 * Converts (space, obj) -> (name, entry).
117 * Returns TRUE if an entry was found.
119 * The space must be locked (read or write) throughout.
126 mach_port_name_t
*namep
,
131 rv
= ipc_hash_local_lookup(space
, obj
, namep
, entryp
);
133 assert(!is_fast_space(space
) || space
->is_tree_hash
== 0);
134 if (space
->is_tree_hash
> 0)
135 rv
= ipc_hash_global_lookup(space
, obj
, namep
,
136 (ipc_tree_entry_t
*) entryp
);
142 * Routine: ipc_hash_insert
144 * Inserts an entry into the appropriate reverse hash table,
145 * so that ipc_hash_lookup will find it.
147 * The space must be write-locked.
154 mach_port_name_t name
,
157 mach_port_index_t index
;
159 index
= MACH_PORT_INDEX(name
);
160 if ((index
< space
->is_table_size
) &&
161 (entry
== &space
->is_table
[index
]))
162 ipc_hash_local_insert(space
, obj
, index
, entry
);
164 assert(!is_fast_space(space
));
165 ipc_hash_global_insert(space
, obj
, name
,
166 (ipc_tree_entry_t
) entry
);
171 * Routine: ipc_hash_delete
173 * Deletes an entry from the appropriate reverse hash table.
175 * The space must be write-locked.
182 mach_port_name_t name
,
185 mach_port_index_t index
;
187 index
= MACH_PORT_INDEX(name
);
188 if ((index
< space
->is_table_size
) &&
189 (entry
== &space
->is_table
[index
]))
190 ipc_hash_local_delete(space
, obj
, index
, entry
);
192 assert(!is_fast_space(space
));
193 ipc_hash_global_delete(space
, obj
, name
,
194 (ipc_tree_entry_t
) entry
);
199 * The global reverse hash table holds splay tree entries.
200 * It is a simple open-chaining hash table with singly-linked buckets.
201 * Each bucket is locked separately, with an exclusive lock.
202 * Within each bucket, move-to-front is used.
205 typedef natural_t ipc_hash_index_t
;
207 ipc_hash_index_t ipc_hash_global_size
;
208 ipc_hash_index_t ipc_hash_global_mask
;
210 #define IH_GLOBAL_HASH(space, obj) \
211 (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) + \
212 (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) & \
213 ipc_hash_global_mask)
215 typedef struct ipc_hash_global_bucket
{
216 decl_mutex_data(, ihgb_lock_data
)
217 ipc_tree_entry_t ihgb_head
;
218 } *ipc_hash_global_bucket_t
;
220 #define IHGB_NULL ((ipc_hash_global_bucket_t) 0)
222 #define ihgb_lock_init(ihgb) mutex_init(&(ihgb)->ihgb_lock_data, 0)
223 #define ihgb_lock(ihgb) mutex_lock(&(ihgb)->ihgb_lock_data)
224 #define ihgb_unlock(ihgb) mutex_unlock(&(ihgb)->ihgb_lock_data)
226 ipc_hash_global_bucket_t ipc_hash_global_table
;
229 * Routine: ipc_hash_global_lookup
231 * Converts (space, obj) -> (name, entry).
232 * Looks in the global table, for splay tree entries.
233 * Returns TRUE if an entry was found.
235 * The space must be locked (read or write) throughout.
239 ipc_hash_global_lookup(
242 mach_port_name_t
*namep
,
243 ipc_tree_entry_t
*entryp
)
245 ipc_hash_global_bucket_t bucket
;
246 ipc_tree_entry_t
this, *last
;
248 assert(space
!= IS_NULL
);
249 assert(obj
!= IO_NULL
);
251 assert(!is_fast_space(space
));
252 bucket
= &ipc_hash_global_table
[IH_GLOBAL_HASH(space
, obj
)];
255 if ((this = bucket
->ihgb_head
) != ITE_NULL
) {
256 if ((this->ite_object
== obj
) &&
257 (this->ite_space
== space
)) {
258 /* found it at front; no need to move */
260 *namep
= this->ite_name
;
262 } else for (last
= &this->ite_next
;
263 (this = *last
) != ITE_NULL
;
264 last
= &this->ite_next
) {
265 if ((this->ite_object
== obj
) &&
266 (this->ite_space
== space
)) {
267 /* found it; move to front */
269 *last
= this->ite_next
;
270 this->ite_next
= bucket
->ihgb_head
;
271 bucket
->ihgb_head
= this;
273 *namep
= this->ite_name
;
281 return this != ITE_NULL
;
285 * Routine: ipc_hash_global_insert
287 * Inserts an entry into the global reverse hash table.
289 * The space must be write-locked.
293 ipc_hash_global_insert(
296 __assert_only mach_port_name_t name
,
297 ipc_tree_entry_t entry
)
299 ipc_hash_global_bucket_t bucket
;
301 assert(!is_fast_space(space
));
302 assert(entry
->ite_name
== name
);
303 assert(space
!= IS_NULL
);
304 assert(entry
->ite_space
== space
);
305 assert(obj
!= IO_NULL
);
306 assert(entry
->ite_object
== obj
);
308 space
->is_tree_hash
++;
309 assert(space
->is_tree_hash
<= space
->is_tree_total
);
311 bucket
= &ipc_hash_global_table
[IH_GLOBAL_HASH(space
, obj
)];
314 /* insert at front of bucket */
316 entry
->ite_next
= bucket
->ihgb_head
;
317 bucket
->ihgb_head
= entry
;
323 * Routine: ipc_hash_global_delete
325 * Deletes an entry from the global reverse hash table.
327 * The space must be write-locked.
331 ipc_hash_global_delete(
334 __assert_only mach_port_name_t name
,
335 ipc_tree_entry_t entry
)
337 ipc_hash_global_bucket_t bucket
;
338 ipc_tree_entry_t
this, *last
;
340 assert(!is_fast_space(space
));
341 assert(entry
->ite_name
== name
);
342 assert(space
!= IS_NULL
);
343 assert(entry
->ite_space
== space
);
344 assert(obj
!= IO_NULL
);
345 assert(entry
->ite_object
== obj
);
347 assert(space
->is_tree_hash
> 0);
348 space
->is_tree_hash
--;
350 bucket
= &ipc_hash_global_table
[IH_GLOBAL_HASH(space
, obj
)];
353 for (last
= &bucket
->ihgb_head
;
354 (this = *last
) != ITE_NULL
;
355 last
= &this->ite_next
) {
357 /* found it; remove from bucket */
359 *last
= this->ite_next
;
363 assert(this != ITE_NULL
);
369 * Each space has a local reverse hash table, which holds
370 * entries from the space's table. In fact, the hash table
371 * just uses a field (ie_index) in the table itself.
373 * The local hash table is an open-addressing hash table,
374 * which means that when a collision occurs, instead of
375 * throwing the entry into a bucket, the entry is rehashed
376 * to another position in the table. In this case the rehash
377 * is very simple: linear probing (ie, just increment the position).
378 * This simple rehash makes deletions tractable (they're still a pain),
379 * but it means that collisions tend to build up into clumps.
381 * Because at least one entry in the table (index 0) is always unused,
382 * there will always be room in the reverse hash table. If a table
383 * with n slots gets completely full, the reverse hash table will
384 * have one giant clump of n-1 slots and one free slot somewhere.
385 * Because entries are only entered into the reverse table if they
386 * are pure send rights (not receive, send-once, port-set,
387 * or dead-name rights), and free entries of course aren't entered,
388 * I expect the reverse hash table won't get unreasonably full.
390 * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
391 * pp. 135-142.) may be desirable here. They can dramatically help
392 * unsuccessful lookups. But unsuccessful lookups are almost always
393 * followed by insertions, and those slow down somewhat. They
394 * also can help deletions somewhat. Successful lookups aren't affected.
395 * So possibly a small win; probably nothing significant.
398 #define IH_LOCAL_HASH(obj, size) \
399 ((((mach_port_index_t) (obj)) >> 6) % (size))
402 * Routine: ipc_hash_local_lookup
404 * Converts (space, obj) -> (name, entry).
405 * Looks in the space's local table, for table entries.
406 * Returns TRUE if an entry was found.
408 * The space must be locked (read or write) throughout.
412 ipc_hash_local_lookup(
415 mach_port_name_t
*namep
,
419 ipc_entry_num_t size
;
420 mach_port_index_t hindex
, index
;
422 assert(space
!= IS_NULL
);
423 assert(obj
!= IO_NULL
);
425 table
= space
->is_table
;
426 size
= space
->is_table_size
;
427 hindex
= IH_LOCAL_HASH(obj
, size
);
430 * Ideally, table[hindex].ie_index is the name we want.
431 * However, must check ie_object to verify this,
432 * because collisions can happen. In case of a collision,
433 * search farther along in the clump.
436 while ((index
= table
[hindex
].ie_index
) != 0) {
437 ipc_entry_t entry
= &table
[index
];
439 if (entry
->ie_object
== obj
) {
441 *namep
= MACH_PORT_MAKE(index
,
442 IE_BITS_GEN(entry
->ie_bits
));
446 if (++hindex
== size
)
454 * Routine: ipc_hash_local_insert
456 * Inserts an entry into the space's reverse hash table.
458 * The space must be write-locked.
462 ipc_hash_local_insert(
465 mach_port_index_t index
,
466 __assert_only ipc_entry_t entry
)
469 ipc_entry_num_t size
;
470 mach_port_index_t hindex
;
473 assert(space
!= IS_NULL
);
474 assert(obj
!= IO_NULL
);
476 table
= space
->is_table
;
477 size
= space
->is_table_size
;
478 hindex
= IH_LOCAL_HASH(obj
, size
);
480 assert(entry
== &table
[index
]);
481 assert(entry
->ie_object
== obj
);
484 * We want to insert at hindex, but there may be collisions.
485 * If a collision occurs, search for the end of the clump
489 while (table
[hindex
].ie_index
!= 0) {
490 if (++hindex
== size
)
494 table
[hindex
].ie_index
= index
;
498 * Routine: ipc_hash_local_delete
500 * Deletes an entry from the space's reverse hash table.
502 * The space must be write-locked.
506 ipc_hash_local_delete(
509 mach_port_index_t index
,
510 __assert_only ipc_entry_t entry
)
513 ipc_entry_num_t size
;
514 mach_port_index_t hindex
, dindex
;
516 assert(index
!= MACH_PORT_NULL
);
517 assert(space
!= IS_NULL
);
518 assert(obj
!= IO_NULL
);
520 table
= space
->is_table
;
521 size
= space
->is_table_size
;
522 hindex
= IH_LOCAL_HASH(obj
, size
);
524 assert(entry
== &table
[index
]);
525 assert(entry
->ie_object
== obj
);
528 * First check we have the right hindex for this index.
529 * In case of collision, we have to search farther
530 * along in this clump.
533 while (table
[hindex
].ie_index
!= index
) {
534 if (++hindex
== size
)
539 * Now we want to set table[hindex].ie_index = 0.
540 * But if we aren't the last index in a clump,
541 * this might cause problems for lookups of objects
542 * farther along in the clump that are displaced
543 * due to collisions. Searches for them would fail
544 * at hindex instead of succeeding.
546 * So we must check the clump after hindex for objects
547 * that are so displaced, and move one up to the new hole.
549 * hindex - index of new hole in the clump
550 * dindex - index we are checking for a displaced object
552 * When we move a displaced object up into the hole,
553 * it creates a new hole, and we have to repeat the process
554 * until we get to the end of the clump.
557 for (dindex
= hindex
; index
!= 0; hindex
= dindex
) {
559 mach_port_index_t tindex
;
562 if (++dindex
== size
)
564 assert(dindex
!= hindex
);
566 /* are we at the end of the clump? */
568 index
= table
[dindex
].ie_index
;
572 /* is this a displaced object? */
574 tobj
= table
[index
].ie_object
;
575 assert(tobj
!= IO_NULL
);
576 tindex
= IH_LOCAL_HASH(tobj
, size
);
578 if ((dindex
< hindex
) ?
579 ((dindex
< tindex
) && (tindex
<= hindex
)) :
580 ((dindex
< tindex
) || (tindex
<= hindex
)))
584 table
[hindex
].ie_index
= index
;
589 * Routine: ipc_hash_init
591 * Initialize the reverse hash table implementation.
599 /* if not configured, initialize ipc_hash_global_size */
601 if (ipc_hash_global_size
== 0) {
602 ipc_hash_global_size
= ipc_tree_entry_max
>> 8;
603 if (ipc_hash_global_size
< 32)
604 ipc_hash_global_size
= 32;
607 /* make sure it is a power of two */
609 ipc_hash_global_mask
= ipc_hash_global_size
- 1;
610 if ((ipc_hash_global_size
& ipc_hash_global_mask
) != 0) {
613 /* round up to closest power of two */
615 for (bit
= 1;; bit
<<= 1) {
616 ipc_hash_global_mask
|= bit
;
617 ipc_hash_global_size
= ipc_hash_global_mask
+ 1;
619 if ((ipc_hash_global_size
& ipc_hash_global_mask
) == 0)
624 /* allocate ipc_hash_global_table */
626 ipc_hash_global_table
= (ipc_hash_global_bucket_t
)
627 kalloc((vm_size_t
) (ipc_hash_global_size
*
628 sizeof(struct ipc_hash_global_bucket
)));
629 assert(ipc_hash_global_table
!= IHGB_NULL
);
631 /* and initialize it */
633 for (i
= 0; i
< ipc_hash_global_size
; i
++) {
634 ipc_hash_global_bucket_t bucket
;
636 bucket
= &ipc_hash_global_table
[i
];
637 ihgb_lock_init(bucket
);
638 bucket
->ihgb_head
= ITE_NULL
;
645 * Routine: ipc_hash_info
647 * Return information about the global reverse hash table.
648 * Fills the buffer with as much information as possible
649 * and returns the desired size of the buffer.
651 * Nothing locked. The caller should provide
652 * possibly-pageable memory.
658 hash_info_bucket_t
*info
,
659 mach_msg_type_number_t count
)
663 if (ipc_hash_global_size
< count
)
664 count
= ipc_hash_global_size
;
666 for (i
= 0; i
< count
; i
++) {
667 ipc_hash_global_bucket_t bucket
= &ipc_hash_global_table
[i
];
668 unsigned int bucket_count
= 0;
669 ipc_tree_entry_t entry
;
672 for (entry
= bucket
->ihgb_head
;
674 entry
= entry
->ite_next
)
678 /* don't touch pageable memory while holding locks */
679 info
[i
].hib_count
= bucket_count
;
682 return ipc_hash_global_size
;
685 #endif /* MACH_IPC_DEBUG */