/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  File:   ipc/ipc_hash.c
 *
 *  Entry hash table operations.
 */
#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/lock.h>
#include <kern/kalloc.h>

#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <mach_ipc_debug.h>

#if MACH_IPC_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif  /* MACH_IPC_DEBUG */
/*
 *  Forward declarations
 */

/* Lookup (space, obj) in global hash table */
boolean_t ipc_hash_global_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_tree_entry_t        *entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        name,
        ipc_tree_entry_t        entry);

/* Delete an entry from the local reverse hash table */
void ipc_hash_local_delete(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_index_t       index,
        ipc_entry_t             entry);
/*
 *  Routine:    ipc_hash_lookup
 *  Purpose:
 *      Converts (space, obj) -> (name, entry).
 *      Returns TRUE if an entry was found.
 *  Conditions:
 *      The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_entry_t             *entryp)
{
        boolean_t rv;

        rv = ipc_hash_local_lookup(space, obj, namep, entryp);
        if (!rv) {
                /* a fast space never has entries in the global (tree) table */
                assert(!is_fast_space(space) || space->is_tree_hash == 0);
                if (space->is_tree_hash > 0)
                        rv = ipc_hash_global_lookup(space, obj, namep,
                                                    (ipc_tree_entry_t *) entryp);
        }
        return rv;
}
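
/*
 *  Usage sketch (illustrative, not a specific caller): with the space
 *  locked, a caller passes the object it holds a right for and, on a
 *  TRUE return, should use *namep and *entryp only while the space
 *  lock remains held, since entries can move or be freed once the
 *  lock is dropped.
 */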
/*
 *  Routine:    ipc_hash_insert
 *  Purpose:
 *      Inserts an entry into the appropriate reverse hash table,
 *      so that ipc_hash_lookup will find it.
 *  Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_insert(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        name,
        ipc_entry_t             entry)
{
        mach_port_index_t index;

        index = MACH_PORT_INDEX(name);
        if ((index < space->is_table_size) &&
            (entry == &space->is_table[index]))
                ipc_hash_local_insert(space, obj, index, entry);
        else {
                assert(!is_fast_space(space));
                ipc_hash_global_insert(space, obj, name,
                                       (ipc_tree_entry_t) entry);
        }
}
/*
 *  Routine:    ipc_hash_delete
 *  Purpose:
 *      Deletes an entry from the appropriate reverse hash table.
 *  Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_delete(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        name,
        ipc_entry_t             entry)
{
        mach_port_index_t index;

        index = MACH_PORT_INDEX(name);
        if ((index < space->is_table_size) &&
            (entry == &space->is_table[index]))
                ipc_hash_local_delete(space, obj, index, entry);
        else {
                assert(!is_fast_space(space));
                ipc_hash_global_delete(space, obj, name,
                                       (ipc_tree_entry_t) entry);
        }
}
/*
 *  The global reverse hash table holds splay tree entries.
 *  It is a simple open-chaining hash table with singly-linked buckets.
 *  Each bucket is locked separately, with an exclusive lock.
 *  Within each bucket, move-to-front is used.
 */

typedef natural_t ipc_hash_index_t;

ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;

#define IH_GLOBAL_HASH(space, obj)                                      \
        (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +           \
          (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &            \
         ipc_hash_global_mask)
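
/*
 *  Note (illustrative): IH_GLOBAL_HASH mixes the space and object
 *  pointers after discarding their low bits (>> 4 and >> 6), which
 *  carry little information for aligned heap-allocated structures,
 *  then masks the sum down to a bucket index.  For example, with
 *  ipc_hash_global_mask == 63 and hypothetical addresses
 *  space == 0x1000, obj == 0x2440, the index is
 *  ((0x100 + 0x91) & 63) == 17.
 */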
typedef struct ipc_hash_global_bucket {
        decl_mutex_data(,       ihgb_lock_data)
        ipc_tree_entry_t        ihgb_head;
} *ipc_hash_global_bucket_t;

#define IHGB_NULL       ((ipc_hash_global_bucket_t) 0)

#define ihgb_lock_init(ihgb)    mutex_init(&(ihgb)->ihgb_lock_data, 0)
#define ihgb_lock(ihgb)         mutex_lock(&(ihgb)->ihgb_lock_data)
#define ihgb_unlock(ihgb)       mutex_unlock(&(ihgb)->ihgb_lock_data)

ipc_hash_global_bucket_t ipc_hash_global_table;
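
/*
 *  Locking note (observation from the routines below): the global
 *  lookup/insert/delete routines are entered with the space lock
 *  already held (see their Conditions) and take the per-bucket mutex
 *  internally, so the bucket lock always nests inside the space lock
 *  and is held only across short list manipulations.
 */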
/*
 *  Routine:    ipc_hash_global_lookup
 *  Purpose:
 *      Converts (space, obj) -> (name, entry).
 *      Looks in the global table, for splay tree entries.
 *      Returns TRUE if an entry was found.
 *  Conditions:
 *      The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_global_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_tree_entry_t        *entryp)
{
        ipc_hash_global_bucket_t bucket;
        ipc_tree_entry_t this, *last;

        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        assert(!is_fast_space(space));
        bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
        ihgb_lock(bucket);

        if ((this = bucket->ihgb_head) != ITE_NULL) {
                if ((this->ite_object == obj) &&
                    (this->ite_space == space)) {
                        /* found it at front; no need to move */

                        *namep = this->ite_name;
                        *entryp = this;
                } else for (last = &this->ite_next;
                            (this = *last) != ITE_NULL;
                            last = &this->ite_next) {
                        if ((this->ite_object == obj) &&
                            (this->ite_space == space)) {
                                /* found it; move to front */

                                *last = this->ite_next;
                                this->ite_next = bucket->ihgb_head;
                                bucket->ihgb_head = this;

                                *namep = this->ite_name;
                                *entryp = this;
                                break;
                        }
                }
        }

        ihgb_unlock(bucket);
        return this != ITE_NULL;
}
/*
 *  Routine:    ipc_hash_global_insert
 *  Purpose:
 *      Inserts an entry into the global reverse hash table.
 *  Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_global_insert(
        ipc_space_t                     space,
        ipc_object_t                    obj,
        __assert_only mach_port_name_t  name,
        ipc_tree_entry_t                entry)
{
        ipc_hash_global_bucket_t bucket;

        assert(!is_fast_space(space));
        assert(entry->ite_name == name);
        assert(space != IS_NULL);
        assert(entry->ite_space == space);
        assert(obj != IO_NULL);
        assert(entry->ite_object == obj);

        space->is_tree_hash++;
        assert(space->is_tree_hash <= space->is_tree_total);

        bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
        ihgb_lock(bucket);

        /* insert at front of bucket */

        entry->ite_next = bucket->ihgb_head;
        bucket->ihgb_head = entry;

        ihgb_unlock(bucket);
}
/*
 *  Routine:    ipc_hash_global_delete
 *  Purpose:
 *      Deletes an entry from the global reverse hash table.
 *  Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_global_delete(
        ipc_space_t                     space,
        ipc_object_t                    obj,
        __assert_only mach_port_name_t  name,
        ipc_tree_entry_t                entry)
{
        ipc_hash_global_bucket_t bucket;
        ipc_tree_entry_t this, *last;

        assert(!is_fast_space(space));
        assert(entry->ite_name == name);
        assert(space != IS_NULL);
        assert(entry->ite_space == space);
        assert(obj != IO_NULL);
        assert(entry->ite_object == obj);

        assert(space->is_tree_hash > 0);
        space->is_tree_hash--;

        bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
        ihgb_lock(bucket);

        for (last = &bucket->ihgb_head;
             (this = *last) != ITE_NULL;
             last = &this->ite_next) {
                if (this == entry) {
                        /* found it; remove from bucket */

                        *last = this->ite_next;
                        break;
                }
        }
        assert(this != ITE_NULL);

        ihgb_unlock(bucket);
}
/*
 *  Each space has a local reverse hash table, which holds
 *  entries from the space's table.  In fact, the hash table
 *  just uses a field (ie_index) in the table itself.
 *
 *  The local hash table is an open-addressing hash table,
 *  which means that when a collision occurs, instead of
 *  throwing the entry into a bucket, the entry is rehashed
 *  to another position in the table.  In this case the rehash
 *  is very simple: linear probing (ie, just increment the position).
 *  This simple rehash makes deletions tractable (they're still a pain),
 *  but it means that collisions tend to build up into clumps.
 *
 *  Because at least one entry in the table (index 0) is always unused,
 *  there will always be room in the reverse hash table.  If a table
 *  with n slots gets completely full, the reverse hash table will
 *  have one giant clump of n-1 slots and one free slot somewhere.
 *  Because entries are only entered into the reverse table if they
 *  are pure send rights (not receive, send-once, port-set,
 *  or dead-name rights), and free entries of course aren't entered,
 *  I expect the reverse hash table won't get unreasonably full.
 *
 *  Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
 *  pp. 135-142.) may be desirable here.  They can dramatically help
 *  unsuccessful lookups.  But unsuccessful lookups are almost always
 *  followed by insertions, and those slow down somewhat.  They
 *  also can help deletions somewhat.  Successful lookups aren't affected.
 *  So possibly a small win; probably nothing significant.
 */

#define IH_LOCAL_HASH(obj, size)                                \
                ((((mach_port_index_t) (obj)) >> 6) % (size))
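
/*
 *  Illustrative example (hypothetical numbers): with size == 64, two
 *  objects whose pointers hash via IH_LOCAL_HASH to the same slot, say
 *  13, end up occupying consecutive slots 13 and 14; the clump grows at
 *  its end as further colliding entries are inserted, and lookups walk
 *  the clump until they hit a matching object or a zero ie_index.
 */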
/*
 *  Routine:    ipc_hash_local_lookup
 *  Purpose:
 *      Converts (space, obj) -> (name, entry).
 *      Looks in the space's local table, for table entries.
 *      Returns TRUE if an entry was found.
 *  Conditions:
 *      The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_local_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_entry_t             *entryp)
{
        ipc_entry_t table;
        ipc_entry_num_t size;
        mach_port_index_t hindex, index;

        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        table = space->is_table;
        size = space->is_table_size;
        hindex = IH_LOCAL_HASH(obj, size);

        /*
         *  Ideally, table[hindex].ie_index is the name we want.
         *  However, must check ie_object to verify this,
         *  because collisions can happen.  In case of a collision,
         *  search farther along in the clump.
         */

        while ((index = table[hindex].ie_index) != 0) {
                ipc_entry_t entry = &table[index];

                if (entry->ie_object == obj) {
                        *entryp = entry;
                        *namep = MACH_PORT_MAKE(index,
                                                IE_BITS_GEN(entry->ie_bits));
                        return TRUE;
                }

                if (++hindex == size)
                        hindex = 0;
        }

        return FALSE;
}
/*
 *  Routine:    ipc_hash_local_insert
 *  Purpose:
 *      Inserts an entry into the space's reverse hash table.
 *  Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_local_insert(
        ipc_space_t                     space,
        ipc_object_t                    obj,
        mach_port_index_t               index,
        __assert_only ipc_entry_t       entry)
{
        ipc_entry_t table;
        ipc_entry_num_t size;
        mach_port_index_t hindex;

        assert(index != 0);
        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        table = space->is_table;
        size = space->is_table_size;
        hindex = IH_LOCAL_HASH(obj, size);

        assert(entry == &table[index]);
        assert(entry->ie_object == obj);

        /*
         *  We want to insert at hindex, but there may be collisions.
         *  If a collision occurs, search for the end of the clump
         *  and insert there.
         */

        while (table[hindex].ie_index != 0) {
                if (++hindex == size)
                        hindex = 0;
        }

        table[hindex].ie_index = index;
}
/*
 *  Routine:    ipc_hash_local_delete
 *  Purpose:
 *      Deletes an entry from the space's reverse hash table.
 *  Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_local_delete(
        ipc_space_t                     space,
        ipc_object_t                    obj,
        mach_port_index_t               index,
        __assert_only ipc_entry_t       entry)
{
        ipc_entry_t table;
        ipc_entry_num_t size;
        mach_port_index_t hindex, dindex;

        assert(index != MACH_PORT_NULL);
        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        table = space->is_table;
        size = space->is_table_size;
        hindex = IH_LOCAL_HASH(obj, size);

        assert(entry == &table[index]);
        assert(entry->ie_object == obj);

        /*
         *  First check we have the right hindex for this index.
         *  In case of collision, we have to search farther
         *  along in this clump.
         */

        while (table[hindex].ie_index != index) {
                if (++hindex == size)
                        hindex = 0;
        }

        /*
         *  Now we want to set table[hindex].ie_index = 0.
         *  But if we aren't the last index in a clump,
         *  this might cause problems for lookups of objects
         *  farther along in the clump that are displaced
         *  due to collisions.  Searches for them would fail
         *  at hindex instead of succeeding.
         *
         *  So we must check the clump after hindex for objects
         *  that are so displaced, and move one up to the new hole.
         *
         *      hindex - index of new hole in the clump
         *      dindex - index we are checking for a displaced object
         *
         *  When we move a displaced object up into the hole,
         *  it creates a new hole, and we have to repeat the process
         *  until we get to the end of the clump.
         */
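
        /*
         *  Illustrative example (hypothetical slots): suppose the clump is
         *  slots 5..8, slot 6 is being freed (hindex == 6), and the entry
         *  stored at slot 8 originally hashed to 6 (tindex == 6).  That
         *  entry is displaced: leaving slot 6 empty would make lookups for
         *  it stop at 6 and fail, so it is moved up into slot 6 and the
         *  process repeats with the new hole at slot 8.
         */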

        for (dindex = hindex; index != 0; hindex = dindex) {
                for (;;) {
                        mach_port_index_t tindex;
                        ipc_object_t tobj;

                        if (++dindex == size)
                                dindex = 0;
                        assert(dindex != hindex);

                        /* are we at the end of the clump? */

                        index = table[dindex].ie_index;
                        if (index == 0)
                                break;

                        /* is this a displaced object? */

                        tobj = table[index].ie_object;
                        assert(tobj != IO_NULL);
                        tindex = IH_LOCAL_HASH(tobj, size);

                        if ((dindex < hindex) ?
                            ((dindex < tindex) && (tindex <= hindex)) :
                            ((dindex < tindex) || (tindex <= hindex)))
                                break;
                }

                /* move the displaced entry (or 0, ending the clump) into the hole */
                table[hindex].ie_index = index;
        }
}
/*
 *  Routine:    ipc_hash_init
 *  Purpose:
 *      Initialize the reverse hash table implementation.
 */

void
ipc_hash_init(void)
{
        ipc_hash_index_t i;

        /* if not configured, initialize ipc_hash_global_size */

        if (ipc_hash_global_size == 0) {
                ipc_hash_global_size = ipc_tree_entry_max >> 8;
                if (ipc_hash_global_size < 32)
                        ipc_hash_global_size = 32;
        }

        /* make sure it is a power of two */

        ipc_hash_global_mask = ipc_hash_global_size - 1;
        if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
                natural_t bit;

                /* round up to closest power of two */

                for (bit = 1;; bit <<= 1) {
                        ipc_hash_global_mask |= bit;
                        ipc_hash_global_size = ipc_hash_global_mask + 1;

                        if ((ipc_hash_global_size &
                             ipc_hash_global_mask) == 0)
                                break;
                }
        }
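
        /*
         *  Illustrative example (hypothetical value): if
         *  ipc_tree_entry_max >> 8 yields 48, the mask starts at 47
         *  (0b101111); OR-ing in bits 1, 2, 4, 8, 16 makes it 63, so the
         *  size is rounded up to 64 buckets and ipc_hash_global_mask
         *  ends up as 63.
         */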

        /* allocate ipc_hash_global_table */

        ipc_hash_global_table = (ipc_hash_global_bucket_t)
                kalloc((vm_size_t) (ipc_hash_global_size *
                                    sizeof(struct ipc_hash_global_bucket)));
        assert(ipc_hash_global_table != IHGB_NULL);

        /* and initialize it */

        for (i = 0; i < ipc_hash_global_size; i++) {
                ipc_hash_global_bucket_t bucket;

                bucket = &ipc_hash_global_table[i];
                ihgb_lock_init(bucket);
                bucket->ihgb_head = ITE_NULL;
        }
}
#if MACH_IPC_DEBUG

/*
 *  Routine:    ipc_hash_info
 *  Purpose:
 *      Return information about the global reverse hash table.
 *      Fills the buffer with as much information as possible
 *      and returns the desired size of the buffer.
 *  Conditions:
 *      Nothing locked.  The caller should provide
 *      possibly-pageable memory.
 */

ipc_hash_index_t
ipc_hash_info(
        hash_info_bucket_t      *info,
        mach_msg_type_number_t  count)
{
        ipc_hash_index_t i;

        if (ipc_hash_global_size < count)
                count = ipc_hash_global_size;

        for (i = 0; i < count; i++) {
                ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
                unsigned int bucket_count = 0;
                ipc_tree_entry_t entry;

                ihgb_lock(bucket);
                for (entry = bucket->ihgb_head;
                     entry != ITE_NULL;
                     entry = entry->ite_next)
                        bucket_count++;
                ihgb_unlock(bucket);

                /* don't touch pageable memory while holding locks */
                info[i].hib_count = bucket_count;
        }

        return ipc_hash_global_size;
}

#endif  /* MACH_IPC_DEBUG */