/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	ipc/ipc_hash.c
 *
 *	Entry hash table operations.
 */
#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/lock.h>
#include <kern/kalloc.h>

#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <mach_ipc_debug.h>

#if	MACH_IPC_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif	/* MACH_IPC_DEBUG */
/*
 *	Forward declarations
 */

/* Lookup (space, obj) in global hash table */
boolean_t ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry);

/* Delete an entry from the local reverse hash table */
void ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry);
/*
 *	Routine:	ipc_hash_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */
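/*
 *	Table entries (pure send rights held in the space's table) are
 *	found through the space-local open-addressed hash; splay tree
 *	entries are found through the global chained hash, which is
 *	consulted only when the space has tree entries hashed
 *	(is_tree_hash > 0).
 */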
boolean_t
ipc_hash_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	boolean_t rv;

	rv = ipc_hash_local_lookup(space, obj, namep, entryp);
	if (!rv) {
		assert(!is_fast_space(space) || space->is_tree_hash == 0);
		if (space->is_tree_hash > 0)
			rv = ipc_hash_global_lookup(space, obj, namep,
						    (ipc_tree_entry_t *) entryp);
	}
	return rv;
}
/*
 *	Routine:	ipc_hash_insert
 *	Purpose:
 *		Inserts an entry into the appropriate reverse hash table,
 *		so that ipc_hash_lookup will find it.
 *	Conditions:
 *		The space must be write-locked.
 */
void
ipc_hash_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	mach_port_index_t index;

	index = MACH_PORT_INDEX(name);
	if ((index < space->is_table_size) &&
	    (entry == &space->is_table[index]))
		ipc_hash_local_insert(space, obj, index, entry);
	else {
		assert(!is_fast_space(space));
		ipc_hash_global_insert(space, obj, name,
				       (ipc_tree_entry_t) entry);
	}
}
/*
 *	Routine:	ipc_hash_delete
 *	Purpose:
 *		Deletes an entry from the appropriate reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */
void
ipc_hash_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	mach_port_index_t index;

	index = MACH_PORT_INDEX(name);
	if ((index < space->is_table_size) &&
	    (entry == &space->is_table[index]))
		ipc_hash_local_delete(space, obj, index, entry);
	else {
		assert(!is_fast_space(space));
		ipc_hash_global_delete(space, obj, name,
				       (ipc_tree_entry_t) entry);
	}
}
/*
 *	The global reverse hash table holds splay tree entries.
 *	It is a simple open-chaining hash table with singly-linked buckets.
 *	Each bucket is locked separately, with an exclusive lock.
 *	Within each bucket, move-to-front is used.
 */
typedef natural_t ipc_hash_index_t;

ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;
#define IH_GLOBAL_HASH(space, obj)					\
	(((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +		\
	  (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &		\
	 ipc_hash_global_mask)
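/*
 *	The bucket index mixes the space and object pointers, shifting
 *	off their low-order bits (which carry little information because
 *	of allocation alignment) and masking the sum down to the
 *	power-of-two table size chosen in ipc_hash_init.
 */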
typedef struct ipc_hash_global_bucket {
	decl_mutex_data(,	ihgb_lock_data)
	ipc_tree_entry_t	ihgb_head;
} *ipc_hash_global_bucket_t;
#define	IHGB_NULL	((ipc_hash_global_bucket_t) 0)

#define	ihgb_lock_init(ihgb)	mutex_init(&(ihgb)->ihgb_lock_data, \
					   ETAP_IPC_IHGB)
#define	ihgb_lock(ihgb)		mutex_lock(&(ihgb)->ihgb_lock_data)
#define	ihgb_unlock(ihgb)	mutex_unlock(&(ihgb)->ihgb_lock_data)

ipc_hash_global_bucket_t ipc_hash_global_table;
/*
 *	Routine:	ipc_hash_global_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Looks in the global table, for splay tree entries.
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */
boolean_t
ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	assert(!is_fast_space(space));
	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	if ((this = bucket->ihgb_head) != ITE_NULL) {
		if ((this->ite_object == obj) &&
		    (this->ite_space == space)) {
			/* found it at front; no need to move */

			*namep = this->ite_name;
			*entryp = this;
		} else for (last = &this->ite_next;
			    (this = *last) != ITE_NULL;
			    last = &this->ite_next) {
			if ((this->ite_object == obj) &&
			    (this->ite_space == space)) {
				/* found it; move to front */

				*last = this->ite_next;
				this->ite_next = bucket->ihgb_head;
				bucket->ihgb_head = this;

				*namep = this->ite_name;
				*entryp = this;
				break;
			}
		}
	}

	ihgb_unlock(bucket);
	return this != ITE_NULL;
}
/*
 *	Routine:	ipc_hash_global_insert
 *	Purpose:
 *		Inserts an entry into the global reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */
void
ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry)
{
	ipc_hash_global_bucket_t bucket;

	assert(!is_fast_space(space));

	assert(entry->ite_name == name);
	assert(space != IS_NULL);
	assert(entry->ite_space == space);
	assert(obj != IO_NULL);
	assert(entry->ite_object == obj);

	space->is_tree_hash++;
	assert(space->is_tree_hash <= space->is_tree_total);

	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	/* insert at front of bucket */

	entry->ite_next = bucket->ihgb_head;
	bucket->ihgb_head = entry;

	ihgb_unlock(bucket);
}
/*
 *	Routine:	ipc_hash_global_delete
 *	Purpose:
 *		Deletes an entry from the global reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */
void
ipc_hash_global_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	assert(!is_fast_space(space));

	assert(entry->ite_name == name);
	assert(space != IS_NULL);
	assert(entry->ite_space == space);
	assert(obj != IO_NULL);
	assert(entry->ite_object == obj);

	assert(space->is_tree_hash > 0);
	space->is_tree_hash--;

	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	for (last = &bucket->ihgb_head;
	     (this = *last) != ITE_NULL;
	     last = &this->ite_next) {
		if (this == entry) {
			/* found it; remove from bucket */

			*last = this->ite_next;
			break;
		}
	}
	assert(this != ITE_NULL);

	ihgb_unlock(bucket);
}
/*
 *	Each space has a local reverse hash table, which holds
 *	entries from the space's table.  In fact, the hash table
 *	just uses a field (ie_index) in the table itself.
 *
 *	The local hash table is an open-addressing hash table,
 *	which means that when a collision occurs, instead of
 *	throwing the entry into a bucket, the entry is rehashed
 *	to another position in the table.  In this case the rehash
 *	is very simple: linear probing (ie, just increment the position).
 *	This simple rehash makes deletions tractable (they're still a pain),
 *	but it means that collisions tend to build up into clumps.
 *
 *	Because at least one entry in the table (index 0) is always unused,
 *	there will always be room in the reverse hash table.  If a table
 *	with n slots gets completely full, the reverse hash table will
 *	have one giant clump of n-1 slots and one free slot somewhere.
 *	Because entries are only entered into the reverse table if they
 *	are pure send rights (not receive, send-once, port-set,
 *	or dead-name rights), and free entries of course aren't entered,
 *	I expect the reverse hash table won't get unreasonably full.
 *
 *	Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
 *	pp. 135-142.) may be desirable here.  They can dramatically help
 *	unsuccessful lookups.  But unsuccessful lookups are almost always
 *	followed by insertions, and those slow down somewhat.  They
 *	also can help deletions somewhat.  Successful lookups aren't affected.
 *	So possibly a small win; probably nothing significant.
 */
#define	IH_LOCAL_HASH(obj, size)				\
		((((mach_port_index_t) (obj)) >> 6) % (size))
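/*
 *	The object pointer is shifted right by 6 before the modulus
 *	because its low-order bits change little from one object to
 *	the next (objects are allocated with a fixed alignment), so
 *	they would make poor hash bits.
 */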
/*
 *	Routine:	ipc_hash_local_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Looks in the space's local table, for table entries.
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */
boolean_t
ipc_hash_local_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, index;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	/*
	 *	Ideally, table[hindex].ie_index is the name we want.
	 *	However, must check ie_object to verify this,
	 *	because collisions can happen.  In case of a collision,
	 *	search farther along in the clump.
	 */

	while ((index = table[hindex].ie_index) != 0) {
		ipc_entry_t entry = &table[index];

		if (entry->ie_object == obj) {
			*entryp = entry;
			*namep = MACH_PORT_MAKE(index,
						IE_BITS_GEN(entry->ie_bits));
			return TRUE;
		}

		if (++hindex == size)
			hindex = 0;
	}

	return FALSE;
}
/*
 *	Routine:	ipc_hash_local_insert
 *	Purpose:
 *		Inserts an entry into the space's reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */
void
ipc_hash_local_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex;

	assert(index != 0);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 *	We want to insert at hindex, but there may be collisions.
	 *	If a collision occurs, search for the end of the clump
	 *	and insert there.
	 */

	while (table[hindex].ie_index != 0) {
		if (++hindex == size)
			hindex = 0;
	}

	table[hindex].ie_index = index;
}
/*
 *	Routine:	ipc_hash_local_delete
 *	Purpose:
 *		Deletes an entry from the space's reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */
void
ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, dindex;

	assert(index != MACH_PORT_NULL);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 *	First check we have the right hindex for this index.
	 *	In case of collision, we have to search farther
	 *	along in this clump.
	 */

	while (table[hindex].ie_index != index) {
		if (++hindex == size)
			hindex = 0;
	}
	/*
	 *	Now we want to set table[hindex].ie_index = 0.
	 *	But if we aren't the last index in a clump,
	 *	this might cause problems for lookups of objects
	 *	farther along in the clump that are displaced
	 *	due to collisions.  Searches for them would fail
	 *	at hindex instead of succeeding.
	 *
	 *	So we must check the clump after hindex for objects
	 *	that are so displaced, and move one up to the new hole.
	 *
	 *		hindex - index of new hole in the clump
	 *		dindex - index we are checking for a displaced object
	 *
	 *	When we move a displaced object up into the hole,
	 *	it creates a new hole, and we have to repeat the process
	 *	until we get to the end of the clump.
	 */
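	/*
	 *	An object found at dindex may be moved back into the hole
	 *	at hindex only if its natural hash position tindex is not
	 *	in the cyclic range (hindex, dindex].  If it were, the move
	 *	would place the object ahead of its natural position, and a
	 *	later lookup probing forward from tindex could hit an empty
	 *	slot before reaching it.  The three-way comparison in the
	 *	loop below implements this cyclic-range test.
	 */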
	for (dindex = hindex; index != 0; hindex = dindex) {
		for (;;) {
			mach_port_index_t tindex;
			ipc_object_t tobj;

			if (++dindex == size)
				dindex = 0;
			assert(dindex != hindex);

			/* are we at the end of the clump? */

			index = table[dindex].ie_index;
			if (index == 0)
				break;

			/* is this a displaced object? */

			tobj = table[index].ie_object;
			assert(tobj != IO_NULL);
			tindex = IH_LOCAL_HASH(tobj, size);

			if ((dindex < hindex) ?
			    ((dindex < tindex) && (tindex <= hindex)) :
			    ((dindex < tindex) || (tindex <= hindex)))
				break;
		}

		table[hindex].ie_index = index;
	}
}
/*
 *	Routine:	ipc_hash_init
 *	Purpose:
 *		Initialize the reverse hash table implementation.
 */

void
ipc_hash_init(void)
{
	ipc_hash_index_t i;

	/* if not configured, initialize ipc_hash_global_size */
	if (ipc_hash_global_size == 0) {
		ipc_hash_global_size = ipc_tree_entry_max >> 8;
		if (ipc_hash_global_size < 32)
			ipc_hash_global_size = 32;
	}
	/* make sure it is a power of two */

	ipc_hash_global_mask = ipc_hash_global_size - 1;
	if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
		natural_t bit;

		/* round up to closest power of two */
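		/*
		 *	e.g. a configured size of 33 (mask 32) rounds up to
		 *	size 64 (mask 63): the loop ORs successive low bits
		 *	into the mask until size & mask becomes zero.
		 */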
		for (bit = 1;; bit <<= 1) {
			ipc_hash_global_mask |= bit;
			ipc_hash_global_size = ipc_hash_global_mask + 1;

			if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
				break;
		}
	}
	/* allocate ipc_hash_global_table */

	ipc_hash_global_table = (ipc_hash_global_bucket_t)
		kalloc((vm_size_t) (ipc_hash_global_size *
				    sizeof(struct ipc_hash_global_bucket)));
	assert(ipc_hash_global_table != IHGB_NULL);

	/* and initialize it */

	for (i = 0; i < ipc_hash_global_size; i++) {
		ipc_hash_global_bucket_t bucket;

		bucket = &ipc_hash_global_table[i];
		ihgb_lock_init(bucket);
		bucket->ihgb_head = ITE_NULL;
	}
}

#if	MACH_IPC_DEBUG
/*
 *	Routine:	ipc_hash_info
 *	Purpose:
 *		Return information about the global reverse hash table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */
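/*
 *	If the returned size is larger than the count supplied, the
 *	caller's buffer was too small to describe every bucket; it can
 *	be reallocated at the returned size and the call retried.
 */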
ipc_hash_index_t
ipc_hash_info(
	hash_info_bucket_t	*info,
	mach_msg_type_number_t	count)
{
	ipc_hash_index_t i;

	if (ipc_hash_global_size < count)
		count = ipc_hash_global_size;

	for (i = 0; i < count; i++) {
		ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
		unsigned int bucket_count = 0;
		ipc_tree_entry_t entry;

		ihgb_lock(bucket);
		for (entry = bucket->ihgb_head;
		     entry != ITE_NULL;
		     entry = entry->ite_next)
			bucket_count++;
		ihgb_unlock(bucket);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return ipc_hash_global_size;
}

#endif	/* MACH_IPC_DEBUG */