/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	ipc/ipc_hash.c
 *
 *	Entry hash table operations.
 */
#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/lock.h>
#include <kern/kalloc.h>

#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <mach_ipc_debug.h>

#if	MACH_IPC_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif	/* MACH_IPC_DEBUG */
/*
 *	Forward declarations
 */

/* Lookup (space, obj) in global hash table */
boolean_t ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry);

/* Delete an entry from the local reverse hash table */
void ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry);
/*
 *	Routine:	ipc_hash_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	boolean_t rv;

	rv = ipc_hash_local_lookup(space, obj, namep, entryp);
	if (!rv) {
		assert(!is_fast_space(space) || space->is_tree_hash == 0);
		if (space->is_tree_hash > 0)
			rv = ipc_hash_global_lookup(space, obj, namep,
						    (ipc_tree_entry_t *) entryp);
	}
	return rv;
}
/*
 *	Routine:	ipc_hash_insert
 *	Purpose:
 *		Inserts an entry into the appropriate reverse hash table,
 *		so that ipc_hash_lookup will find it.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	mach_port_index_t index;

	index = MACH_PORT_INDEX(name);
	if ((index < space->is_table_size) &&
	    (entry == &space->is_table[index]))
		ipc_hash_local_insert(space, obj, index, entry);
	else {
		assert(!is_fast_space(space));
		ipc_hash_global_insert(space, obj, name,
				       (ipc_tree_entry_t) entry);
	}
}
/*
 *	Routine:	ipc_hash_delete
 *	Purpose:
 *		Deletes an entry from the appropriate reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	mach_port_index_t index;

	index = MACH_PORT_INDEX(name);
	if ((index < space->is_table_size) &&
	    (entry == &space->is_table[index]))
		ipc_hash_local_delete(space, obj, index, entry);
	else {
		assert(!is_fast_space(space));
		ipc_hash_global_delete(space, obj, name,
				       (ipc_tree_entry_t) entry);
	}
}
/*
 *	The global reverse hash table holds splay tree entries.
 *	It is a simple open-chaining hash table with singly-linked buckets.
 *	Each bucket is locked separately, with an exclusive lock.
 *	Within each bucket, move-to-front is used.
 */

typedef natural_t ipc_hash_index_t;

ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;

#define	IH_GLOBAL_HASH(space, obj)					\
	(((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +		\
	  (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &		\
	 ipc_hash_global_mask)
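
/*
 *	Illustrative note (not part of the original code): IH_GLOBAL_HASH
 *	mixes the two pointers into one bucket index.  Kernel pointers are
 *	aligned, so their low bits carry little information; shifting the
 *	space pointer right by 4 and the object pointer right by 6 discards
 *	those bits before the sum is masked down to the table size.  For
 *	example, with a hypothetical 32-bucket table (mask 0x1f):
 *
 *		space == 0x01234560, obj == 0x0abcdec0
 *		(0x01234560 >> 4) + (0x0abcdec0 >> 6)
 *			== 0x00123456 + 0x002af37b == 0x003d27d1
 *		0x003d27d1 & 0x1f == 0x11	-> bucket 17
 */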

typedef struct ipc_hash_global_bucket {
	decl_mutex_data(,	ihgb_lock_data)
	ipc_tree_entry_t	ihgb_head;
} *ipc_hash_global_bucket_t;

#define	IHGB_NULL	((ipc_hash_global_bucket_t) 0)

#define	ihgb_lock_init(ihgb)	mutex_init(&(ihgb)->ihgb_lock_data, \
					   ETAP_IPC_IHGB)
#define	ihgb_lock(ihgb)		mutex_lock(&(ihgb)->ihgb_lock_data)
#define	ihgb_unlock(ihgb)	mutex_unlock(&(ihgb)->ihgb_lock_data)

ipc_hash_global_bucket_t ipc_hash_global_table;
/*
 *	Routine:	ipc_hash_global_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Looks in the global table, for splay tree entries.
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	assert(!is_fast_space(space));
	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	if ((this = bucket->ihgb_head) != ITE_NULL) {
		if ((this->ite_object == obj) &&
		    (this->ite_space == space)) {
			/* found it at front; no need to move */

			*namep = this->ite_name;
			*entryp = this;
		} else for (last = &this->ite_next;
			    (this = *last) != ITE_NULL;
			    last = &this->ite_next) {
			if ((this->ite_object == obj) &&
			    (this->ite_space == space)) {
				/* found it; move to front */

				*last = this->ite_next;
				this->ite_next = bucket->ihgb_head;
				bucket->ihgb_head = this;

				*namep = this->ite_name;
				*entryp = this;
				break;
			}
		}
	}

	ihgb_unlock(bucket);
	return this != ITE_NULL;
}
/*
 *	Routine:	ipc_hash_global_insert
 *	Purpose:
 *		Inserts an entry into the global reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry)
{
	ipc_hash_global_bucket_t bucket;

	assert(!is_fast_space(space));
	assert(entry->ite_name == name);
	assert(space != IS_NULL);
	assert(entry->ite_space == space);
	assert(obj != IO_NULL);
	assert(entry->ite_object == obj);

	space->is_tree_hash++;
	assert(space->is_tree_hash <= space->is_tree_total);

	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	/* insert at front of bucket */

	entry->ite_next = bucket->ihgb_head;
	bucket->ihgb_head = entry;

	ihgb_unlock(bucket);
}
/*
 *	Routine:	ipc_hash_global_delete
 *	Purpose:
 *		Deletes an entry from the global reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_global_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	assert(!is_fast_space(space));
	assert(entry->ite_name == name);
	assert(space != IS_NULL);
	assert(entry->ite_space == space);
	assert(obj != IO_NULL);
	assert(entry->ite_object == obj);

	assert(space->is_tree_hash > 0);
	space->is_tree_hash--;

	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	for (last = &bucket->ihgb_head;
	     (this = *last) != ITE_NULL;
	     last = &this->ite_next) {
		if (this == entry) {
			/* found it; remove from bucket */

			*last = this->ite_next;
			break;
		}
	}
	assert(this != ITE_NULL);

	ihgb_unlock(bucket);
}
/*
 *	Each space has a local reverse hash table, which holds
 *	entries from the space's table.  In fact, the hash table
 *	just uses a field (ie_index) in the table itself.
 *
 *	The local hash table is an open-addressing hash table,
 *	which means that when a collision occurs, instead of
 *	throwing the entry into a bucket, the entry is rehashed
 *	to another position in the table.  In this case the rehash
 *	is very simple: linear probing (ie, just increment the position).
 *	This simple rehash makes deletions tractable (they're still a pain),
 *	but it means that collisions tend to build up into clumps.
 *
 *	Because at least one entry in the table (index 0) is always unused,
 *	there will always be room in the reverse hash table.  If a table
 *	with n slots gets completely full, the reverse hash table will
 *	have one giant clump of n-1 slots and one free slot somewhere.
 *	Because entries are only entered into the reverse table if they
 *	are pure send rights (not receive, send-once, port-set,
 *	or dead-name rights), and free entries of course aren't entered,
 *	I expect the reverse hash table won't get unreasonably full.
 *
 *	Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
 *	pp. 135-142.) may be desirable here.  They can dramatically help
 *	unsuccessful lookups.  But unsuccessful lookups are almost always
 *	followed by insertions, and those slow down somewhat.  They
 *	also can help deletions somewhat.  Successful lookups aren't affected.
 *	So possibly a small win; probably nothing significant.
 */

#define	IH_LOCAL_HASH(obj, size)				\
		((((mach_port_index_t) (obj)) >> 6) % (size))
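
/*
 *	Illustrative sketch (not part of the original code): with linear
 *	probing, colliding entries form a clump.  Suppose, hypothetically,
 *	size == 8 and three objects A, B, C all hash to slot 2 via
 *	IH_LOCAL_HASH:
 *
 *		insert A -> slot 2 is free, its ie_index is set there
 *		insert B -> slot 2 is taken, probe to 3, set there
 *		insert C -> slots 2 and 3 are taken, probe to 4, set there
 *
 *	A lookup for C starts at slot 2 and walks 2 -> 3 -> 4, comparing
 *	ie_object at each step, stopping only at a slot whose ie_index is 0.
 *	Deleting B cannot simply zero slot 3, or the later probe for C would
 *	stop early; ipc_hash_local_delete's hole-filling loop repairs that.
 */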
/*
 *	Routine:	ipc_hash_local_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Looks in the space's local table, for table entries.
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_local_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, index;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	/*
	 *	Ideally, table[hindex].ie_index is the name we want.
	 *	However, must check ie_object to verify this,
	 *	because collisions can happen.  In case of a collision,
	 *	search farther along in the clump.
	 */

	while ((index = table[hindex].ie_index) != 0) {
		ipc_entry_t entry = &table[index];

		if (entry->ie_object == obj) {
			*entryp = entry;
			*namep = MACH_PORT_MAKE(index,
						IE_BITS_GEN(entry->ie_bits));
			return TRUE;
		}

		if (++hindex == size)
			hindex = 0;
	}

	return FALSE;
}
/*
 *	Routine:	ipc_hash_local_insert
 *	Purpose:
 *		Inserts an entry into the space's reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_local_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 *	We want to insert at hindex, but there may be collisions.
	 *	If a collision occurs, search for the end of the clump
	 *	and insert there.
	 */

	while (table[hindex].ie_index != 0) {
		if (++hindex == size)
			hindex = 0;
	}

	table[hindex].ie_index = index;
}
/*
 *	Routine:	ipc_hash_local_delete
 *	Purpose:
 *		Deletes an entry from the space's reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, dindex;

	assert(index != MACH_PORT_NULL);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 *	First check we have the right hindex for this index.
	 *	In case of collision, we have to search farther
	 *	along in this clump.
	 */

	while (table[hindex].ie_index != index) {
		if (++hindex == size)
			hindex = 0;
	}

	/*
	 *	Now we want to set table[hindex].ie_index = 0.
	 *	But if we aren't the last index in a clump,
	 *	this might cause problems for lookups of objects
	 *	farther along in the clump that are displaced
	 *	due to collisions.  Searches for them would fail
	 *	at hindex instead of succeeding.
	 *
	 *	So we must check the clump after hindex for objects
	 *	that are so displaced, and move one up to the new hole.
	 *
	 *	hindex - index of new hole in the clump
	 *	dindex - index we are checking for a displaced object
	 *
	 *	When we move a displaced object up into the hole,
	 *	it creates a new hole, and we have to repeat the process
	 *	until we get to the end of the clump.
	 */
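
	/*
	 *	Worked example (illustration only, not part of the original
	 *	code), assuming a hypothetical table of size 8: say the hole
	 *	is at hindex == 6 and the scan has wrapped around the end of
	 *	the table, so dindex == 1.  The entry found at slot 1 may be
	 *	moved into the hole only if its home slot tindex lies
	 *	cyclically in (dindex, hindex], i.e. in slots 2..6; the first
	 *	arm of the conditional below tests exactly that.  If tindex
	 *	is 7, 0, or 1, the entry is not displaced past the hole, so
	 *	it is left alone and the scan continues.
	 */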

	for (dindex = hindex; index != 0; hindex = dindex) {
		for (;;) {
			mach_port_index_t tindex;
			ipc_object_t tobj;

			if (++dindex == size)
				dindex = 0;
			assert(dindex != hindex);

			/* are we at the end of the clump? */

			index = table[dindex].ie_index;
			if (index == 0)
				break;

			/* is this a displaced object? */

			tobj = table[index].ie_object;
			assert(tobj != IO_NULL);
			tindex = IH_LOCAL_HASH(tobj, size);

			if ((dindex < hindex) ?
			    ((dindex < tindex) && (tindex <= hindex)) :
			    ((dindex < tindex) || (tindex <= hindex)))
				break;
		}

		table[hindex].ie_index = index;
	}
}
/*
 *	Routine:	ipc_hash_init
 *	Purpose:
 *		Initialize the reverse hash table implementation.
 */

void
ipc_hash_init(void)
{
	ipc_hash_index_t i;

	/* if not configured, initialize ipc_hash_global_size */

	if (ipc_hash_global_size == 0) {
		ipc_hash_global_size = ipc_tree_entry_max >> 8;
		if (ipc_hash_global_size < 32)
			ipc_hash_global_size = 32;
	}

	/* make sure it is a power of two */
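
	/*
	 *	Worked example (illustration only, not part of the original
	 *	code): a size that is already a power of two, say 64, gives
	 *	mask 63 and 64 & 63 == 0, so no rounding is needed.  A size
	 *	of, say, 48 gives mask 47 and 48 & 47 == 32, so the loop
	 *	below ORs successive bits into the mask until it reaches 63,
	 *	which bumps the size to 64, the next power of two.
	 */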

	ipc_hash_global_mask = ipc_hash_global_size - 1;
	if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
		natural_t bit;

		/* round up to closest power of two */

		for (bit = 1;; bit <<= 1) {
			ipc_hash_global_mask |= bit;
			ipc_hash_global_size = ipc_hash_global_mask + 1;

			if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
				break;
		}
	}

	/* allocate ipc_hash_global_table */

	ipc_hash_global_table = (ipc_hash_global_bucket_t)
		kalloc((vm_size_t) (ipc_hash_global_size *
				    sizeof(struct ipc_hash_global_bucket)));
	assert(ipc_hash_global_table != IHGB_NULL);

	/* and initialize it */

	for (i = 0; i < ipc_hash_global_size; i++) {
		ipc_hash_global_bucket_t bucket;

		bucket = &ipc_hash_global_table[i];
		ihgb_lock_init(bucket);
		bucket->ihgb_head = ITE_NULL;
	}
}
#if	MACH_IPC_DEBUG

/*
 *	Routine:	ipc_hash_info
 *	Purpose:
 *		Return information about the global reverse hash table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

ipc_hash_index_t
ipc_hash_info(
	hash_info_bucket_t	*info,
	mach_msg_type_number_t	count)
{
	ipc_hash_index_t i;

	if (ipc_hash_global_size < count)
		count = ipc_hash_global_size;

	for (i = 0; i < count; i++) {
		ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
		unsigned int bucket_count = 0;
		ipc_tree_entry_t entry;

		ihgb_lock(bucket);
		for (entry = bucket->ihgb_head;
		     entry != ITE_NULL;
		     entry = entry->ite_next)
			bucket_count++;
		ihgb_unlock(bucket);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return ipc_hash_global_size;
}

#endif	/* MACH_IPC_DEBUG */