/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_hash.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Entry hash table operations.
 */
65
#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
#include <ipc/port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <mach_ipc_debug.h>

#if MACH_IPC_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif	/* MACH_IPC_DEBUG */
85
/*
 *	Forward declarations
 */

/* Lookup (space, obj) in global hash table */
boolean_t ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry);

/* Delete an entry from the local reverse hash table */
void ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry);
111/*
112 * Routine: ipc_hash_lookup
113 * Purpose:
114 * Converts (space, obj) -> (name, entry).
115 * Returns TRUE if an entry was found.
116 * Conditions:
117 * The space must be locked (read or write) throughout.
118 */
119
120boolean_t
121ipc_hash_lookup(
122 ipc_space_t space,
123 ipc_object_t obj,
124 mach_port_name_t *namep,
125 ipc_entry_t *entryp)
126{
127 boolean_t rv;
128
129 rv = ipc_hash_local_lookup(space, obj, namep, entryp);
130 if (!rv) {
131 assert(!is_fast_space(space) || space->is_tree_hash == 0);
132 if (space->is_tree_hash > 0)
133 rv = ipc_hash_global_lookup(space, obj, namep,
134 (ipc_tree_entry_t *) entryp);
135 }
136 return (rv);
137}
138
139/*
140 * Routine: ipc_hash_insert
141 * Purpose:
142 * Inserts an entry into the appropriate reverse hash table,
143 * so that ipc_hash_lookup will find it.
144 * Conditions:
145 * The space must be write-locked.
146 */
147
148void
149ipc_hash_insert(
150 ipc_space_t space,
151 ipc_object_t obj,
152 mach_port_name_t name,
153 ipc_entry_t entry)
154{
155 mach_port_index_t index;
156
157 index = MACH_PORT_INDEX(name);
158 if ((index < space->is_table_size) &&
159 (entry == &space->is_table[index]))
160 ipc_hash_local_insert(space, obj, index, entry);
161 else {
162 assert(!is_fast_space(space));
163 ipc_hash_global_insert(space, obj, name,
164 (ipc_tree_entry_t) entry);
165 }
166}
167
168/*
169 * Routine: ipc_hash_delete
170 * Purpose:
171 * Deletes an entry from the appropriate reverse hash table.
172 * Conditions:
173 * The space must be write-locked.
174 */
175
176void
177ipc_hash_delete(
178 ipc_space_t space,
179 ipc_object_t obj,
180 mach_port_name_t name,
181 ipc_entry_t entry)
182{
183 mach_port_index_t index;
184
185 index = MACH_PORT_INDEX(name);
186 if ((index < space->is_table_size) &&
187 (entry == &space->is_table[index]))
188 ipc_hash_local_delete(space, obj, index, entry);
189 else {
190 assert(!is_fast_space(space));
191 ipc_hash_global_delete(space, obj, name,
192 (ipc_tree_entry_t) entry);
193 }
194}
195
/*
 *	The global reverse hash table holds splay tree entries.
 *	It is a simple open-chaining hash table with singly-linked buckets.
 *	Each bucket is locked separately, with an exclusive lock.
 *	Within each bucket, move-to-front is used.
 */

typedef natural_t ipc_hash_index_t;

/* Number of buckets and the corresponding index mask; the mask only
 * works because the size is forced to a power of two in ipc_hash_init. */
ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;

/* Mix the space and object pointers into a bucket index; the shifts
 * discard the low-order bits of each pointer before combining them. */
#define IH_GLOBAL_HASH(space, obj)					\
	(((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +		\
	  (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &		\
	 ipc_hash_global_mask)

/* One bucket: its own mutex plus the head of a singly-linked chain. */
typedef struct ipc_hash_global_bucket {
	decl_mutex_data(,	ihgb_lock_data)
	ipc_tree_entry_t	ihgb_head;
} *ipc_hash_global_bucket_t;

#define	IHGB_NULL	((ipc_hash_global_bucket_t) 0)

#define	ihgb_lock_init(ihgb)	mutex_init(&(ihgb)->ihgb_lock_data, 0)
#define	ihgb_lock(ihgb)		mutex_lock(&(ihgb)->ihgb_lock_data)
#define	ihgb_unlock(ihgb)	mutex_unlock(&(ihgb)->ihgb_lock_data)

/* Bucket array, allocated once by ipc_hash_init. */
ipc_hash_global_bucket_t ipc_hash_global_table;
225
226/*
227 * Routine: ipc_hash_global_lookup
228 * Purpose:
229 * Converts (space, obj) -> (name, entry).
230 * Looks in the global table, for splay tree entries.
231 * Returns TRUE if an entry was found.
232 * Conditions:
233 * The space must be locked (read or write) throughout.
234 */
235
236boolean_t
237ipc_hash_global_lookup(
238 ipc_space_t space,
239 ipc_object_t obj,
240 mach_port_name_t *namep,
241 ipc_tree_entry_t *entryp)
242{
243 ipc_hash_global_bucket_t bucket;
244 ipc_tree_entry_t this, *last;
245
246 assert(space != IS_NULL);
247 assert(obj != IO_NULL);
248
249 assert(!is_fast_space(space));
250 bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
251 ihgb_lock(bucket);
252
253 if ((this = bucket->ihgb_head) != ITE_NULL) {
254 if ((this->ite_object == obj) &&
255 (this->ite_space == space)) {
256 /* found it at front; no need to move */
257
258 *namep = this->ite_name;
259 *entryp = this;
260 } else for (last = &this->ite_next;
261 (this = *last) != ITE_NULL;
262 last = &this->ite_next) {
263 if ((this->ite_object == obj) &&
264 (this->ite_space == space)) {
265 /* found it; move to front */
266
267 *last = this->ite_next;
268 this->ite_next = bucket->ihgb_head;
269 bucket->ihgb_head = this;
270
271 *namep = this->ite_name;
272 *entryp = this;
273 break;
274 }
275 }
276 }
277
278 ihgb_unlock(bucket);
279 return this != ITE_NULL;
280}
281
282/*
283 * Routine: ipc_hash_global_insert
284 * Purpose:
285 * Inserts an entry into the global reverse hash table.
286 * Conditions:
287 * The space must be write-locked.
288 */
289
290void
291ipc_hash_global_insert(
91447636
A
292 ipc_space_t space,
293 ipc_object_t obj,
294 __assert_only mach_port_name_t name,
295 ipc_tree_entry_t entry)
1c79356b
A
296{
297 ipc_hash_global_bucket_t bucket;
298
1c79356b 299 assert(!is_fast_space(space));
1c79356b
A
300 assert(entry->ite_name == name);
301 assert(space != IS_NULL);
302 assert(entry->ite_space == space);
303 assert(obj != IO_NULL);
304 assert(entry->ite_object == obj);
305
306 space->is_tree_hash++;
307 assert(space->is_tree_hash <= space->is_tree_total);
308
309 bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
310 ihgb_lock(bucket);
311
312 /* insert at front of bucket */
313
314 entry->ite_next = bucket->ihgb_head;
315 bucket->ihgb_head = entry;
316
317 ihgb_unlock(bucket);
318}
319
320/*
321 * Routine: ipc_hash_global_delete
322 * Purpose:
323 * Deletes an entry from the global reverse hash table.
324 * Conditions:
325 * The space must be write-locked.
326 */
327
328void
329ipc_hash_global_delete(
91447636
A
330 ipc_space_t space,
331 ipc_object_t obj,
332 __assert_only mach_port_name_t name,
333 ipc_tree_entry_t entry)
1c79356b
A
334{
335 ipc_hash_global_bucket_t bucket;
336 ipc_tree_entry_t this, *last;
337
338 assert(!is_fast_space(space));
1c79356b
A
339 assert(entry->ite_name == name);
340 assert(space != IS_NULL);
341 assert(entry->ite_space == space);
342 assert(obj != IO_NULL);
343 assert(entry->ite_object == obj);
344
345 assert(space->is_tree_hash > 0);
346 space->is_tree_hash--;
347
348 bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
349 ihgb_lock(bucket);
350
351 for (last = &bucket->ihgb_head;
352 (this = *last) != ITE_NULL;
353 last = &this->ite_next) {
354 if (this == entry) {
355 /* found it; remove from bucket */
356
357 *last = this->ite_next;
358 break;
359 }
360 }
361 assert(this != ITE_NULL);
362
363 ihgb_unlock(bucket);
364}
365
/*
 *	Each space has a local reverse hash table, which holds
 *	entries from the space's table.  In fact, the hash table
 *	just uses a field (ie_index) in the table itself.
 *
 *	The local hash table is an open-addressing hash table,
 *	which means that when a collision occurs, instead of
 *	throwing the entry into a bucket, the entry is rehashed
 *	to another position in the table.  In this case the rehash
 *	is very simple: linear probing (ie, just increment the position).
 *	This simple rehash makes deletions tractable (they're still a pain),
 *	but it means that collisions tend to build up into clumps.
 *
 *	Because at least one entry in the table (index 0) is always unused,
 *	there will always be room in the reverse hash table.  If a table
 *	with n slots gets completely full, the reverse hash table will
 *	have one giant clump of n-1 slots and one free slot somewhere.
 *	Because entries are only entered into the reverse table if they
 *	are pure send rights (not receive, send-once, port-set,
 *	or dead-name rights), and free entries of course aren't entered,
 *	I expect the reverse hash table won't get unreasonably full.
 *
 *	Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
 *	pp. 135-142.) may be desirable here.  They can dramatically help
 *	unsuccessful lookups.  But unsuccessful lookups are almost always
 *	followed by insertions, and those slow down somewhat.  They
 *	also can help deletions somewhat.  Successful lookups aren't affected.
 *	So possibly a small win; probably nothing significant.
 */

/* Home slot for an object: drop the object pointer's low-order bits,
 * then reduce modulo the table size (size is NOT a power of two here). */
#define IH_LOCAL_HASH(obj, size)				\
		((((mach_port_index_t) (obj)) >> 6) % (size))
398
399/*
400 * Routine: ipc_hash_local_lookup
401 * Purpose:
402 * Converts (space, obj) -> (name, entry).
403 * Looks in the space's local table, for table entries.
404 * Returns TRUE if an entry was found.
405 * Conditions:
406 * The space must be locked (read or write) throughout.
407 */
408
409boolean_t
410ipc_hash_local_lookup(
411 ipc_space_t space,
412 ipc_object_t obj,
413 mach_port_name_t *namep,
414 ipc_entry_t *entryp)
415{
416 ipc_entry_t table;
417 ipc_entry_num_t size;
418 mach_port_index_t hindex, index;
419
420 assert(space != IS_NULL);
421 assert(obj != IO_NULL);
422
423 table = space->is_table;
424 size = space->is_table_size;
425 hindex = IH_LOCAL_HASH(obj, size);
426
427 /*
428 * Ideally, table[hindex].ie_index is the name we want.
429 * However, must check ie_object to verify this,
430 * because collisions can happen. In case of a collision,
431 * search farther along in the clump.
432 */
433
434 while ((index = table[hindex].ie_index) != 0) {
435 ipc_entry_t entry = &table[index];
436
437 if (entry->ie_object == obj) {
438 *entryp = entry;
439 *namep = MACH_PORT_MAKE(index,
440 IE_BITS_GEN(entry->ie_bits));
441 return TRUE;
442 }
443
444 if (++hindex == size)
445 hindex = 0;
446 }
447
448 return FALSE;
449}
450
451/*
452 * Routine: ipc_hash_local_insert
453 * Purpose:
454 * Inserts an entry into the space's reverse hash table.
455 * Conditions:
456 * The space must be write-locked.
457 */
458
459void
460ipc_hash_local_insert(
91447636
A
461 ipc_space_t space,
462 ipc_object_t obj,
463 mach_port_index_t index,
464 __assert_only ipc_entry_t entry)
1c79356b
A
465{
466 ipc_entry_t table;
467 ipc_entry_num_t size;
468 mach_port_index_t hindex;
469
470 assert(index != 0);
471 assert(space != IS_NULL);
472 assert(obj != IO_NULL);
473
474 table = space->is_table;
475 size = space->is_table_size;
476 hindex = IH_LOCAL_HASH(obj, size);
477
478 assert(entry == &table[index]);
479 assert(entry->ie_object == obj);
480
481 /*
482 * We want to insert at hindex, but there may be collisions.
483 * If a collision occurs, search for the end of the clump
484 * and insert there.
485 */
486
487 while (table[hindex].ie_index != 0) {
488 if (++hindex == size)
489 hindex = 0;
490 }
491
492 table[hindex].ie_index = index;
493}
494
/*
 *	Routine:	ipc_hash_local_delete
 *	Purpose:
 *		Deletes an entry from the space's reverse hash table.
 *		Uses the classic linear-probing deletion fixup: after
 *		emptying a slot, displaced entries later in the clump
 *		are moved back to fill the hole.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	__assert_only ipc_entry_t entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, dindex;

	assert(index != MACH_PORT_NULL);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 * First check we have the right hindex for this index.
	 * In case of collision, we have to search farther
	 * along in this clump.
	 */

	while (table[hindex].ie_index != index) {
		if (++hindex == size)
			hindex = 0;
	}

	/*
	 * Now we want to set table[hindex].ie_index = 0.
	 * But if we aren't the last index in a clump,
	 * this might cause problems for lookups of objects
	 * farther along in the clump that are displaced
	 * due to collisions.  Searches for them would fail
	 * at hindex instead of succeeding.
	 *
	 * So we must check the clump after hindex for objects
	 * that are so displaced, and move one up to the new hole.
	 *
	 *	hindex - index of new hole in the clump
	 *	dindex - index we are checking for a displaced object
	 *
	 * When we move a displaced object up into the hole,
	 * it creates a new hole, and we have to repeat the process
	 * until we get to the end of the clump.
	 */

	/* outer loop: index != 0 holds on entry (asserted above); it
	 * becomes 0 once the inner scan reaches the end of the clump */
	for (dindex = hindex; index != 0; hindex = dindex) {
		for (;;) {
			mach_port_index_t tindex;
			ipc_object_t tobj;

			if (++dindex == size)
				dindex = 0;
			assert(dindex != hindex);

			/* are we at the end of the clump? */

			index = table[dindex].ie_index;
			if (index == 0)
				break;

			/* is this a displaced object? */

			tobj = table[index].ie_object;
			assert(tobj != IO_NULL);
			tindex = IH_LOCAL_HASH(tobj, size);

			/* break (and move this entry into the hole) when the
			 * entry's home slot tindex lies cyclically within
			 * (dindex, hindex] — i.e. the hole at hindex is on
			 * its probe path, so the move keeps it findable */
			if ((dindex < hindex) ?
			    ((dindex < tindex) && (tindex <= hindex)) :
			    ((dindex < tindex) || (tindex <= hindex)))
				break;
		}

		/* fill the hole (or clear it, when index == 0 ends the loop) */
		table[hindex].ie_index = index;
	}
}
585
586/*
587 * Routine: ipc_hash_init
588 * Purpose:
589 * Initialize the reverse hash table implementation.
590 */
591
592void
593ipc_hash_init(void)
594{
595 ipc_hash_index_t i;
596
597 /* if not configured, initialize ipc_hash_global_size */
598
599 if (ipc_hash_global_size == 0) {
600 ipc_hash_global_size = ipc_tree_entry_max >> 8;
601 if (ipc_hash_global_size < 32)
602 ipc_hash_global_size = 32;
603 }
604
605 /* make sure it is a power of two */
606
607 ipc_hash_global_mask = ipc_hash_global_size - 1;
608 if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
609 natural_t bit;
610
611 /* round up to closest power of two */
612
613 for (bit = 1;; bit <<= 1) {
614 ipc_hash_global_mask |= bit;
615 ipc_hash_global_size = ipc_hash_global_mask + 1;
616
617 if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
618 break;
619 }
620 }
621
622 /* allocate ipc_hash_global_table */
623
624 ipc_hash_global_table = (ipc_hash_global_bucket_t)
625 kalloc((vm_size_t) (ipc_hash_global_size *
626 sizeof(struct ipc_hash_global_bucket)));
627 assert(ipc_hash_global_table != IHGB_NULL);
628
629 /* and initialize it */
630
631 for (i = 0; i < ipc_hash_global_size; i++) {
632 ipc_hash_global_bucket_t bucket;
633
634 bucket = &ipc_hash_global_table[i];
635 ihgb_lock_init(bucket);
636 bucket->ihgb_head = ITE_NULL;
637 }
638}
639
#if MACH_IPC_DEBUG

/*
 *	Routine:	ipc_hash_size
 *	Purpose:
 *		Return the size of the global reverse hash table.
 *		(Debug-only accessor, compiled under MACH_IPC_DEBUG.)
 */
natural_t
ipc_hash_size(void)
{
	return ipc_hash_global_size;
}
652
1c79356b
A
653/*
654 * Routine: ipc_hash_info
655 * Purpose:
656 * Return information about the global reverse hash table.
657 * Fills the buffer with as much information as possible
658 * and returns the desired size of the buffer.
659 * Conditions:
660 * Nothing locked. The caller should provide
661 * possibly-pageable memory.
662 */
663
664
665ipc_hash_index_t
666ipc_hash_info(
667 hash_info_bucket_t *info,
2d21ac55 668 natural_t count)
1c79356b
A
669{
670 ipc_hash_index_t i;
671
672 if (ipc_hash_global_size < count)
673 count = ipc_hash_global_size;
674
675 for (i = 0; i < count; i++) {
676 ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
677 unsigned int bucket_count = 0;
678 ipc_tree_entry_t entry;
679
680 ihgb_lock(bucket);
681 for (entry = bucket->ihgb_head;
682 entry != ITE_NULL;
683 entry = entry->ite_next)
684 bucket_count++;
685 ihgb_unlock(bucket);
686
687 /* don't touch pageable memory while holding locks */
688 info[i].hib_count = bucket_count;
689 }
690
691 return ipc_hash_global_size;
692}
693
694#endif /* MACH_IPC_DEBUG */