/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_hash.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Entry hash table operations.
 */

#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
#include <ipc/port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <mach_ipc_debug.h>

#if MACH_IPC_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif	/* MACH_IPC_DEBUG */

/*
 *	Forward declarations
 */

/* Lookup (space, obj) in global hash table */
boolean_t ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry);

/* Delete an entry from the local reverse hash table */
void ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry);

/*
 *	Routine:	ipc_hash_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	boolean_t rv;

	rv = ipc_hash_local_lookup(space, obj, namep, entryp);
	if (!rv) {
		assert(!is_fast_space(space) || space->is_tree_hash == 0);
		if (space->is_tree_hash > 0)
			rv = ipc_hash_global_lookup(space, obj, namep,
				(ipc_tree_entry_t *) entryp);
	}
	return (rv);
}

/*
 *	Routine:	ipc_hash_insert
 *	Purpose:
 *		Inserts an entry into the appropriate reverse hash table,
 *		so that ipc_hash_lookup will find it.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	mach_port_index_t index;

	index = MACH_PORT_INDEX(name);
	if ((index < space->is_table_size) &&
	    (entry == &space->is_table[index]))
		ipc_hash_local_insert(space, obj, index, entry);
	else {
		assert(!is_fast_space(space));
		ipc_hash_global_insert(space, obj, name,
			(ipc_tree_entry_t) entry);
	}
}

/*
 *	Routine:	ipc_hash_delete
 *	Purpose:
 *		Deletes an entry from the appropriate reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_entry_t		entry)
{
	mach_port_index_t index;

	index = MACH_PORT_INDEX(name);
	if ((index < space->is_table_size) &&
	    (entry == &space->is_table[index]))
		ipc_hash_local_delete(space, obj, index, entry);
	else {
		assert(!is_fast_space(space));
		ipc_hash_global_delete(space, obj, name,
			(ipc_tree_entry_t) entry);
	}
}

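/*
 *	Illustrative usage sketch (hypothetical caller, simplified):
 *	the reverse tables let a send right being copied out to a
 *	space reuse an existing entry for the same object instead of
 *	allocating a second name for it.  Roughly:
 *
 *		if (ipc_hash_lookup(space, obj, &name, &entry))
 *			reuse "name" and bump its user references;
 *		else
 *			allocate a new entry and ipc_hash_insert() it
 *			so that later lookups find it.
 */
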
/*
 *	The global reverse hash table holds splay tree entries.
 *	It is a simple open-chaining hash table with singly-linked buckets.
 *	Each bucket is locked separately, with an exclusive lock.
 *	Within each bucket, move-to-front is used.
 */

typedef natural_t ipc_hash_index_t;

ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;

#define IH_GLOBAL_HASH(space, obj)					\
	(((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +		\
	  (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &		\
	 ipc_hash_global_mask)
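
/*
 *	Illustrative example (hypothetical pointer values): with
 *	ipc_hash_global_mask == 0x3f, a space at 0x1000 and an object
 *	at 0x2040 hash to ((0x1000 >> 4) + (0x2040 >> 6)) & 0x3f == 1.
 *	The shifts presumably discard low-order address bits that are
 *	nearly constant because of allocation alignment.
 */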

typedef struct ipc_hash_global_bucket {
	decl_mutex_data(,	ihgb_lock_data)
	ipc_tree_entry_t	ihgb_head;
} *ipc_hash_global_bucket_t;

#define	IHGB_NULL	((ipc_hash_global_bucket_t) 0)

#define	ihgb_lock_init(ihgb)	mutex_init(&(ihgb)->ihgb_lock_data, 0)
#define	ihgb_lock(ihgb)		mutex_lock(&(ihgb)->ihgb_lock_data)
#define	ihgb_unlock(ihgb)	mutex_unlock(&(ihgb)->ihgb_lock_data)

ipc_hash_global_bucket_t ipc_hash_global_table;

/*
 *	Routine:	ipc_hash_global_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Looks in the global table, for splay tree entries.
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	assert(!is_fast_space(space));
	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	if ((this = bucket->ihgb_head) != ITE_NULL) {
		if ((this->ite_object == obj) &&
		    (this->ite_space == space)) {
			/* found it at front; no need to move */

			*namep = this->ite_name;
			*entryp = this;
		} else for (last = &this->ite_next;
			    (this = *last) != ITE_NULL;
			    last = &this->ite_next) {
			if ((this->ite_object == obj) &&
			    (this->ite_space == space)) {
				/* found it; move to front */

				*last = this->ite_next;
				this->ite_next = bucket->ihgb_head;
				bucket->ihgb_head = this;

				*namep = this->ite_name;
				*entryp = this;
				break;
			}
		}
	}

	ihgb_unlock(bucket);
	return this != ITE_NULL;
}

/*
 *	Routine:	ipc_hash_global_insert
 *	Purpose:
 *		Inserts an entry into the global reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_global_insert(
	ipc_space_t			space,
	ipc_object_t			obj,
	__assert_only mach_port_name_t	name,
	ipc_tree_entry_t		entry)
{
	ipc_hash_global_bucket_t bucket;

	assert(!is_fast_space(space));
	assert(entry->ite_name == name);
	assert(space != IS_NULL);
	assert(entry->ite_space == space);
	assert(obj != IO_NULL);
	assert(entry->ite_object == obj);

	space->is_tree_hash++;
	assert(space->is_tree_hash <= space->is_tree_total);

	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	/* insert at front of bucket */

	entry->ite_next = bucket->ihgb_head;
	bucket->ihgb_head = entry;

	ihgb_unlock(bucket);
}

/*
 *	Routine:	ipc_hash_global_delete
 *	Purpose:
 *		Deletes an entry from the global reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_global_delete(
	ipc_space_t			space,
	ipc_object_t			obj,
	__assert_only mach_port_name_t	name,
	ipc_tree_entry_t		entry)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	assert(!is_fast_space(space));
	assert(entry->ite_name == name);
	assert(space != IS_NULL);
	assert(entry->ite_space == space);
	assert(obj != IO_NULL);
	assert(entry->ite_object == obj);

	assert(space->is_tree_hash > 0);
	space->is_tree_hash--;

	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	for (last = &bucket->ihgb_head;
	     (this = *last) != ITE_NULL;
	     last = &this->ite_next) {
		if (this == entry) {
			/* found it; remove from bucket */

			*last = this->ite_next;
			break;
		}
	}
	assert(this != ITE_NULL);

	ihgb_unlock(bucket);
}

/*
 *	Each space has a local reverse hash table, which holds
 *	entries from the space's table.  In fact, the hash table
 *	just uses a field (ie_index) in the table itself.
 *
 *	The local hash table is an open-addressing hash table,
 *	which means that when a collision occurs, instead of
 *	throwing the entry into a bucket, the entry is rehashed
 *	to another position in the table.  In this case the rehash
 *	is very simple: linear probing (ie, just increment the position).
 *	This simple rehash makes deletions tractable (they're still a pain),
 *	but it means that collisions tend to build up into clumps.
 *
 *	Because at least one entry in the table (index 0) is always unused,
 *	there will always be room in the reverse hash table.  If a table
 *	with n slots gets completely full, the reverse hash table will
 *	have one giant clump of n-1 slots and one free slot somewhere.
 *	Because entries are only entered into the reverse table if they
 *	are pure send rights (not receive, send-once, port-set,
 *	or dead-name rights), and free entries of course aren't entered,
 *	I expect the reverse hash table won't get unreasonably full.
 *
 *	Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
 *	pp. 135-142.) may be desirable here.  They can dramatically help
 *	unsuccessful lookups.  But unsuccessful lookups are almost always
 *	followed by insertions, and those slow down somewhat.  They
 *	also can help deletions somewhat.  Successful lookups aren't affected.
 *	So possibly a small win; probably nothing significant.
 */

#define	IH_LOCAL_HASH(obj, size)				\
		((((mach_port_index_t) (obj)) >> 6) % (size))
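
/*
 *	Illustrative example (hypothetical values): with a table of
 *	size 64, an object at address 0x5a40 hashes to
 *	(0x5a40 >> 6) % 64 == 41.  If table[41].ie_index is already
 *	in use, linear probing tries 42, 43, ..., wrapping to 0
 *	after size - 1.
 */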

/*
 *	Routine:	ipc_hash_local_lookup
 *	Purpose:
 *		Converts (space, obj) -> (name, entry).
 *		Looks in the space's local table, for table entries.
 *		Returns TRUE if an entry was found.
 *	Conditions:
 *		The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_local_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_entry_t		*entryp)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, index;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	/*
	 *	Ideally, table[hindex].ie_index is the name we want.
	 *	However, must check ie_object to verify this,
	 *	because collisions can happen.  In case of a collision,
	 *	search farther along in the clump.
	 */

	while ((index = table[hindex].ie_index) != 0) {
		ipc_entry_t entry = &table[index];

		if (entry->ie_object == obj) {
			*entryp = entry;
			*namep = MACH_PORT_MAKE(index,
						IE_BITS_GEN(entry->ie_bits));
			return TRUE;
		}

		if (++hindex == size)
			hindex = 0;
	}

	return FALSE;
}

/*
 *	Routine:	ipc_hash_local_insert
 *	Purpose:
 *		Inserts an entry into the space's reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_local_insert(
	ipc_space_t			space,
	ipc_object_t			obj,
	mach_port_index_t		index,
	__assert_only ipc_entry_t	entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex;

	assert(index != 0);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 *	We want to insert at hindex, but there may be collisions.
	 *	If a collision occurs, search for the end of the clump
	 *	and insert there.
	 */

	while (table[hindex].ie_index != 0) {
		if (++hindex == size)
			hindex = 0;
	}

	table[hindex].ie_index = index;
}

/*
 *	Routine:	ipc_hash_local_delete
 *	Purpose:
 *		Deletes an entry from the space's reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_local_delete(
	ipc_space_t			space,
	ipc_object_t			obj,
	mach_port_index_t		index,
	__assert_only ipc_entry_t	entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, dindex;

	assert(index != MACH_PORT_NULL);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 *	First check we have the right hindex for this index.
	 *	In case of collision, we have to search farther
	 *	along in this clump.
	 */

	while (table[hindex].ie_index != index) {
		if (++hindex == size)
			hindex = 0;
	}

	/*
	 *	Now we want to set table[hindex].ie_index = 0.
	 *	But if we aren't the last index in a clump,
	 *	this might cause problems for lookups of objects
	 *	farther along in the clump that are displaced
	 *	due to collisions.  Searches for them would fail
	 *	at hindex instead of succeeding.
	 *
	 *	So we must check the clump after hindex for objects
	 *	that are so displaced, and move one up to the new hole.
	 *
	 *	hindex - index of new hole in the clump
	 *	dindex - index we are checking for a displaced object
	 *
	 *	When we move a displaced object up into the hole,
	 *	it creates a new hole, and we have to repeat the process
	 *	until we get to the end of the clump.
	 */

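	/*
	 *	Illustrative example (hypothetical slots): suppose three
	 *	entries hash to slot 5 and, after collisions, occupy
	 *	slots 5, 6 and 7.  Deleting the entry at slot 5 leaves a
	 *	hole; the entry at slot 6 also hashes to 5, so it moves
	 *	into the hole, the entry at slot 7 then moves into slot 6,
	 *	and the clump ends with slot 7 cleared.
	 */
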
	for (dindex = hindex; index != 0; hindex = dindex) {
		for (;;) {
			mach_port_index_t tindex;
			ipc_object_t tobj;

			if (++dindex == size)
				dindex = 0;
			assert(dindex != hindex);

			/* are we at the end of the clump? */

			index = table[dindex].ie_index;
			if (index == 0)
				break;

			/* is this a displaced object? */

			tobj = table[index].ie_object;
			assert(tobj != IO_NULL);
			tindex = IH_LOCAL_HASH(tobj, size);

			if ((dindex < hindex) ?
			    ((dindex < tindex) && (tindex <= hindex)) :
			    ((dindex < tindex) || (tindex <= hindex)))
				break;
		}

		table[hindex].ie_index = index;
	}
}

/*
 *	Routine:	ipc_hash_init
 *	Purpose:
 *		Initialize the reverse hash table implementation.
 */

void
ipc_hash_init(void)
{
	ipc_hash_index_t i;

	/* if not configured, initialize ipc_hash_global_size */

	if (ipc_hash_global_size == 0) {
		ipc_hash_global_size = ipc_tree_entry_max >> 8;
		if (ipc_hash_global_size < 32)
			ipc_hash_global_size = 32;
	}

	/* make sure it is a power of two */

	ipc_hash_global_mask = ipc_hash_global_size - 1;
	if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
		natural_t bit;

		/* round up to closest power of two */
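		/*
		 *	Illustrative example (hypothetical value): a
		 *	configured size of 48 gives mask 0x2f and fails
		 *	the power-of-two check above; the loop below
		 *	rounds it up to size 64 with mask 0x3f.
		 */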

		for (bit = 1;; bit <<= 1) {
			ipc_hash_global_mask |= bit;
			ipc_hash_global_size = ipc_hash_global_mask + 1;

			if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
				break;
		}
	}

	/* allocate ipc_hash_global_table */

	ipc_hash_global_table = (ipc_hash_global_bucket_t)
		kalloc((vm_size_t) (ipc_hash_global_size *
				    sizeof(struct ipc_hash_global_bucket)));
	assert(ipc_hash_global_table != IHGB_NULL);

	/* and initialize it */

	for (i = 0; i < ipc_hash_global_size; i++) {
		ipc_hash_global_bucket_t bucket;

		bucket = &ipc_hash_global_table[i];
		ihgb_lock_init(bucket);
		bucket->ihgb_head = ITE_NULL;
	}
}

#if	MACH_IPC_DEBUG

/*
 *	Routine:	ipc_hash_info
 *	Purpose:
 *		Return information about the global reverse hash table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

ipc_hash_index_t
ipc_hash_info(
	hash_info_bucket_t	*info,
	mach_msg_type_number_t	count)
{
	ipc_hash_index_t i;

	if (ipc_hash_global_size < count)
		count = ipc_hash_global_size;

	for (i = 0; i < count; i++) {
		ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
		unsigned int bucket_count = 0;
		ipc_tree_entry_t entry;

		ihgb_lock(bucket);
		for (entry = bucket->ihgb_head;
		     entry != ITE_NULL;
		     entry = entry->ite_next)
			bucket_count++;
		ihgb_unlock(bucket);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return ipc_hash_global_size;
}

#endif	/* MACH_IPC_DEBUG */