/*
 * Source: git.saurik.com mirror of apple/xnu.git
 * Blob:   825ee58385193cd4979aad96f80dbdca5937fd85
 * Path:   osfmk/ipc/ipc_hash.c
 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51 /*
52 */
53 /*
54 * File: ipc/ipc_hash.c
55 * Author: Rich Draves
56 * Date: 1989
57 *
58 * Entry hash table operations.
59 */
60
61 #include <mach/boolean.h>
62 #include <mach/port.h>
63 #include <kern/lock.h>
64 #include <kern/kalloc.h>
65 #include <ipc/port.h>
66 #include <ipc/ipc_space.h>
67 #include <ipc/ipc_object.h>
68 #include <ipc/ipc_entry.h>
69 #include <ipc/ipc_hash.h>
70 #include <ipc/ipc_init.h>
71
72 #include <mach_ipc_debug.h>
73
74 #if MACH_IPC_DEBUG
75 #include <mach/kern_return.h>
76 #include <mach_debug/hash_info.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_kern.h>
79 #endif /* MACH_IPC_DEBUG */
80
/*
 * Forward declarations
 *
 * These routines are internal to this file (the public entry points
 * ipc_hash_lookup/insert/delete dispatch to them); only the ones
 * referenced before their definitions need declaring here.
 */

/* Lookup (space, obj) in the global reverse hash table */
boolean_t ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry);

/* Delete an entry from the space-local reverse hash table */
void ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry);
105
106 /*
107 * Routine: ipc_hash_lookup
108 * Purpose:
109 * Converts (space, obj) -> (name, entry).
110 * Returns TRUE if an entry was found.
111 * Conditions:
112 * The space must be locked (read or write) throughout.
113 */
114
115 boolean_t
116 ipc_hash_lookup(
117 ipc_space_t space,
118 ipc_object_t obj,
119 mach_port_name_t *namep,
120 ipc_entry_t *entryp)
121 {
122 boolean_t rv;
123
124 rv = ipc_hash_local_lookup(space, obj, namep, entryp);
125 if (!rv) {
126 assert(!is_fast_space(space) || space->is_tree_hash == 0);
127 if (space->is_tree_hash > 0)
128 rv = ipc_hash_global_lookup(space, obj, namep,
129 (ipc_tree_entry_t *) entryp);
130 }
131 return (rv);
132 }
133
134 /*
135 * Routine: ipc_hash_insert
136 * Purpose:
137 * Inserts an entry into the appropriate reverse hash table,
138 * so that ipc_hash_lookup will find it.
139 * Conditions:
140 * The space must be write-locked.
141 */
142
143 void
144 ipc_hash_insert(
145 ipc_space_t space,
146 ipc_object_t obj,
147 mach_port_name_t name,
148 ipc_entry_t entry)
149 {
150 mach_port_index_t index;
151
152 index = MACH_PORT_INDEX(name);
153 if ((index < space->is_table_size) &&
154 (entry == &space->is_table[index]))
155 ipc_hash_local_insert(space, obj, index, entry);
156 else {
157 assert(!is_fast_space(space));
158 ipc_hash_global_insert(space, obj, name,
159 (ipc_tree_entry_t) entry);
160 }
161 }
162
163 /*
164 * Routine: ipc_hash_delete
165 * Purpose:
166 * Deletes an entry from the appropriate reverse hash table.
167 * Conditions:
168 * The space must be write-locked.
169 */
170
171 void
172 ipc_hash_delete(
173 ipc_space_t space,
174 ipc_object_t obj,
175 mach_port_name_t name,
176 ipc_entry_t entry)
177 {
178 mach_port_index_t index;
179
180 index = MACH_PORT_INDEX(name);
181 if ((index < space->is_table_size) &&
182 (entry == &space->is_table[index]))
183 ipc_hash_local_delete(space, obj, index, entry);
184 else {
185 assert(!is_fast_space(space));
186 ipc_hash_global_delete(space, obj, name,
187 (ipc_tree_entry_t) entry);
188 }
189 }
190
/*
 * The global reverse hash table holds splay tree entries.
 * It is a simple open-chaining hash table with singly-linked buckets.
 * Each bucket is locked separately, with an exclusive lock.
 * Within each bucket, move-to-front is used.
 */

typedef natural_t ipc_hash_index_t;

/* bucket count (forced to a power of two by ipc_hash_init) and the
 * corresponding index mask (always size - 1) */
ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;

/*
 * Mix the space and object pointers into a bucket index.  The right
 * shifts presumably discard low-order bits that are constant due to
 * allocation alignment — TODO confirm against the allocators.
 */
#define IH_GLOBAL_HASH(space, obj)				\
	(((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +	\
	  (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &	\
	 ipc_hash_global_mask)

/* one bucket: an exclusive mutex guarding a singly-linked chain */
typedef struct ipc_hash_global_bucket {
	decl_mutex_data(,	ihgb_lock_data)
	ipc_tree_entry_t	ihgb_head;
} *ipc_hash_global_bucket_t;

#define	IHGB_NULL	((ipc_hash_global_bucket_t) 0)

#define	ihgb_lock_init(ihgb)	mutex_init(&(ihgb)->ihgb_lock_data, 0)
#define	ihgb_lock(ihgb)		mutex_lock(&(ihgb)->ihgb_lock_data)
#define	ihgb_unlock(ihgb)	mutex_unlock(&(ihgb)->ihgb_lock_data)

/* the bucket array itself, allocated and initialized in ipc_hash_init */
ipc_hash_global_bucket_t ipc_hash_global_table;
220
221 /*
222 * Routine: ipc_hash_global_lookup
223 * Purpose:
224 * Converts (space, obj) -> (name, entry).
225 * Looks in the global table, for splay tree entries.
226 * Returns TRUE if an entry was found.
227 * Conditions:
228 * The space must be locked (read or write) throughout.
229 */
230
boolean_t
ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	/* fast spaces have no splay-tree entries, hence nothing here */
	assert(!is_fast_space(space));
	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	if ((this = bucket->ihgb_head) != ITE_NULL) {
		if ((this->ite_object == obj) &&
		    (this->ite_space == space)) {
			/* found it at front; no need to move */

			*namep = this->ite_name;
			*entryp = this;
		} else for (last = &this->ite_next;
			    (this = *last) != ITE_NULL;
			    last = &this->ite_next) {
			/*
			 * last trails one link behind this, pointing at
			 * the link that references it, so a match can be
			 * unlinked and pushed to the bucket front
			 * (move-to-front policy).
			 */
			if ((this->ite_object == obj) &&
			    (this->ite_space == space)) {
				/* found it; move to front */

				*last = this->ite_next;
				this->ite_next = bucket->ihgb_head;
				bucket->ihgb_head = this;

				*namep = this->ite_name;
				*entryp = this;
				break;
			}
		}
	}

	ihgb_unlock(bucket);
	/* this is ITE_NULL exactly when no match was found */
	return this != ITE_NULL;
}
276
277 /*
278 * Routine: ipc_hash_global_insert
279 * Purpose:
280 * Inserts an entry into the global reverse hash table.
281 * Conditions:
282 * The space must be write-locked.
283 */
284
285 void
286 ipc_hash_global_insert(
287 ipc_space_t space,
288 ipc_object_t obj,
289 __assert_only mach_port_name_t name,
290 ipc_tree_entry_t entry)
291 {
292 ipc_hash_global_bucket_t bucket;
293
294 assert(!is_fast_space(space));
295 assert(entry->ite_name == name);
296 assert(space != IS_NULL);
297 assert(entry->ite_space == space);
298 assert(obj != IO_NULL);
299 assert(entry->ite_object == obj);
300
301 space->is_tree_hash++;
302 assert(space->is_tree_hash <= space->is_tree_total);
303
304 bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
305 ihgb_lock(bucket);
306
307 /* insert at front of bucket */
308
309 entry->ite_next = bucket->ihgb_head;
310 bucket->ihgb_head = entry;
311
312 ihgb_unlock(bucket);
313 }
314
315 /*
316 * Routine: ipc_hash_global_delete
317 * Purpose:
318 * Deletes an entry from the global reverse hash table.
319 * Conditions:
320 * The space must be write-locked.
321 */
322
void
ipc_hash_global_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	__assert_only mach_port_name_t name,
	ipc_tree_entry_t	entry)
{
	ipc_hash_global_bucket_t bucket;
	ipc_tree_entry_t this, *last;

	/* the entry must already describe exactly this (space, obj, name) */
	assert(!is_fast_space(space));
	assert(entry->ite_name == name);
	assert(space != IS_NULL);
	assert(entry->ite_space == space);
	assert(obj != IO_NULL);
	assert(entry->ite_object == obj);

	/* keep the per-space count of globally-hashed entries in sync */
	assert(space->is_tree_hash > 0);
	space->is_tree_hash--;

	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
	ihgb_lock(bucket);

	/*
	 * Walk the singly-linked chain with a trailing link pointer
	 * (last) so the matching entry can be unlinked in place.
	 */
	for (last = &bucket->ihgb_head;
	     (this = *last) != ITE_NULL;
	     last = &this->ite_next) {
		if (this == entry) {
			/* found it; remove from bucket */

			*last = this->ite_next;
			break;
		}
	}
	/* the entry must have been present in its bucket */
	assert(this != ITE_NULL);

	ihgb_unlock(bucket);
}
360
361 /*
362 * Each space has a local reverse hash table, which holds
363 * entries from the space's table. In fact, the hash table
364 * just uses a field (ie_index) in the table itself.
365 *
366 * The local hash table is an open-addressing hash table,
367 * which means that when a collision occurs, instead of
368 * throwing the entry into a bucket, the entry is rehashed
369 * to another position in the table. In this case the rehash
370 * is very simple: linear probing (ie, just increment the position).
371 * This simple rehash makes deletions tractable (they're still a pain),
372 * but it means that collisions tend to build up into clumps.
373 *
374 * Because at least one entry in the table (index 0) is always unused,
375 * there will always be room in the reverse hash table. If a table
376 * with n slots gets completely full, the reverse hash table will
377 * have one giant clump of n-1 slots and one free slot somewhere.
378 * Because entries are only entered into the reverse table if they
379 * are pure send rights (not receive, send-once, port-set,
380 * or dead-name rights), and free entries of course aren't entered,
381 * I expect the reverse hash table won't get unreasonably full.
382 *
383 * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
384 * pp. 135-142.) may be desirable here. They can dramatically help
385 * unsuccessful lookups. But unsuccessful lookups are almost always
386 * followed by insertions, and those slow down somewhat. They
387 * also can help deletions somewhat. Successful lookups aren't affected.
388 * So possibly a small win; probably nothing significant.
389 */
390
/*
 * Hash an object pointer into a slot of a local table of 'size'
 * entries.  The >> 6 presumably discards low-order bits that are
 * constant due to object alignment — TODO confirm with the allocator.
 */
#define IH_LOCAL_HASH(obj, size)				\
		((((mach_port_index_t) (obj)) >> 6) % (size))
393
394 /*
395 * Routine: ipc_hash_local_lookup
396 * Purpose:
397 * Converts (space, obj) -> (name, entry).
398 * Looks in the space's local table, for table entries.
399 * Returns TRUE if an entry was found.
400 * Conditions:
401 * The space must be locked (read or write) throughout.
402 */
403
404 boolean_t
405 ipc_hash_local_lookup(
406 ipc_space_t space,
407 ipc_object_t obj,
408 mach_port_name_t *namep,
409 ipc_entry_t *entryp)
410 {
411 ipc_entry_t table;
412 ipc_entry_num_t size;
413 mach_port_index_t hindex, index;
414
415 assert(space != IS_NULL);
416 assert(obj != IO_NULL);
417
418 table = space->is_table;
419 size = space->is_table_size;
420 hindex = IH_LOCAL_HASH(obj, size);
421
422 /*
423 * Ideally, table[hindex].ie_index is the name we want.
424 * However, must check ie_object to verify this,
425 * because collisions can happen. In case of a collision,
426 * search farther along in the clump.
427 */
428
429 while ((index = table[hindex].ie_index) != 0) {
430 ipc_entry_t entry = &table[index];
431
432 if (entry->ie_object == obj) {
433 *entryp = entry;
434 *namep = MACH_PORT_MAKE(index,
435 IE_BITS_GEN(entry->ie_bits));
436 return TRUE;
437 }
438
439 if (++hindex == size)
440 hindex = 0;
441 }
442
443 return FALSE;
444 }
445
446 /*
447 * Routine: ipc_hash_local_insert
448 * Purpose:
449 * Inserts an entry into the space's reverse hash table.
450 * Conditions:
451 * The space must be write-locked.
452 */
453
454 void
455 ipc_hash_local_insert(
456 ipc_space_t space,
457 ipc_object_t obj,
458 mach_port_index_t index,
459 __assert_only ipc_entry_t entry)
460 {
461 ipc_entry_t table;
462 ipc_entry_num_t size;
463 mach_port_index_t hindex;
464
465 assert(index != 0);
466 assert(space != IS_NULL);
467 assert(obj != IO_NULL);
468
469 table = space->is_table;
470 size = space->is_table_size;
471 hindex = IH_LOCAL_HASH(obj, size);
472
473 assert(entry == &table[index]);
474 assert(entry->ie_object == obj);
475
476 /*
477 * We want to insert at hindex, but there may be collisions.
478 * If a collision occurs, search for the end of the clump
479 * and insert there.
480 */
481
482 while (table[hindex].ie_index != 0) {
483 if (++hindex == size)
484 hindex = 0;
485 }
486
487 table[hindex].ie_index = index;
488 }
489
490 /*
491 * Routine: ipc_hash_local_delete
492 * Purpose:
493 * Deletes an entry from the space's reverse hash table.
494 * Conditions:
495 * The space must be write-locked.
496 */
497
void
ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	__assert_only ipc_entry_t entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, dindex;

	assert(index != MACH_PORT_NULL);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 * First check we have the right hindex for this index.
	 * In case of collision, we have to search farther
	 * along in this clump.
	 */

	while (table[hindex].ie_index != index) {
		if (++hindex == size)
			hindex = 0;
	}

	/*
	 * Now we want to set table[hindex].ie_index = 0.
	 * But if we aren't the last index in a clump,
	 * this might cause problems for lookups of objects
	 * farther along in the clump that are displaced
	 * due to collisions. Searches for them would fail
	 * at hindex instead of succeeding.
	 *
	 * So we must check the clump after hindex for objects
	 * that are so displaced, and move one up to the new hole.
	 *
	 * hindex - index of new hole in the clump
	 * dindex - index we are checking for a displaced object
	 *
	 * When we move a displaced object up into the hole,
	 * it creates a new hole, and we have to repeat the process
	 * until we get to the end of the clump.
	 */

	/* outer loop: one iteration per hole filled; stops when the
	 * inner loop ran off the end of the clump (index == 0) */
	for (dindex = hindex; index != 0; hindex = dindex) {
		for (;;) {
			mach_port_index_t tindex;
			ipc_object_t tobj;

			if (++dindex == size)
				dindex = 0;
			assert(dindex != hindex);

			/* are we at the end of the clump? */

			index = table[dindex].ie_index;
			if (index == 0)
				break;

			/* is this a displaced object? */

			tobj = table[index].ie_object;
			assert(tobj != IO_NULL);
			tindex = IH_LOCAL_HASH(tobj, size);

			/*
			 * Break (i.e. move this entry into the hole) when
			 * its home slot tindex lies cyclically in
			 * (dindex, hindex]: then a probe starting at
			 * tindex reaches hindex without passing dindex,
			 * so relocating the entry keeps it findable.
			 * The ternary handles the wrapped/unwrapped
			 * forms of the cyclic interval.
			 */
			if ((dindex < hindex) ?
			    ((dindex < tindex) && (tindex <= hindex)) :
			    ((dindex < tindex) || (tindex <= hindex)))
				break;
		}

		/* fill the hole; writes 0 on the final iteration,
		 * which is exactly the deletion we set out to do */
		table[hindex].ie_index = index;
	}
}
580
581 /*
582 * Routine: ipc_hash_init
583 * Purpose:
584 * Initialize the reverse hash table implementation.
585 */
586
587 void
588 ipc_hash_init(void)
589 {
590 ipc_hash_index_t i;
591
592 /* if not configured, initialize ipc_hash_global_size */
593
594 if (ipc_hash_global_size == 0) {
595 ipc_hash_global_size = ipc_tree_entry_max >> 8;
596 if (ipc_hash_global_size < 32)
597 ipc_hash_global_size = 32;
598 }
599
600 /* make sure it is a power of two */
601
602 ipc_hash_global_mask = ipc_hash_global_size - 1;
603 if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
604 natural_t bit;
605
606 /* round up to closest power of two */
607
608 for (bit = 1;; bit <<= 1) {
609 ipc_hash_global_mask |= bit;
610 ipc_hash_global_size = ipc_hash_global_mask + 1;
611
612 if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
613 break;
614 }
615 }
616
617 /* allocate ipc_hash_global_table */
618
619 ipc_hash_global_table = (ipc_hash_global_bucket_t)
620 kalloc((vm_size_t) (ipc_hash_global_size *
621 sizeof(struct ipc_hash_global_bucket)));
622 assert(ipc_hash_global_table != IHGB_NULL);
623
624 /* and initialize it */
625
626 for (i = 0; i < ipc_hash_global_size; i++) {
627 ipc_hash_global_bucket_t bucket;
628
629 bucket = &ipc_hash_global_table[i];
630 ihgb_lock_init(bucket);
631 bucket->ihgb_head = ITE_NULL;
632 }
633 }
634
635 #if MACH_IPC_DEBUG
636
637 /*
638 * Routine: ipc_hash_info
639 * Purpose:
640 * Return information about the global reverse hash table.
641 * Fills the buffer with as much information as possible
642 * and returns the desired size of the buffer.
643 * Conditions:
644 * Nothing locked. The caller should provide
645 * possibly-pageable memory.
646 */
647
648
649 ipc_hash_index_t
650 ipc_hash_info(
651 hash_info_bucket_t *info,
652 mach_msg_type_number_t count)
653 {
654 ipc_hash_index_t i;
655
656 if (ipc_hash_global_size < count)
657 count = ipc_hash_global_size;
658
659 for (i = 0; i < count; i++) {
660 ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
661 unsigned int bucket_count = 0;
662 ipc_tree_entry_t entry;
663
664 ihgb_lock(bucket);
665 for (entry = bucket->ihgb_head;
666 entry != ITE_NULL;
667 entry = entry->ite_next)
668 bucket_count++;
669 ihgb_unlock(bucket);
670
671 /* don't touch pageable memory while holding locks */
672 info[i].hib_count = bucket_count;
673 }
674
675 return ipc_hash_global_size;
676 }
677
678 #endif /* MACH_IPC_DEBUG */