/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *      File:   ipc/ipc_hash.c
 *      Author: Rich Draves
 *      Date:   1989
 *
 *      Entry hash table operations.
 */

#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
#include <ipc/port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>

#include <mach_ipc_debug.h>

#if MACH_IPC_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif  /* MACH_IPC_DEBUG */

/*
 * Forward declarations
 */

/* Lookup (space, obj) in global hash table */
boolean_t ipc_hash_global_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_tree_entry_t        *entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        name,
        ipc_tree_entry_t        entry);

/* Delete an entry from the local reverse hash table */
void ipc_hash_local_delete(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_index_t       index,
        ipc_entry_t             entry);

/*
 * Routine:     ipc_hash_lookup
 * Purpose:
 *      Converts (space, obj) -> (name, entry).
 *      Returns TRUE if an entry was found.
 * Conditions:
 *      The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_entry_t             *entryp)
{
        boolean_t rv;

        rv = ipc_hash_local_lookup(space, obj, namep, entryp);
        if (!rv) {
                assert(!is_fast_space(space) || space->is_tree_hash == 0);
                if (space->is_tree_hash > 0)
                        rv = ipc_hash_global_lookup(space, obj, namep,
                                        (ipc_tree_entry_t *) entryp);
        }
        return (rv);
}

/*
 * Routine:     ipc_hash_insert
 * Purpose:
 *      Inserts an entry into the appropriate reverse hash table,
 *      so that ipc_hash_lookup will find it.
 * Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_insert(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        name,
        ipc_entry_t             entry)
{
        mach_port_index_t index;

        index = MACH_PORT_INDEX(name);
        if ((index < space->is_table_size) &&
            (entry == &space->is_table[index]))
                ipc_hash_local_insert(space, obj, index, entry);
        else {
                assert(!is_fast_space(space));
                ipc_hash_global_insert(space, obj, name,
                                (ipc_tree_entry_t) entry);
        }
}

/*
 * Routine:     ipc_hash_delete
 * Purpose:
 *      Deletes an entry from the appropriate reverse hash table.
 * Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_delete(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        name,
        ipc_entry_t             entry)
{
        mach_port_index_t index;

        index = MACH_PORT_INDEX(name);
        if ((index < space->is_table_size) &&
            (entry == &space->is_table[index]))
                ipc_hash_local_delete(space, obj, index, entry);
        else {
                assert(!is_fast_space(space));
                ipc_hash_global_delete(space, obj, name,
                                (ipc_tree_entry_t) entry);
        }
}

/*
 * The global reverse hash table holds splay tree entries.
 * It is a simple separately chained hash table with singly linked buckets.
 * Each bucket is locked separately, with an exclusive lock.
 * Within each bucket, move-to-front is used on successful lookups.
 */
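
/*
 * For example, a bucket is a mutex plus the head of a singly linked list
 * of splay tree entries chained through ite_next:
 *
 *      ihgb_head -> E2 -> E1 -> E0 -> ITE_NULL
 *
 * A successful lookup of E0 unlinks it and re-links it at the front:
 *
 *      ihgb_head -> E0 -> E2 -> E1 -> ITE_NULL
 *
 * so entries that are looked up repeatedly stay near the head of the chain.
 */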

typedef natural_t ipc_hash_index_t;

ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;

#define IH_GLOBAL_HASH(space, obj)                                      \
        (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +           \
          (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &            \
         ipc_hash_global_mask)
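
/*
 * For example (illustrative pointer values), with ipc_hash_global_size = 256
 * and thus ipc_hash_global_mask = 0xff, space = 0x1234560 and obj = 0x89abc40
 * hash to bucket
 *
 *      ((0x1234560 >> 4) + (0x89abc40 >> 6)) & 0xff
 *              = (0x123456 + 0x226af1) & 0xff
 *              = 0x349f47 & 0xff
 *              = 0x47
 *
 * The shifts discard the low-order pointer bits, which are presumed to carry
 * little information because of allocation alignment.
 */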

typedef struct ipc_hash_global_bucket {
        decl_mutex_data(,       ihgb_lock_data)
        ipc_tree_entry_t        ihgb_head;
} *ipc_hash_global_bucket_t;

#define IHGB_NULL       ((ipc_hash_global_bucket_t) 0)

#define ihgb_lock_init(ihgb)    mutex_init(&(ihgb)->ihgb_lock_data, 0)
#define ihgb_lock(ihgb)         mutex_lock(&(ihgb)->ihgb_lock_data)
#define ihgb_unlock(ihgb)       mutex_unlock(&(ihgb)->ihgb_lock_data)

ipc_hash_global_bucket_t ipc_hash_global_table;

/*
 * Routine:     ipc_hash_global_lookup
 * Purpose:
 *      Converts (space, obj) -> (name, entry).
 *      Looks in the global table, for splay tree entries.
 *      Returns TRUE if an entry was found.
 * Conditions:
 *      The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_global_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_tree_entry_t        *entryp)
{
        ipc_hash_global_bucket_t bucket;
        ipc_tree_entry_t this, *last;

        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        assert(!is_fast_space(space));
        bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
        ihgb_lock(bucket);

        if ((this = bucket->ihgb_head) != ITE_NULL) {
                if ((this->ite_object == obj) &&
                    (this->ite_space == space)) {
                        /* found it at front; no need to move */

                        *namep = this->ite_name;
                        *entryp = this;
                } else for (last = &this->ite_next;
                            (this = *last) != ITE_NULL;
                            last = &this->ite_next) {
                        if ((this->ite_object == obj) &&
                            (this->ite_space == space)) {
                                /* found it; move to front */

                                *last = this->ite_next;
                                this->ite_next = bucket->ihgb_head;
                                bucket->ihgb_head = this;

                                *namep = this->ite_name;
                                *entryp = this;
                                break;
                        }
                }
        }

        ihgb_unlock(bucket);
        return this != ITE_NULL;
}

/*
 * Routine:     ipc_hash_global_insert
 * Purpose:
 *      Inserts an entry into the global reverse hash table.
 * Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_global_insert(
        ipc_space_t             space,
        ipc_object_t            obj,
        __assert_only mach_port_name_t name,
        ipc_tree_entry_t        entry)
{
        ipc_hash_global_bucket_t bucket;

        assert(!is_fast_space(space));
        assert(entry->ite_name == name);
        assert(space != IS_NULL);
        assert(entry->ite_space == space);
        assert(obj != IO_NULL);
        assert(entry->ite_object == obj);

        space->is_tree_hash++;
        assert(space->is_tree_hash <= space->is_tree_total);

        bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
        ihgb_lock(bucket);

        /* insert at front of bucket */

        entry->ite_next = bucket->ihgb_head;
        bucket->ihgb_head = entry;

        ihgb_unlock(bucket);
}

/*
 * Routine:     ipc_hash_global_delete
 * Purpose:
 *      Deletes an entry from the global reverse hash table.
 * Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_global_delete(
        ipc_space_t             space,
        ipc_object_t            obj,
        __assert_only mach_port_name_t name,
        ipc_tree_entry_t        entry)
{
        ipc_hash_global_bucket_t bucket;
        ipc_tree_entry_t this, *last;

        assert(!is_fast_space(space));
        assert(entry->ite_name == name);
        assert(space != IS_NULL);
        assert(entry->ite_space == space);
        assert(obj != IO_NULL);
        assert(entry->ite_object == obj);

        assert(space->is_tree_hash > 0);
        space->is_tree_hash--;

        bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
        ihgb_lock(bucket);

        for (last = &bucket->ihgb_head;
             (this = *last) != ITE_NULL;
             last = &this->ite_next) {
                if (this == entry) {
                        /* found it; remove from bucket */

                        *last = this->ite_next;
                        break;
                }
        }
        assert(this != ITE_NULL);

        ihgb_unlock(bucket);
}

/*
 * Each space has a local reverse hash table, which holds
 * entries from the space's table.  In fact, the hash table
 * just uses a field (ie_index) in the table itself.
 *
 * The local hash table is an open-addressing hash table,
 * which means that when a collision occurs, instead of
 * throwing the entry into a bucket, the entry is rehashed
 * to another position in the table.  In this case the rehash
 * is very simple: linear probing (i.e., just increment the position).
 * This simple rehash makes deletions tractable (they're still a pain),
 * but it means that collisions tend to build up into clumps.
 *
 * Because at least one entry in the table (index 0) is always unused,
 * there will always be room in the reverse hash table.  If a table
 * with n slots gets completely full, the reverse hash table will
 * have one giant clump of n-1 slots and one free slot somewhere.
 * Because entries are only entered into the reverse table if they
 * are pure send rights (not receive, send-once, port-set,
 * or dead-name rights), and free entries of course aren't entered,
 * I expect the reverse hash table won't get unreasonably full.
 *
 * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
 * pp. 135-142) may be desirable here.  They can dramatically help
 * unsuccessful lookups.  But unsuccessful lookups are almost always
 * followed by insertions, and those slow down somewhat.  They
 * also can help deletions somewhat.  Successful lookups aren't affected.
 * So possibly a small win; probably nothing significant.
 */

#define IH_LOCAL_HASH(obj, size)                                \
        ((((mach_port_index_t) (obj)) >> 6) % (size))
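
/*
 * For example (slot numbers are illustrative only): two objects collide
 * when their (pointer >> 6) values differ by a multiple of the table size.
 * If object A hashes to slot 5, A's table index is stored in
 * table[5].ie_index.  If object B also hashes to slot 5, B's index is
 * pushed one slot farther, into table[6].ie_index, forming a two-slot
 * clump.  A lookup of B starts at slot 5, sees that the entry named there
 * is for a different object, and probes forward to slot 6.
 */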

/*
 * Routine:     ipc_hash_local_lookup
 * Purpose:
 *      Converts (space, obj) -> (name, entry).
 *      Looks in the space's local table, for table entries.
 *      Returns TRUE if an entry was found.
 * Conditions:
 *      The space must be locked (read or write) throughout.
 */

boolean_t
ipc_hash_local_lookup(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_name_t        *namep,
        ipc_entry_t             *entryp)
{
        ipc_entry_t table;
        ipc_entry_num_t size;
        mach_port_index_t hindex, index;

        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        table = space->is_table;
        size = space->is_table_size;
        hindex = IH_LOCAL_HASH(obj, size);

        /*
         * Ideally, table[hindex].ie_index is the name we want.
         * However, we must check ie_object to verify this,
         * because collisions can happen.  In case of a collision,
         * search farther along in the clump.
         */

        while ((index = table[hindex].ie_index) != 0) {
                ipc_entry_t entry = &table[index];

                if (entry->ie_object == obj) {
                        *entryp = entry;
                        *namep = MACH_PORT_MAKE(index,
                                        IE_BITS_GEN(entry->ie_bits));
                        return TRUE;
                }

                if (++hindex == size)
                        hindex = 0;
        }

        return FALSE;
}

/*
 * Routine:     ipc_hash_local_insert
 * Purpose:
 *      Inserts an entry into the space's reverse hash table.
 * Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_local_insert(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_index_t       index,
        __assert_only ipc_entry_t entry)
{
        ipc_entry_t table;
        ipc_entry_num_t size;
        mach_port_index_t hindex;

        assert(index != 0);
        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        table = space->is_table;
        size = space->is_table_size;
        hindex = IH_LOCAL_HASH(obj, size);

        assert(entry == &table[index]);
        assert(entry->ie_object == obj);

        /*
         * We want to insert at hindex, but there may be collisions.
         * If a collision occurs, search for the end of the clump
         * and insert there.
         */

        while (table[hindex].ie_index != 0) {
                if (++hindex == size)
                        hindex = 0;
        }

        table[hindex].ie_index = index;
}

/*
 * Routine:     ipc_hash_local_delete
 * Purpose:
 *      Deletes an entry from the space's reverse hash table.
 * Conditions:
 *      The space must be write-locked.
 */

void
ipc_hash_local_delete(
        ipc_space_t             space,
        ipc_object_t            obj,
        mach_port_index_t       index,
        __assert_only ipc_entry_t entry)
{
        ipc_entry_t table;
        ipc_entry_num_t size;
        mach_port_index_t hindex, dindex;

        assert(index != MACH_PORT_NULL);
        assert(space != IS_NULL);
        assert(obj != IO_NULL);

        table = space->is_table;
        size = space->is_table_size;
        hindex = IH_LOCAL_HASH(obj, size);

        assert(entry == &table[index]);
        assert(entry->ie_object == obj);

        /*
         * First check that we have the right hindex for this index.
         * In case of collision, we have to search farther
         * along in this clump.
         */

        while (table[hindex].ie_index != index) {
                if (++hindex == size)
                        hindex = 0;
        }

        /*
         * Now we want to set table[hindex].ie_index = 0.
         * But if we aren't the last index in a clump,
         * this might cause problems for lookups of objects
         * farther along in the clump that are displaced
         * due to collisions.  Searches for them would fail
         * at hindex instead of succeeding.
         *
         * So we must check the clump after hindex for objects
         * that are so displaced, and move one up to the new hole.
         *
         *      hindex - index of new hole in the clump
         *      dindex - index we are checking for a displaced object
         *
         * When we move a displaced object up into the hole,
         * it creates a new hole, and we have to repeat the process
         * until we get to the end of the clump.
         */

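        /*
         * A small worked example (slot numbers are illustrative): suppose
         * the clump is slots 2-4, slot 2 held the deleted entry, slot 3
         * holds an entry whose home slot (IH_LOCAL_HASH of its object) is 2,
         * and slot 4 holds an entry whose home slot is 4.  The loop moves
         * the slot-3 entry into the hole at slot 2 (its home lies on the
         * probe path through the hole), leaves the slot-4 entry alone (its
         * home does not), and finally clears slot 3, which becomes the new
         * end of the clump.
         */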
        for (dindex = hindex; index != 0; hindex = dindex) {
                for (;;) {
                        mach_port_index_t tindex;
                        ipc_object_t tobj;

                        if (++dindex == size)
                                dindex = 0;
                        assert(dindex != hindex);

                        /* are we at the end of the clump? */

                        index = table[dindex].ie_index;
                        if (index == 0)
                                break;

                        /* is this a displaced object? */

                        tobj = table[index].ie_object;
                        assert(tobj != IO_NULL);
                        tindex = IH_LOCAL_HASH(tobj, size);

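                        /*
                         * The entry at dindex must move into the hole when
                         * the hole at hindex lies on the probe path from its
                         * home slot tindex to dindex.  Treating the table as
                         * circular, that is exactly when tindex is NOT in
                         * the interval (hindex, dindex]; the first arm of
                         * the conditional below handles the case where
                         * dindex has wrapped past the end of the table
                         * (dindex < hindex), the second the straightforward
                         * case.
                         */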
                        if ((dindex < hindex) ?
                            ((dindex < tindex) && (tindex <= hindex)) :
                            ((dindex < tindex) || (tindex <= hindex)))
                                break;
                }

                table[hindex].ie_index = index;
        }
}

/*
 * Routine:     ipc_hash_init
 * Purpose:
 *      Initialize the reverse hash table implementation.
 */

void
ipc_hash_init(void)
{
        ipc_hash_index_t i;

        /* if not configured, initialize ipc_hash_global_size */

        if (ipc_hash_global_size == 0) {
                ipc_hash_global_size = ipc_tree_entry_max >> 8;
                if (ipc_hash_global_size < 32)
                        ipc_hash_global_size = 32;
        }

        /* make sure it is a power of two */

        ipc_hash_global_mask = ipc_hash_global_size - 1;
        if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
                natural_t bit;

                /* round up to the next power of two */

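                /*
                 * For example, a configured size of 48 starts with mask 47
                 * (0b101111); OR-ing in successive bits grows the mask to 63
                 * once bit 16 is reached, giving size 64, which passes the
                 * power-of-two test and ends the loop.
                 */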
                for (bit = 1;; bit <<= 1) {
                        ipc_hash_global_mask |= bit;
                        ipc_hash_global_size = ipc_hash_global_mask + 1;

                        if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
                                break;
                }
        }

        /* allocate ipc_hash_global_table */

        ipc_hash_global_table = (ipc_hash_global_bucket_t)
                kalloc((vm_size_t) (ipc_hash_global_size *
                                    sizeof(struct ipc_hash_global_bucket)));
        assert(ipc_hash_global_table != IHGB_NULL);

        /* and initialize it */

        for (i = 0; i < ipc_hash_global_size; i++) {
                ipc_hash_global_bucket_t bucket;

                bucket = &ipc_hash_global_table[i];
                ihgb_lock_init(bucket);
                bucket->ihgb_head = ITE_NULL;
        }
}

#if MACH_IPC_DEBUG

/*
 * Routine:     ipc_hash_info
 * Purpose:
 *      Return information about the global reverse hash table.
 *      Fills the buffer with as much information as possible
 *      and returns the desired size of the buffer.
 * Conditions:
 *      Nothing locked.  The caller should provide
 *      possibly-pageable memory.
 */

ipc_hash_index_t
ipc_hash_info(
        hash_info_bucket_t      *info,
        mach_msg_type_number_t  count)
{
        ipc_hash_index_t i;

        if (ipc_hash_global_size < count)
                count = ipc_hash_global_size;

        for (i = 0; i < count; i++) {
                ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
                unsigned int bucket_count = 0;
                ipc_tree_entry_t entry;

                ihgb_lock(bucket);
                for (entry = bucket->ihgb_head;
                     entry != ITE_NULL;
                     entry = entry->ite_next)
                        bucket_count++;
                ihgb_unlock(bucket);

                /* don't touch pageable memory while holding locks */
                info[i].hib_count = bucket_count;
        }

        return ipc_hash_global_size;
}

#endif  /* MACH_IPC_DEBUG */