/*
 * Source: osfmk/ipc/ipc_hash.c (Apple XNU, xnu-123.5).
 */
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: ipc/ipc_hash.c
54 * Author: Rich Draves
55 * Date: 1989
56 *
57 * Entry hash table operations.
58 */
59
60#include <mach/boolean.h>
61#include <mach/port.h>
62#include <kern/lock.h>
63#include <kern/kalloc.h>
64#include <ipc/port.h>
65#include <ipc/ipc_space.h>
66#include <ipc/ipc_object.h>
67#include <ipc/ipc_entry.h>
68#include <ipc/ipc_hash.h>
69#include <ipc/ipc_init.h>
70
71#include <mach_ipc_debug.h>
72
73#if MACH_IPC_DEBUG
74#include <mach/kern_return.h>
75#include <mach_debug/hash_info.h>
76#include <vm/vm_map.h>
77#include <vm/vm_kern.h>
78#endif /* MACH_IPC_DEBUG */
79
/*
 *	Forward declarations
 */

/*
 * Lookup (space, obj) in the global reverse hash table.
 * Returns TRUE and fills *namep/*entryp when a splay-tree
 * entry for the pair is found.
 */
boolean_t ipc_hash_global_lookup(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	*namep,
	ipc_tree_entry_t	*entryp);

/* Insert an entry into the global reverse hash table */
void ipc_hash_global_insert(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_name_t	name,
	ipc_tree_entry_t	entry);

/* Delete an entry from the local reverse hash table */
void ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry);
104
105/*
106 * Routine: ipc_hash_lookup
107 * Purpose:
108 * Converts (space, obj) -> (name, entry).
109 * Returns TRUE if an entry was found.
110 * Conditions:
111 * The space must be locked (read or write) throughout.
112 */
113
114boolean_t
115ipc_hash_lookup(
116 ipc_space_t space,
117 ipc_object_t obj,
118 mach_port_name_t *namep,
119 ipc_entry_t *entryp)
120{
121 boolean_t rv;
122
123 rv = ipc_hash_local_lookup(space, obj, namep, entryp);
124 if (!rv) {
125 assert(!is_fast_space(space) || space->is_tree_hash == 0);
126 if (space->is_tree_hash > 0)
127 rv = ipc_hash_global_lookup(space, obj, namep,
128 (ipc_tree_entry_t *) entryp);
129 }
130 return (rv);
131}
132
133/*
134 * Routine: ipc_hash_insert
135 * Purpose:
136 * Inserts an entry into the appropriate reverse hash table,
137 * so that ipc_hash_lookup will find it.
138 * Conditions:
139 * The space must be write-locked.
140 */
141
142void
143ipc_hash_insert(
144 ipc_space_t space,
145 ipc_object_t obj,
146 mach_port_name_t name,
147 ipc_entry_t entry)
148{
149 mach_port_index_t index;
150
151 index = MACH_PORT_INDEX(name);
152 if ((index < space->is_table_size) &&
153 (entry == &space->is_table[index]))
154 ipc_hash_local_insert(space, obj, index, entry);
155 else {
156 assert(!is_fast_space(space));
157 ipc_hash_global_insert(space, obj, name,
158 (ipc_tree_entry_t) entry);
159 }
160}
161
162/*
163 * Routine: ipc_hash_delete
164 * Purpose:
165 * Deletes an entry from the appropriate reverse hash table.
166 * Conditions:
167 * The space must be write-locked.
168 */
169
170void
171ipc_hash_delete(
172 ipc_space_t space,
173 ipc_object_t obj,
174 mach_port_name_t name,
175 ipc_entry_t entry)
176{
177 mach_port_index_t index;
178
179 index = MACH_PORT_INDEX(name);
180 if ((index < space->is_table_size) &&
181 (entry == &space->is_table[index]))
182 ipc_hash_local_delete(space, obj, index, entry);
183 else {
184 assert(!is_fast_space(space));
185 ipc_hash_global_delete(space, obj, name,
186 (ipc_tree_entry_t) entry);
187 }
188}
189
/*
 *	The global reverse hash table holds splay tree entries.
 *	It is a simple open-chaining hash table with singly-linked buckets.
 *	Each bucket is locked separately, with an exclusive lock.
 *	Within each bucket, move-to-front is used.
 */

typedef natural_t ipc_hash_index_t;

/* bucket count (power of two after ipc_hash_init) and size-1 mask */
ipc_hash_index_t ipc_hash_global_size;
ipc_hash_index_t ipc_hash_global_mask;

/*
 * Mix the space and object pointers into a bucket index.
 * The shifts discard low-order pointer bits -- presumably constant
 * because of allocation alignment (TODO confirm against zone sizes).
 */
#define IH_GLOBAL_HASH(space, obj)				\
	(((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +	\
	(((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &	\
	ipc_hash_global_mask)

/* one bucket: its exclusive lock plus the head of its entry chain */
typedef struct ipc_hash_global_bucket {
	decl_mutex_data(,	ihgb_lock_data)
	ipc_tree_entry_t	ihgb_head;
} *ipc_hash_global_bucket_t;

#define	IHGB_NULL	((ipc_hash_global_bucket_t) 0)

#define	ihgb_lock_init(ihgb)	mutex_init(&(ihgb)->ihgb_lock_data, \
					ETAP_IPC_IHGB)
#define	ihgb_lock(ihgb)		mutex_lock(&(ihgb)->ihgb_lock_data)
#define	ihgb_unlock(ihgb)	mutex_unlock(&(ihgb)->ihgb_lock_data)

/* the bucket array, allocated and initialized in ipc_hash_init */
ipc_hash_global_bucket_t ipc_hash_global_table;
220
221/*
222 * Routine: ipc_hash_global_lookup
223 * Purpose:
224 * Converts (space, obj) -> (name, entry).
225 * Looks in the global table, for splay tree entries.
226 * Returns TRUE if an entry was found.
227 * Conditions:
228 * The space must be locked (read or write) throughout.
229 */
230
231boolean_t
232ipc_hash_global_lookup(
233 ipc_space_t space,
234 ipc_object_t obj,
235 mach_port_name_t *namep,
236 ipc_tree_entry_t *entryp)
237{
238 ipc_hash_global_bucket_t bucket;
239 ipc_tree_entry_t this, *last;
240
241 assert(space != IS_NULL);
242 assert(obj != IO_NULL);
243
244 assert(!is_fast_space(space));
245 bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
246 ihgb_lock(bucket);
247
248 if ((this = bucket->ihgb_head) != ITE_NULL) {
249 if ((this->ite_object == obj) &&
250 (this->ite_space == space)) {
251 /* found it at front; no need to move */
252
253 *namep = this->ite_name;
254 *entryp = this;
255 } else for (last = &this->ite_next;
256 (this = *last) != ITE_NULL;
257 last = &this->ite_next) {
258 if ((this->ite_object == obj) &&
259 (this->ite_space == space)) {
260 /* found it; move to front */
261
262 *last = this->ite_next;
263 this->ite_next = bucket->ihgb_head;
264 bucket->ihgb_head = this;
265
266 *namep = this->ite_name;
267 *entryp = this;
268 break;
269 }
270 }
271 }
272
273 ihgb_unlock(bucket);
274 return this != ITE_NULL;
275}
276
277/*
278 * Routine: ipc_hash_global_insert
279 * Purpose:
280 * Inserts an entry into the global reverse hash table.
281 * Conditions:
282 * The space must be write-locked.
283 */
284
285void
286ipc_hash_global_insert(
287 ipc_space_t space,
288 ipc_object_t obj,
289 mach_port_name_t name,
290 ipc_tree_entry_t entry)
291{
292 ipc_hash_global_bucket_t bucket;
293
294
295 assert(!is_fast_space(space));
296
297
298 assert(entry->ite_name == name);
299 assert(space != IS_NULL);
300 assert(entry->ite_space == space);
301 assert(obj != IO_NULL);
302 assert(entry->ite_object == obj);
303
304 space->is_tree_hash++;
305 assert(space->is_tree_hash <= space->is_tree_total);
306
307 bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
308 ihgb_lock(bucket);
309
310 /* insert at front of bucket */
311
312 entry->ite_next = bucket->ihgb_head;
313 bucket->ihgb_head = entry;
314
315 ihgb_unlock(bucket);
316}
317
318/*
319 * Routine: ipc_hash_global_delete
320 * Purpose:
321 * Deletes an entry from the global reverse hash table.
322 * Conditions:
323 * The space must be write-locked.
324 */
325
326void
327ipc_hash_global_delete(
328 ipc_space_t space,
329 ipc_object_t obj,
330 mach_port_name_t name,
331 ipc_tree_entry_t entry)
332{
333 ipc_hash_global_bucket_t bucket;
334 ipc_tree_entry_t this, *last;
335
336 assert(!is_fast_space(space));
337
338 assert(entry->ite_name == name);
339 assert(space != IS_NULL);
340 assert(entry->ite_space == space);
341 assert(obj != IO_NULL);
342 assert(entry->ite_object == obj);
343
344 assert(space->is_tree_hash > 0);
345 space->is_tree_hash--;
346
347 bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
348 ihgb_lock(bucket);
349
350 for (last = &bucket->ihgb_head;
351 (this = *last) != ITE_NULL;
352 last = &this->ite_next) {
353 if (this == entry) {
354 /* found it; remove from bucket */
355
356 *last = this->ite_next;
357 break;
358 }
359 }
360 assert(this != ITE_NULL);
361
362 ihgb_unlock(bucket);
363}
364
365/*
366 * Each space has a local reverse hash table, which holds
367 * entries from the space's table. In fact, the hash table
368 * just uses a field (ie_index) in the table itself.
369 *
370 * The local hash table is an open-addressing hash table,
371 * which means that when a collision occurs, instead of
372 * throwing the entry into a bucket, the entry is rehashed
373 * to another position in the table. In this case the rehash
374 * is very simple: linear probing (ie, just increment the position).
375 * This simple rehash makes deletions tractable (they're still a pain),
376 * but it means that collisions tend to build up into clumps.
377 *
378 * Because at least one entry in the table (index 0) is always unused,
379 * there will always be room in the reverse hash table. If a table
380 * with n slots gets completely full, the reverse hash table will
381 * have one giant clump of n-1 slots and one free slot somewhere.
382 * Because entries are only entered into the reverse table if they
383 * are pure send rights (not receive, send-once, port-set,
384 * or dead-name rights), and free entries of course aren't entered,
385 * I expect the reverse hash table won't get unreasonably full.
386 *
387 * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
388 * pp. 135-142.) may be desirable here. They can dramatically help
389 * unsuccessful lookups. But unsuccessful lookups are almost always
390 * followed by insertions, and those slow down somewhat. They
391 * also can help deletions somewhat. Successful lookups aren't affected.
392 * So possibly a small win; probably nothing significant.
393 */
394
/*
 * Hash an object pointer into [0, size) for the local table.
 * The low 6 bits are discarded -- presumably constant due to object
 * allocation alignment (TODO confirm against the zone element size).
 */
#define	IH_LOCAL_HASH(obj, size)				\
		((((mach_port_index_t) (obj)) >> 6) % (size))
397
398/*
399 * Routine: ipc_hash_local_lookup
400 * Purpose:
401 * Converts (space, obj) -> (name, entry).
402 * Looks in the space's local table, for table entries.
403 * Returns TRUE if an entry was found.
404 * Conditions:
405 * The space must be locked (read or write) throughout.
406 */
407
408boolean_t
409ipc_hash_local_lookup(
410 ipc_space_t space,
411 ipc_object_t obj,
412 mach_port_name_t *namep,
413 ipc_entry_t *entryp)
414{
415 ipc_entry_t table;
416 ipc_entry_num_t size;
417 mach_port_index_t hindex, index;
418
419 assert(space != IS_NULL);
420 assert(obj != IO_NULL);
421
422 table = space->is_table;
423 size = space->is_table_size;
424 hindex = IH_LOCAL_HASH(obj, size);
425
426 /*
427 * Ideally, table[hindex].ie_index is the name we want.
428 * However, must check ie_object to verify this,
429 * because collisions can happen. In case of a collision,
430 * search farther along in the clump.
431 */
432
433 while ((index = table[hindex].ie_index) != 0) {
434 ipc_entry_t entry = &table[index];
435
436 if (entry->ie_object == obj) {
437 *entryp = entry;
438 *namep = MACH_PORT_MAKE(index,
439 IE_BITS_GEN(entry->ie_bits));
440 return TRUE;
441 }
442
443 if (++hindex == size)
444 hindex = 0;
445 }
446
447 return FALSE;
448}
449
450/*
451 * Routine: ipc_hash_local_insert
452 * Purpose:
453 * Inserts an entry into the space's reverse hash table.
454 * Conditions:
455 * The space must be write-locked.
456 */
457
458void
459ipc_hash_local_insert(
460 ipc_space_t space,
461 ipc_object_t obj,
462 mach_port_index_t index,
463 ipc_entry_t entry)
464{
465 ipc_entry_t table;
466 ipc_entry_num_t size;
467 mach_port_index_t hindex;
468
469 assert(index != 0);
470 assert(space != IS_NULL);
471 assert(obj != IO_NULL);
472
473 table = space->is_table;
474 size = space->is_table_size;
475 hindex = IH_LOCAL_HASH(obj, size);
476
477 assert(entry == &table[index]);
478 assert(entry->ie_object == obj);
479
480 /*
481 * We want to insert at hindex, but there may be collisions.
482 * If a collision occurs, search for the end of the clump
483 * and insert there.
484 */
485
486 while (table[hindex].ie_index != 0) {
487 if (++hindex == size)
488 hindex = 0;
489 }
490
491 table[hindex].ie_index = index;
492}
493
494/*
495 * Routine: ipc_hash_local_delete
496 * Purpose:
497 * Deletes an entry from the space's reverse hash table.
498 * Conditions:
499 * The space must be write-locked.
500 */
501
/*
 *	Routine:	ipc_hash_local_delete
 *	Purpose:
 *		Deletes an entry from the space's reverse hash table.
 *	Conditions:
 *		The space must be write-locked.
 */

void
ipc_hash_local_delete(
	ipc_space_t		space,
	ipc_object_t		obj,
	mach_port_index_t	index,
	ipc_entry_t		entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t hindex, dindex;

	assert(index != MACH_PORT_NULL);
	assert(space != IS_NULL);
	assert(obj != IO_NULL);

	table = space->is_table;
	size = space->is_table_size;
	hindex = IH_LOCAL_HASH(obj, size);

	assert(entry == &table[index]);
	assert(entry->ie_object == obj);

	/*
	 * First check we have the right hindex for this index.
	 * In case of collision, we have to search farther
	 * along in this clump.
	 */

	while (table[hindex].ie_index != index) {
		if (++hindex == size)
			hindex = 0;
	}

	/*
	 * Now we want to set table[hindex].ie_index = 0.
	 * But if we aren't the last index in a clump,
	 * this might cause problems for lookups of objects
	 * farther along in the clump that are displaced
	 * due to collisions.  Searches for them would fail
	 * at hindex instead of succeeding.
	 *
	 * So we must check the clump after hindex for objects
	 * that are so displaced, and move one up to the new hole.
	 *
	 *	hindex - index of new hole in the clump
	 *	dindex - index we are checking for a displaced object
	 *
	 * When we move a displaced object up into the hole,
	 * it creates a new hole, and we have to repeat the process
	 * until we get to the end of the clump.
	 */

	/*
	 * Outer loop: fill the hole at hindex with a displaced entry
	 * (index), which opens a new hole at dindex; repeat until the
	 * inner loop reaches the empty slot ending the clump (index == 0),
	 * at which point the final hole is cleared.
	 */
	for (dindex = hindex; index != 0; hindex = dindex) {
		for (;;) {
			mach_port_index_t tindex;
			ipc_object_t tobj;

			if (++dindex == size)
				dindex = 0;
			assert(dindex != hindex);

			/* are we at the end of the clump? */

			index = table[dindex].ie_index;
			if (index == 0)
				break;

			/* is this a displaced object? */

			tobj = table[index].ie_object;
			assert(tobj != IO_NULL);
			tindex = IH_LOCAL_HASH(tobj, size);

			/*
			 * The entry at dindex may move into the hole only if
			 * its natural slot (tindex) does NOT lie strictly
			 * between the hole and dindex -- otherwise moving it
			 * would place it before its natural slot and later
			 * lookups would miss it.  The two arms of the
			 * conditional handle the wrapped and unwrapped cases
			 * of the hindex..dindex interval.
			 */
			if ((dindex < hindex) ?
			    ((dindex < tindex) && (tindex <= hindex)) :
			    ((dindex < tindex) || (tindex <= hindex)))
				break;
		}

		table[hindex].ie_index = index;
	}
}
584
585/*
586 * Routine: ipc_hash_init
587 * Purpose:
588 * Initialize the reverse hash table implementation.
589 */
590
591void
592ipc_hash_init(void)
593{
594 ipc_hash_index_t i;
595
596 /* if not configured, initialize ipc_hash_global_size */
597
598 if (ipc_hash_global_size == 0) {
599 ipc_hash_global_size = ipc_tree_entry_max >> 8;
600 if (ipc_hash_global_size < 32)
601 ipc_hash_global_size = 32;
602 }
603
604 /* make sure it is a power of two */
605
606 ipc_hash_global_mask = ipc_hash_global_size - 1;
607 if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
608 natural_t bit;
609
610 /* round up to closest power of two */
611
612 for (bit = 1;; bit <<= 1) {
613 ipc_hash_global_mask |= bit;
614 ipc_hash_global_size = ipc_hash_global_mask + 1;
615
616 if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
617 break;
618 }
619 }
620
621 /* allocate ipc_hash_global_table */
622
623 ipc_hash_global_table = (ipc_hash_global_bucket_t)
624 kalloc((vm_size_t) (ipc_hash_global_size *
625 sizeof(struct ipc_hash_global_bucket)));
626 assert(ipc_hash_global_table != IHGB_NULL);
627
628 /* and initialize it */
629
630 for (i = 0; i < ipc_hash_global_size; i++) {
631 ipc_hash_global_bucket_t bucket;
632
633 bucket = &ipc_hash_global_table[i];
634 ihgb_lock_init(bucket);
635 bucket->ihgb_head = ITE_NULL;
636 }
637}
638
639#if MACH_IPC_DEBUG
640
641/*
642 * Routine: ipc_hash_info
643 * Purpose:
644 * Return information about the global reverse hash table.
645 * Fills the buffer with as much information as possible
646 * and returns the desired size of the buffer.
647 * Conditions:
648 * Nothing locked. The caller should provide
649 * possibly-pageable memory.
650 */
651
652
653ipc_hash_index_t
654ipc_hash_info(
655 hash_info_bucket_t *info,
656 mach_msg_type_number_t count)
657{
658 ipc_hash_index_t i;
659
660 if (ipc_hash_global_size < count)
661 count = ipc_hash_global_size;
662
663 for (i = 0; i < count; i++) {
664 ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
665 unsigned int bucket_count = 0;
666 ipc_tree_entry_t entry;
667
668 ihgb_lock(bucket);
669 for (entry = bucket->ihgb_head;
670 entry != ITE_NULL;
671 entry = entry->ite_next)
672 bucket_count++;
673 ihgb_unlock(bucket);
674
675 /* don't touch pageable memory while holding locks */
676 info[i].hib_count = bucket_count;
677 }
678
679 return ipc_hash_global_size;
680}
681
682#endif /* MACH_IPC_DEBUG */