]> git.saurik.com Git - apple/dyld.git/blob - include/objc-shared-cache.h
dyld-832.7.1.tar.gz
[apple/dyld.git] / include / objc-shared-cache.h
1 /*
2 * Copyright (c) 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /*
25 Portions derived from:
26
27 --------------------------------------------------------------------
28 lookup8.c, by Bob Jenkins, January 4 1997, Public Domain.
29 hash(), hash2(), hash3, and mix() are externally useful functions.
30 Routines to test the hash are included if SELF_TEST is defined.
31 You can use this free for any purpose. It has no warranty.
32 --------------------------------------------------------------------
33
34 ------------------------------------------------------------------------------
35 perfect.c: code to generate code for a hash for perfect hashing.
36 (c) Bob Jenkins, September 1996, December 1999
37 You may use this code in any way you wish, and it is free. No warranty.
38 I hereby place this in the public domain.
39 Source is http://burtleburtle.net/bob/c/perfect.c
40 ------------------------------------------------------------------------------
41 */
42
43 /*
44 * objc-selopt.h
45 * Interface between libobjc and dyld
46 * for selector uniquing in the dyld shared cache.
47 *
48 * When building the shared cache, dyld locates all selectors and selector
49 * references in the cached images. It builds a perfect hash table out of
50 * them and writes the table into the shared cache copy of libobjc.
51 * libobjc then uses that table as the builtin selector list.
52 *
53 * Versioning
54 * The table has a version number. dyld and objc can both ignore the table
55 * if the other used the wrong version number.
56 *
57 * Completeness
58 * Not all libraries are in the shared cache. Libraries that are in the
59 * shared cache and were optimized are specially marked. Libraries on
60 * disk never include those marks.
61 *
62 * Coherency
63 * Libraries optimized in the shared cache can be replaced by unoptimized
64 * copies from disk when loaded. The copy from disk is not marked and will
65 * be fixed up by libobjc. The shared cache copy is still mapped into the
66 * process, so the table can point to cstring data in that library's part
67 * of the shared cache without trouble.
68 *
69 * Atomicity
70 * dyld writes the table itself last. If dyld marks some metadata as
71 * updated but then fails to write a table for some reason, libobjc
72 * fixes up all metadata as if it were not marked.
73 */
74
75 #ifndef _OBJC_SELOPT_H
76 #define _OBJC_SELOPT_H
77
78 /*
79 DO NOT INCLUDE ANY objc HEADERS HERE
80 dyld USES THIS FILE AND CANNOT SEE THEM
81 */
82 #include <stdint.h>
83 #include <stdlib.h>
84 #ifdef CLOSURE_SELOPT_WRITE
85 #include "Array.h"
86 #include "Map.h"
87 #endif
88 #ifdef SELOPT_WRITE
89 #include <unordered_map>
90 #endif
91 /*
92 DO NOT INCLUDE ANY objc HEADERS HERE
93 dyld USES THIS FILE AND CANNOT SEE THEM
94 */
95
#ifndef STATIC_ASSERT
// Pre-C++11 compile-time assertion: expands to a typedef of a struct whose
// array member has size -1 when the condition is false, forcing a compile
// error on the offending line. __LINE__ makes each instantiation's name
// unique; __attribute__((unavailable)) keeps the typedef from being used.
#   define STATIC_ASSERT(x) _STATIC_ASSERT2(x, __LINE__)
#   define _STATIC_ASSERT2(x, line) _STATIC_ASSERT3(x, line)
#   define _STATIC_ASSERT3(x, line) \
        typedef struct { \
            int _static_assert[(x) ? 0 : -1]; \
        } _static_assert_ ## line __attribute__((unavailable))
#endif

// When 1, getIndex() abort()s if the checkbyte quick-reject disagrees with
// the full string comparison (debug verification of the checkbyte scheme).
#define SELOPT_DEBUG 0

// Endian helpers used by the byteswap() methods below: rewrite a field in
// place from host byte order to the requested on-disk byte order.
// `little_endian` is expected to be a local in the calling function.
#define S32(x) x = little_endian ? OSSwapHostToLittleInt32(x) : OSSwapHostToBigInt32(x)
#define S64(x) x = little_endian ? OSSwapHostToLittleInt64(x) : OSSwapHostToBigInt64(x)
109
namespace objc_opt {

// Table entries are 32-bit offsets, relative to the start of the table
// (i.e. added to `this` to form a pointer).
typedef int32_t objc_stringhash_offset_t;
// One-byte quick-reject value stored per table slot (see checkbyte()).
typedef uint8_t objc_stringhash_check_t;

// Bob Jenkins' lookup8 hash; defined near the bottom of this header.
static uint64_t lookup8( uint8_t *k, size_t length, uint64_t level);
116
117 #if defined(SELOPT_WRITE) || defined(CLOSURE_SELOPT_WRITE)
118
119 // Perfect hash code is at the end of this file.
120
// Output of perfect-hash generation (make_perfect): the parameters that the
// writer copies into an objc_stringhash_t in the shared cache.
struct __attribute__((packed)) perfect_hash {
    uint32_t capacity;      // table size; 0 means generation failed
    uint32_t occupied;      // number of keys actually hashed
    uint32_t shift;         // hash>>shift selects the high part of the index
    uint32_t mask;          // hash&mask indexes tab[] (tab has mask+1 entries)
    uint64_t salt;          // seed ("level") passed to lookup8()

    uint32_t scramble[256]; // maps tab[] bytes to values XORed into the index
    dyld3::OverflowSafeArray<uint8_t> tab; // count == mask+1

    perfect_hash() { }

    ~perfect_hash() { }
};
135
// Equality functor for C strings: compares character contents rather than
// pointer identity. Used as the KeyEqual of the unordered maps below.
struct eqstr {
    bool operator()(const char* lhs, const char* rhs) const {
        return 0 == strcmp(lhs, rhs);
    }
};
141
142 struct hashstr {
143 size_t operator()(const char *s) const {
144 return (size_t)lookup8((uint8_t *)s, strlen(s), 0);
145 }
146 };
147
148 #endif // defined(SELOPT_WRITE) || defined(CLOSURE_SELOPT_WRITE)
149
150 #ifdef SELOPT_WRITE
151
// cstring => cstring's vmaddress
// (used for selector names and class names)
typedef std::unordered_map<const char *, uint64_t, hashstr, eqstr> string_map;

// protocol name => protocol vmaddress
// (old-style table; see objc_protocolopt_t)
typedef std::unordered_map<const char *, uint64_t, hashstr, eqstr> legacy_protocol_map;

// protocol name => (protocol vmaddress, header_info vmaddress)
// multimap: the same protocol name may be defined in several images
typedef std::unordered_multimap<const char *, std::pair<uint64_t, uint64_t>, hashstr, eqstr> protocol_map;

// class name => (class vmaddress, header_info vmaddress)
// multimap: the same class name may be defined in several images
typedef std::unordered_multimap<const char *, std::pair<uint64_t, uint64_t>, hashstr, eqstr> class_map;

// Defined with the perfect hash code at the end of this file.
// On failure, leaves phash.capacity == 0.
static void make_perfect(const string_map& strings, perfect_hash& phash);
166
167 #endif // defined(SELOPT_WRITE)
168
169
// Precomputed perfect hash table of strings.
// Base class for precomputed selector table and class table.
// Edit objc-sel-table.s if you change this structure.
//
// Memory layout: the fixed header below is immediately followed by three
// variable-length arrays, located arithmetically by the accessors:
//   tab[mask+1], checkbytes[capacity], offsets[capacity].
// All stored offsets are relative to the start of this structure.
struct __attribute__((packed)) objc_stringhash_t {
    uint32_t capacity;
    uint32_t occupied;
    uint32_t shift;
    uint32_t mask;
    uint32_t unused1; // was zero
    uint32_t unused2; // alignment pad
    uint64_t salt;

    uint32_t scramble[256];
    uint8_t tab[0];                   /* tab[mask+1] (always power-of-2) */
    // uint8_t checkbytes[capacity];  /* check byte for each string */
    // int32_t offsets[capacity];     /* offsets from &capacity to cstrings */

    objc_stringhash_check_t *checkbytes() { return (objc_stringhash_check_t *)&tab[mask+1]; }
    const objc_stringhash_check_t *checkbytes() const { return (const objc_stringhash_check_t *)&tab[mask+1]; }

    objc_stringhash_offset_t *offsets() { return (objc_stringhash_offset_t *)&checkbytes()[capacity]; }
    const objc_stringhash_offset_t *offsets() const { return (const objc_stringhash_offset_t *)&checkbytes()[capacity]; }

    // Perfect-hash a key: Jenkins lookup8 seeded with `salt`, then combine
    // the high bits (>>shift) with scramble[tab[low bits & mask]].
    uint32_t hash(const char *key, size_t keylen) const
    {
        uint64_t val = lookup8((uint8_t*)key, keylen, salt);
        uint32_t index = (uint32_t)(val>>shift) ^ scramble[tab[val&mask]];
        return index;
    }

    uint32_t hash(const char *key) const
    {
        return hash(key, strlen(key));
    }

    // The check bytes are used to reject strings that aren't in the table
    // without paging in the table's cstring data. This checkbyte calculation
    // catches 4785/4815 rejects when launching Safari; a perfect checkbyte
    // would catch 4796/4815.
    // Layout: top 3 bits from the first character, low 5 bits from length.
    objc_stringhash_check_t checkbyte(const char *key, size_t keylen) const
    {
        return
            ((key[0] & 0x7) << 5)
            |
            ((uint8_t)keylen & 0x1f);
    }

    objc_stringhash_check_t checkbyte(const char *key) const
    {
        return checkbyte(key, strlen(key));
    }


#define INDEX_NOT_FOUND (~(uint32_t)0)

    // Returns the table index of `key`, or INDEX_NOT_FOUND if the key is
    // absent (checkbyte mismatch, empty slot, or string mismatch).
    uint32_t getIndex(const char *key) const
    {
        size_t keylen = strlen(key);
        uint32_t h = hash(key, keylen);

        // Use check byte to reject without paging in the table's cstrings
        objc_stringhash_check_t h_check = checkbytes()[h];
        objc_stringhash_check_t key_check = checkbyte(key, keylen);
        bool check_fail = (h_check != key_check);
#if ! SELOPT_DEBUG
        if (check_fail) return INDEX_NOT_FOUND;
#endif

        // Offset 0 marks an unoccupied slot (see write() below).
        objc_stringhash_offset_t offset = offsets()[h];
        if (offset == 0) return INDEX_NOT_FOUND;
        const char *result = (const char *)this + offset;
        if (0 != strcmp(key, result)) return INDEX_NOT_FOUND;

#if SELOPT_DEBUG
        // Debug build: the checkbyte must never reject a key that the full
        // string comparison accepts.
        if (check_fail) abort();
#endif

        return h;
    }

#ifdef SELOPT_WRITE

    // Total size in bytes, including the three trailing arrays.
    size_t size()
    {
        return sizeof(objc_stringhash_t)
            + mask+1
            + capacity * sizeof(objc_stringhash_check_t)
            + capacity * sizeof(objc_stringhash_offset_t);
    }

    // Convert all multi-byte fields to the requested on-disk endianness.
    void byteswap(bool little_endian)
    {
        // tab and checkbytes are arrays of bytes, no swap needed
        for (uint32_t i = 0; i < 256; i++) {
            S32(scramble[i]);
        }
        objc_stringhash_offset_t *o = offsets();
        for (uint32_t i = 0; i < capacity; i++) {
            S32(o[i]);
        }

        // Header fields are swapped last: offsets() above needs the
        // host-order values of mask and capacity.
        S32(capacity);
        S32(occupied);
        S32(shift);
        S32(mask);
        S64(salt);
    }

    // Build the table from `strings` (cstring -> vmaddress). `base` is the
    // vmaddress of this table; stored offsets are relative to it.
    // `remaining` is the space available at `this`.
    // Returns NULL on success or a static error message.
    const char *write(uint64_t base, size_t remaining, string_map& strings)
    {
        if (sizeof(objc_stringhash_t) > remaining) {
            return "selector section too small (metadata not optimized)";
        }

        // No strings: emit an all-zero (empty) table.
        if (strings.size() == 0) {
            bzero(this, sizeof(objc_stringhash_t));
            return NULL;
        }

        perfect_hash phash;
        make_perfect(strings, phash);
        if (phash.capacity == 0) {
            return "perfect hash failed (metadata not optimized)";
        }

        // Set header
        capacity = phash.capacity;
        occupied = phash.occupied;
        shift = phash.shift;
        mask = phash.mask;
        unused1 = 0;
        unused2 = 0;
        salt = phash.salt;

        // Re-check now that capacity/mask determine the real size.
        if (size() > remaining) {
            return "selector section too small (metadata not optimized)";
        }

        // Set hash data
        for (uint32_t i = 0; i < 256; i++) {
            scramble[i] = phash.scramble[i];
        }
        for (uint32_t i = 0; i < phash.mask+1; i++) {
            tab[i] = phash.tab[i];
        }

        // Set offsets to 0
        for (uint32_t i = 0; i < phash.capacity; i++) {
            offsets()[i] = 0;
        }
        // Set checkbytes to 0
        for (uint32_t i = 0; i < phash.capacity; i++) {
            checkbytes()[i] = 0;
        }

        // Set real string offsets and checkbytes.
        // SHIFT detects int32_t overflow: the 64-bit offset must survive a
        // sign-extending round trip through the stored 32-bit width.
# define SHIFT (64 - 8*sizeof(objc_stringhash_offset_t))
        string_map::const_iterator s;
        for (s = strings.begin(); s != strings.end(); ++s) {
            int64_t offset = s->second - base;
            if ((offset<<SHIFT)>>SHIFT != offset) {
                return "selector offset too big (metadata not optimized)";
            }

            uint32_t h = hash(s->first);
            offsets()[h] = (objc_stringhash_offset_t)offset;
            checkbytes()[h] = checkbyte(s->first);
        }
# undef SHIFT

        return NULL;
    }

// SELOPT_WRITE
#endif
};
346
// Precomputed selector table.
// Edit objc-sel-table.s if you change this structure.
struct objc_selopt_t : objc_stringhash_t {

    // Returns the selector name stored at `index`.
    // `index` must be a valid index (e.g. returned by getIndexForKey).
    const char* getEntryForIndex(uint32_t index) const {
        return (const char *)this + offsets()[index];
    }

    // Returns the table index for `key`, or getSentinelIndex() if absent.
    uint32_t getIndexForKey(const char *key) const {
        return getIndex(key);
    }

    // The "not found" value returned by getIndexForKey().
    uint32_t getSentinelIndex() const {
        return INDEX_NOT_FOUND;
    }

    // Returns the uniqued selector string equal to `key`, or NULL.
    const char* get(const char *key) const
    {
        uint32_t h = getIndex(key);
        if (h == INDEX_NOT_FOUND) return NULL;
        return getEntryForIndex(h);
    }

    // NOTE: returns the table capacity, not the occupied count;
    // unoccupied slots have offset 0 (see objc_stringhash_t::write).
    size_t usedCount() const {
        return capacity;
    }
};
374
375 // Precomputed class list.
376 // Edit objc-sel-table.s if you change these structures.
377
// One class-table entry: a (class, header_info) pair of table-relative
// offsets, or — when several images define the same name — a tagged
// reference into the duplicate overflow area.
struct objc_classheader_t {
    objc_stringhash_offset_t clsOffset; // offset to class_t, or duplicate encoding below
    objc_stringhash_offset_t hiOffset;  // offset to header_info, or duplicate start index

    // For duplicate class names:
    // clsOffset = count<<1 | 1   (the low bit tags the duplicate case)
    // duplicated classes are duplicateOffsets[hiOffset..hiOffset+count-1]
    bool isDuplicate() const { return clsOffset & 1; }
    uint32_t duplicateCount() const { return clsOffset >> 1; }
    uint32_t duplicateIndex() const { return hiOffset; }
};
389
390
// Precomputed class table: perfect-hashed class name -> (class_t,
// header_info), with an overflow area for names defined in multiple images.
struct objc_clsopt_t : objc_stringhash_t {
    // ...objc_stringhash_t fields...
    // objc_classheader_t classOffsets[capacity]; /* offsets from &capacity to class_t and header_info */
    // uint32_t duplicateCount;
    // objc_classheader_t duplicateOffsets[duplicatedClasses];

    // Accessors for the variable-length arrays that follow the base table.
    objc_classheader_t *classOffsets() { return (objc_classheader_t *)&offsets()[capacity]; }
    const objc_classheader_t *classOffsets() const { return (const objc_classheader_t *)&offsets()[capacity]; }

    uint32_t& duplicateCount() { return *(uint32_t *)&classOffsets()[capacity]; }
    const uint32_t& duplicateCount() const { return *(const uint32_t *)&classOffsets()[capacity]; }

    objc_classheader_t *duplicateOffsets() { return (objc_classheader_t *)(&duplicateCount()+1); }
    const objc_classheader_t *duplicateOffsets() const { return (const objc_classheader_t *)(&duplicateCount()+1); }

    // Returns the class name stored at `index` (must be a valid index).
    const char* getClassNameForIndex(uint32_t index) const {
        return (const char *)this + offsets()[index];
    }

    // Returns the class at `index`. For duplicated names, `duplicateIndex`
    // selects among the copies in the overflow area.
    void* getClassForIndex(uint32_t index, uint32_t duplicateIndex) const {
        const objc_classheader_t& clshi = classOffsets()[index];
        if (! clshi.isDuplicate()) {
            // class appears in exactly one header
            return (void *)((const char *)this + clshi.clsOffset);
        }
        else {
            // class appears in more than one header - use getClassesAndHeaders
            const objc_classheader_t *list = &duplicateOffsets()[clshi.duplicateIndex()];
            return (void *)((const char *)this + list[duplicateIndex].clsOffset);
        }
    }

    // Looks up `key` and also reports its table index.
    // 0/NULL/NULL: not found
    // 1/ptr/ptr: found exactly one
    // n/NULL/NULL: found N - use getClassesAndHeaders() instead
    uint32_t getClassHeaderAndIndex(const char *key, void*& cls, void*& hi, uint32_t& index) const
    {
        uint32_t h = getIndex(key);
        if (h == INDEX_NOT_FOUND) {
            cls = NULL;
            hi = NULL;
            index = 0;
            return 0;
        }

        index = h;

        const objc_classheader_t& clshi = classOffsets()[h];
        if (! clshi.isDuplicate()) {
            // class appears in exactly one header
            cls = (void *)((const char *)this + clshi.clsOffset);
            hi = (void *)((const char *)this + clshi.hiOffset);
            return 1;
        }
        else {
            // class appears in more than one header - use getClassesAndHeaders
            cls = NULL;
            hi = NULL;
            return clshi.duplicateCount();
        }
    }

    // Fills cls[] and hi[] with every copy of `key`. The caller must size
    // both arrays using the count returned by getClassHeaderAndIndex().
    // Does nothing if `key` is not present.
    void getClassesAndHeaders(const char *key, void **cls, void **hi) const
    {
        uint32_t h = getIndex(key);
        if (h == INDEX_NOT_FOUND) return;

        const objc_classheader_t& clshi = classOffsets()[h];
        if (! clshi.isDuplicate()) {
            // class appears in exactly one header
            cls[0] = (void *)((const char *)this + clshi.clsOffset);
            hi[0] = (void *)((const char *)this + clshi.hiOffset);
        }
        else {
            // class appears in more than one header
            uint32_t count = clshi.duplicateCount();
            const objc_classheader_t *list =
                &duplicateOffsets()[clshi.duplicateIndex()];
            for (uint32_t i = 0; i < count; i++) {
                cls[i] = (void *)((const char *)this + list[i].clsOffset);
                hi[i] = (void *)((const char *)this + list[i].hiOffset);
            }
        }
    }

    // Convenience wrapper around getClassHeaderAndIndex() for callers that
    // do not need the table index.
    // 0/NULL/NULL: not found
    // 1/ptr/ptr: found exactly one
    // n/NULL/NULL: found N - use getClassesAndHeaders() instead
    uint32_t getClassAndHeader(const char *key, void*& cls, void*& hi) const
    {
        uint32_t unusedIndex = 0;
        return getClassHeaderAndIndex(key, cls, hi, unusedIndex);
    }

#ifdef SELOPT_WRITE

    // Total size in bytes, including the duplicate overflow area.
    // Only meaningful once duplicateCount() has been written.
    size_t size()
    {
        return
            objc_stringhash_t::size()
            + capacity * sizeof(objc_classheader_t)
            + sizeof(duplicateCount())
            + duplicateCount() * sizeof(objc_classheader_t);
    }

    // Size in bytes without the duplicate overflow area.
    size_t sizeWithoutDups()
    {
        return
            objc_stringhash_t::size()
            + capacity * sizeof(objc_classheader_t);
    }

    // Convert all multi-byte fields to the requested on-disk endianness.
    // duplicateCount() is swapped only after the loop that reads it, and
    // the base table last, since its accessors need host-order fields.
    void byteswap(bool little_endian)
    {
        objc_classheader_t *o;

        o = classOffsets();
        for (uint32_t i = 0; i < capacity; i++) {
            S32(o[i].clsOffset);
            S32(o[i].hiOffset);
        }

        o = duplicateOffsets();
        for (uint32_t i = 0; i < duplicateCount(); i++) {
            S32(o[i].clsOffset);
            S32(o[i].hiOffset);
        }

        S32(duplicateCount());

        objc_stringhash_t::byteswap(little_endian);
    }

    // Build the class table. The base name table is written first so that
    // getIndex() works below. `classes` maps name -> (class vmaddr,
    // header_info vmaddr); duplicates share one hashed slot plus a run of
    // entries in the overflow area. Returns NULL on success or a static
    // error message.
    const char *write(uint64_t base, size_t remaining,
                      string_map& strings, class_map& classes, bool verbose)
    {
        const char *err;
        err = objc_stringhash_t::write(base, remaining, strings);
        if (err) return err;

        if (sizeWithoutDups() > remaining) {
            return "selector section too small (metadata not optimized)";
        }
        if (size() > remaining) {
            return "selector section too small (metadata not optimized)";
        }

        // Set class offsets to 0
        for (uint32_t i = 0; i < capacity; i++) {
            classOffsets()[i].clsOffset = 0;
            classOffsets()[i].hiOffset = 0;
        }

        // Set real class offsets.
        // SHIFT detects int32_t overflow: the 64-bit offset must survive a
        // sign-extending round trip through the stored 32-bit width.
# define SHIFT (64 - 8*sizeof(objc_stringhash_offset_t))
        class_map::const_iterator c;
        for (c = classes.begin(); c != classes.end(); ++c) {
            uint32_t h = getIndex(c->first);
            if (h == INDEX_NOT_FOUND) {
                return "class list busted (metadata not optimized)";
            }

            if (classOffsets()[h].clsOffset != 0) {
                // already did this class
                continue;
            }

            uint32_t count = (uint32_t)classes.count(c->first);
            if (count == 1) {
                // only one class with this name

                int64_t coff = c->second.first - base;
                int64_t hoff = c->second.second - base;
                if ((coff<<SHIFT)>>SHIFT != coff) {
                    return "class offset too big (metadata not optimized)";
                }
                if ((hoff<<SHIFT)>>SHIFT != hoff) {
                    return "header offset too big (metadata not optimized)";
                }

                classOffsets()[h].clsOffset = (objc_stringhash_offset_t)coff;
                classOffsets()[h].hiOffset = (objc_stringhash_offset_t)hoff;
            }
            else {
                // class name has duplicates - write them all now
                if (verbose) {
                    fprintf(stderr, "update_dyld_shared_cache: %u duplicates of Objective-C class %s\n", count, c->first);
                }

                // Reserve `count` consecutive slots in the overflow area,
                // then re-check that the grown table still fits.
                uint32_t dest = duplicateCount();
                duplicateCount() += count;
                if (size() > remaining) {
                    return "selector section too small (metadata not optimized)";
                }

                // classOffsets() instead contains count and array index
                classOffsets()[h].clsOffset = count*2 + 1;
                classOffsets()[h].hiOffset = dest;

                std::pair<class_map::const_iterator, class_map::const_iterator>
                    duplicates = classes.equal_range(c->first);
                class_map::const_iterator dup;
                for (dup = duplicates.first; dup != duplicates.second; ++dup) {
                    int64_t coff = dup->second.first - base;
                    int64_t hoff = dup->second.second - base;
                    if ((coff<<SHIFT)>>SHIFT != coff) {
                        return "class offset too big (metadata not optimized)";
                    }
                    if ((hoff<<SHIFT)>>SHIFT != hoff) {
                        return "header offset too big (metadata not optimized)";
                    }

                    duplicateOffsets()[dest].clsOffset = (objc_stringhash_offset_t)coff;
                    duplicateOffsets()[dest].hiOffset = (objc_stringhash_offset_t)hoff;
                    dest++;
                }
            }
        }
# undef SHIFT

        return NULL;
    }

// SELOPT_WRITE
#endif
};
617
618
// Old-style precomputed protocol table: perfect-hashed protocol name ->
// single protocol offset, with no per-image header information.
// Superseded by objc_protocolopt2_t (see objc_opt_t below).
struct objc_protocolopt_t : objc_stringhash_t {
    // ...objc_stringhash_t fields...
    // uint32_t protocolOffsets[capacity]; /* offsets from &capacity to protocol_t */

    // Accessor for the variable-length array that follows the base table.
    objc_stringhash_offset_t *protocolOffsets() { return (objc_stringhash_offset_t *)&offsets()[capacity]; }
    const objc_stringhash_offset_t *protocolOffsets() const { return (const objc_stringhash_offset_t *)&offsets()[capacity]; }

    // Returns the protocol named `key`, or NULL if not in the table.
    void* getProtocol(const char *key) const
    {
        uint32_t h = getIndex(key);
        if (h == INDEX_NOT_FOUND) {
            return NULL;
        }

        return (void *)((const char *)this + protocolOffsets()[h]);
    }

#ifdef SELOPT_WRITE

    // Total size in bytes, including the protocol offset array.
    size_t size()
    {
        return
            objc_stringhash_t::size() + capacity * sizeof(objc_stringhash_offset_t);
    }

    // Convert all multi-byte fields to the requested on-disk endianness.
    // The base table is swapped last: protocolOffsets() above needs the
    // host-order capacity.
    void byteswap(bool little_endian)
    {
        objc_stringhash_offset_t *o;

        o = protocolOffsets();
        for (objc_stringhash_offset_t i = 0; i < (int)capacity; i++) {
            S32(o[i]);
        }

        objc_stringhash_t::byteswap(little_endian);
    }

    // Build the protocol table. The base name table is written first so
    // getIndex() works below. Returns NULL on success or a static error
    // message.
    const char *write(uint64_t base, size_t remaining,
                      string_map& strings, legacy_protocol_map& protocols,
                      bool verbose)
    {
        const char *err;
        err = objc_stringhash_t::write(base, remaining, strings);
        if (err) return err;

        if (size() > remaining) {
            return "selector section too small (metadata not optimized)";
        }

        // Set protocol offsets to 0
        for (uint32_t i = 0; i < capacity; i++) {
            protocolOffsets()[i] = 0;
        }

        // Set real protocol offsets.
        // SHIFT detects int32_t overflow: the 64-bit offset must survive a
        // sign-extending round trip through the stored 32-bit width.
# define SHIFT (64 - 8*sizeof(objc_stringhash_offset_t))
        legacy_protocol_map::const_iterator c;
        for (c = protocols.begin(); c != protocols.end(); ++c) {
            uint32_t h = getIndex(c->first);
            if (h == INDEX_NOT_FOUND) {
                return "protocol list busted (metadata not optimized)";
            }

            int64_t offset = c->second - base;
            if ((offset<<SHIFT)>>SHIFT != offset) {
                return "protocol offset too big (metadata not optimized)";
            }

            protocolOffsets()[h] = (objc_stringhash_offset_t)offset;
        }
# undef SHIFT

        return NULL;
    }

// SELOPT_WRITE
#endif
};
697
// New-style precomputed protocol table. Reuses the class-table layout:
// each entry is a (protocol, header_info) pair with the same duplicate
// encoding as objc_clsopt_t.
struct objc_protocolopt2_t : objc_clsopt_t {

    // Returns the protocol named `key` whose header_info is accepted by
    // `callback`, or NULL if the name is absent or no copy is accepted.
    // For duplicated names the copies are tried in overflow-area order.
    void* getProtocol(const char *key,
                      bool (*callback)(const void* header_info)) const
    {
        uint32_t h = getIndex(key);
        if (h == INDEX_NOT_FOUND) {
            return NULL;
        }

        const objc_classheader_t& clshi = classOffsets()[h];
        if (! clshi.isDuplicate()) {
            // protocol appears in exactly one header
            void* cls = (void *)((const char *)this + clshi.clsOffset);
            void* hi = (void *)((const char *)this + clshi.hiOffset);
            return callback(hi) ? cls : NULL;
        }
        else {
            // protocol appears in more than one header
            uint32_t count = clshi.duplicateCount();
            const objc_classheader_t *list = &duplicateOffsets()[clshi.duplicateIndex()];
            for (uint32_t i = 0; i < count; i++) {
                void* cls = (void *)((const char *)this + list[i].clsOffset);
                void* hi = (void *)((const char *)this + list[i].hiOffset);
                if (callback(hi))
                    return cls;
            }
            return NULL;
        }
    }

};
730
731
732 // Precomputed image list.
733 struct objc_headeropt_ro_t;
734
735 // Precomputed image list.
736 struct objc_headeropt_rw_t;
737
738 // Precomputed class list.
739 struct objc_clsopt_t;
740
// Version of the objc_opt_t layout below; dyld and libobjc ignore the
// table if the version does not match (see "Versioning" at top of file).
// Edit objc-sel-table.s if you change this value.
// lldb and Symbolication read these structures. Inform them of any changes.
enum { VERSION = 15 };

// Values for objc_opt_t::flags
enum : uint32_t {
    IsProduction = (1 << 0),               // never set in development cache
    NoMissingWeakSuperclasses = (1 << 1)   // set in development cache and customer
};
750
// Top-level optimization structure.
// Edit objc-sel-table.s if you change this structure.
// Every *_offset field is relative to the start of this structure;
// 0 means the corresponding table is absent and its accessor returns NULL.
struct alignas(alignof(void*)) objc_opt_t {
    uint32_t version; // compare against VERSION above
    uint32_t flags;   // bitmask of the flag enum above
    int32_t selopt_offset;
    int32_t headeropt_ro_offset;
    int32_t clsopt_offset;
    int32_t unused_protocolopt_offset; // This is now 0 as we've moved to the new protocolopt_offset
    int32_t headeropt_rw_offset;
    int32_t protocolopt_offset;

    const objc_selopt_t* selopt() const {
        if (selopt_offset == 0) return NULL;
        return (objc_selopt_t *)((uint8_t *)this + selopt_offset);
    }
    objc_selopt_t* selopt() {
        if (selopt_offset == 0) return NULL;
        return (objc_selopt_t *)((uint8_t *)this + selopt_offset);
    }

    struct objc_headeropt_ro_t* headeropt_ro() const {
        if (headeropt_ro_offset == 0) return NULL;
        return (struct objc_headeropt_ro_t *)((uint8_t *)this + headeropt_ro_offset);
    }

    struct objc_clsopt_t* clsopt() const {
        if (clsopt_offset == 0) return NULL;
        return (objc_clsopt_t *)((uint8_t *)this + clsopt_offset);
    }

    // Legacy protocol table; NULL whenever unused_protocolopt_offset is 0
    // (which, per the field comment above, is now always the case).
    struct objc_protocolopt_t* protocolopt() const {
        if (unused_protocolopt_offset == 0) return NULL;
        return (objc_protocolopt_t *)((uint8_t *)this + unused_protocolopt_offset);
    }

    struct objc_protocolopt2_t* protocolopt2() const {
        if (protocolopt_offset == 0) return NULL;
        return (objc_protocolopt2_t *)((uint8_t *)this + protocolopt_offset);
    }

    struct objc_headeropt_rw_t* headeropt_rw() const {
        if (headeropt_rw_offset == 0) return NULL;
        return (struct objc_headeropt_rw_t *)((uint8_t *)this + headeropt_rw_offset);
    }
};
797
798 // sizeof(objc_opt_t) must be pointer-aligned
799 STATIC_ASSERT(sizeof(objc_opt_t) % sizeof(void*) == 0);
800
801
// List of offsets in libobjc that the shared cache optimization needs to use.
// T is a pointer-sized integer (uintptr_t in the in-process typedef below);
// the template parameter lets an out-of-process builder pick a fixed width.
// NOTE(review): the semantics of protocolClass are defined by libobjc /
// the cache builder; only the field name is visible in this header.
template <typename T>
struct objc_opt_pointerlist_tt {
    T protocolClass;
};
typedef struct objc_opt_pointerlist_tt<uintptr_t> objc_opt_pointerlist_t;
808
809
810 /*
811 --------------------------------------------------------------------
812 mix -- mix 3 64-bit values reversibly.
813 mix() takes 48 machine instructions, but only 24 cycles on a superscalar
814 machine (like Intel's new MMX architecture). It requires 4 64-bit
815 registers for 4::2 parallelism.
816 All 1-bit deltas, all 2-bit deltas, all deltas composed of top bits of
817 (a,b,c), and all deltas of bottom bits were tested. All deltas were
818 tested both on random keys and on keys that were nearly all zero.
819 These deltas all cause every bit of c to change between 1/3 and 2/3
820 of the time (well, only 113/400 to 287/400 of the time for some
821 2-bit delta). These deltas all cause at least 80 bits to change
822 among (a,b,c) when the mix is run either forward or backward (yes it
823 is reversible).
824 This implies that a hash using mix64 has no funnels. There may be
825 characteristics with 3-bit deltas or bigger, I didn't test for
826 those.
827 --------------------------------------------------------------------
828 */
#define mix64(a,b,c) \
{ \
  a -= b; a -= c; a ^= (c>>43); \
  b -= c; b -= a; b ^= (a<<9); \
  c -= a; c -= b; c ^= (b>>8); \
  a -= b; a -= c; a ^= (c>>38); \
  b -= c; b -= a; b ^= (a<<23); \
  c -= a; c -= b; c ^= (b>>5); \
  a -= b; a -= c; a ^= (c>>35); \
  b -= c; b -= a; b ^= (a<<49); \
  c -= a; c -= b; c ^= (b>>11); \
  a -= b; a -= c; a ^= (c>>12); \
  b -= c; b -= a; b ^= (a<<18); \
  c -= a; c -= b; c ^= (b>>22); \
}

/*
--------------------------------------------------------------------
lookup8() -- hash a variable-length key into a 64-bit value
  k     : the key (the unaligned variable-length array of bytes)
  length: the length of the key, counting by bytes
  level : the previous hash, or an arbitrary seed value
Returns a 64-bit value. Every bit of the key affects every bit of
the return value. No funnels. Every 1-bit and 2-bit delta achieves
avalanche. About 41+5len instructions.

The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is sooo slow!). If you need less than 64 bits,
use a bitmask; the hash table should then have a power-of-2 size.
If you are hashing n strings (uint8_t **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = lookup8( k[i], len[i], h);

By Bob Jenkins, Jan 4 1997. bob_jenkins@burtleburtle.net. You may
use this code any way you wish, private, educational, or commercial,
but I would appreciate if you give me credit.

See http://burtleburtle.net/bob/hash/evahash.html
Use for hash table lookup, or anything where one collision in 2^^64
is acceptable. Do NOT use for cryptographic purposes.
--------------------------------------------------------------------
*/

static uint64_t lookup8( uint8_t *k, size_t length, uint64_t level)
{
    uint64_t a, b, c;

    /* Seed the internal state: a and b carry the previous hash, c the
       golden ratio (an arbitrary odd constant). */
    a = b = level;
    c = 0x9e3779b97f4a7c13LL;

    /* Main rounds: consume 24 bytes per round, packing one little-endian
       64-bit word into each of the three lanes, then mix. */
    size_t remaining = length;
    while (remaining >= 24)
    {
        uint64_t w0 = 0, w1 = 0, w2 = 0;
        for (unsigned i = 0; i < 8; i++) {
            w0 += (uint64_t)k[i]      << (8*i);
            w1 += (uint64_t)k[i + 8]  << (8*i);
            w2 += (uint64_t)k[i + 16] << (8*i);
        }
        a += w0;
        b += w1;
        c += w2;
        mix64(a,b,c);
        k += 24; remaining -= 24;
    }

    /* Tail: fold in the final 0..23 bytes. The total length is added to c,
       whose lowest byte is reserved for it; therefore tail bytes destined
       for c (positions 16..22) land one byte higher than their natural
       lane position, while a (bytes 0..7) and b (bytes 8..15) use plain
       little-endian placement. */
    c += length;
    for (size_t i = remaining; i-- > 0; ) {
        if (i >= 16) {
            c += (uint64_t)k[i] << (8*(i - 15));
        } else if (i >= 8) {
            b += (uint64_t)k[i] << (8*(i - 8));
        } else {
            a += (uint64_t)k[i] << (8*i);
        }
    }
    mix64(a,b,c);

    /* c holds the fully mixed result. */
    return c;
}
937
938
939 #if defined(SELOPT_WRITE) || defined(CLOSURE_SELOPT_WRITE)
940
941 /*
942 ------------------------------------------------------------------------------
943 This generates a minimal perfect hash function. That means, given a
944 set of n keys, this determines a hash function that maps each of
945 those keys into a value in 0..n-1 with no collisions.
946
947 The perfect hash function first uses a normal hash function on the key
948 to determine (a,b) such that the pair (a,b) is distinct for all
949 keys, then it computes a^scramble[tab[b]] to get the final perfect hash.
950 tab[] is an array of 1-byte values and scramble[] is a 256-term array of
951 2-byte or 4-byte values. If there are n keys, the length of tab[] is a
952 power of two between n/3 and n.
953
954 I found the idea of computing distinct (a,b) values in "Practical minimal
955 perfect hash functions for large databases", Fox, Heath, Chen, and Daoud,
956 Communications of the ACM, January 1992. They found the idea in Chichelli
957 (CACM Jan 1980). Beyond that, our methods differ.
958
959 The key is hashed to a pair (a,b) where a in 0..*alen*-1 and b in
960 0..*blen*-1. A fast hash function determines both a and b
961 simultaneously. Any decent hash function is likely to produce
962 hashes so that (a,b) is distinct for all pairs. I try the hash
963 using different values of *salt* until all pairs are distinct.
964
965 The final hash is (a XOR scramble[tab[b]]). *scramble* is a
966 predetermined mapping of 0..255 into 0..smax-1. *tab* is an
array that we fill in, in such a way as to make the hash perfect.
968
969 First we fill in all values of *tab* that are used by more than one
970 key. We try all possible values for each position until one works.
971
972 This leaves m unmapped keys and m values that something could hash to.
973 If you treat unmapped keys as lefthand nodes and unused hash values
974 as righthand nodes, and draw a line connecting each key to each hash
975 value it could map to, you get a bipartite graph. We attempt to
976 find a perfect matching in this graph. If we succeed, we have
977 determined a perfect hash for the whole set of keys.
978
979 *scramble* is used because (a^tab[i]) clusters keys around *a*.
980 ------------------------------------------------------------------------------
981 */
982
/* Fixed-width integer aliases, limits, and tuning constants kept from the
   original perfect.c sources (ubN = unsigned, N bytes). */
typedef uint64_t ub8;
#define UB8MAXVAL 0xffffffffffffffffLL
#define UB8BITS 64
typedef uint32_t ub4;
#define UB4MAXVAL 0xffffffff
#define UB4BITS 32
typedef uint16_t ub2;
#define UB2MAXVAL 0xffff
#define UB2BITS 16
typedef uint8_t ub1;
#define UB1MAXVAL 0xff
#define UB1BITS 8

#define TRUE 1
#define FALSE 0

#define SCRAMBLE_LEN 256 // ((ub4)1<<16) /* length of *scramble* */
#define RETRY_INITKEY 2048 /* number of times to try to find distinct (a,b) */
#define RETRY_PERFECT 4 /* number of times to try to make a perfect hash */
1002
1003
/* representation of a key (one string being fed into the perfect hash) */
struct key
{
  ub1 *name_k; /* the actual key: pointer to the string's bytes (not owned) */
  ub4 len_k; /* the length of the actual key, in bytes */
  ub4 hash_k; /* the initial hash value for this key */
/* beyond this point is mapping-dependent: recomputed for each salt tried */
  ub4 a_k; /* a, of the key maps to (a,b) */
  ub4 b_k; /* b, of the key maps to (a,b) */
  struct key *nextb_k; /* next key with this b (intrusive list built by inittab) */
};
typedef struct key key;
1016
/* things indexed by b of original (a,b) pair; tabb[] becomes the emitted tab[] */
struct bstuff
{
  ub2 val_b; /* hash=a^tabb[b].val_b — the value the search is solving for */
  key *list_b; /* tabb[i].list_b is list of keys with b==i */
  ub4 listlen_b; /* length of list_b */
  ub4 water_b; /* high watermark of who has visited this map node (see augment) */
};
typedef struct bstuff bstuff;
1026
/* things indexed by final hash value; records which key owns each hash slot */
struct hstuff
{
  key *key_h; /* tabh[i].key_h is the key with a hash of i, or NULL if free */
};
typedef struct hstuff hstuff;
1033
/* things indexed by queue position; the spanning tree walked by augment/apply */
struct qstuff
{
  bstuff *b_q; /* b that currently occupies this hash */
  ub4 parent_q; /* queue position of parent that could use this hash */
  ub2 newval_q; /* what to change parent tab[b] to to use this hash */
  ub2 oldval_q; /* original value of tab[b], kept for rollback */
};
typedef struct qstuff qstuff;
1043
1044
1045 /*
1046 ------------------------------------------------------------------------------
1047 Find the mapping that will produce a perfect hash
1048 ------------------------------------------------------------------------------
1049 */
1050
1051 /* return the ceiling of the log (base 2) of val */
1052 static ub4 log2u(ub4 val)
1053 {
1054 ub4 i;
1055 for (i=0; ((ub4)1<<i) < val; ++i)
1056 ;
1057 return i;
1058 }
1059
1060 /* compute p(x), where p is a permutation of 0..(1<<nbits)-1 */
1061 /* permute(0)=0. This is intended and useful. */
1062 static ub4 permute(ub4 x, ub4 nbits)
1063 // ub4 x; /* input, a value in some range */
1064 // ub4 nbits; /* input, number of bits in range */
1065 {
1066 int i;
1067 int mask = ((ub4)1<<nbits)-1; /* all ones */
1068 int const2 = 1+nbits/2;
1069 int const3 = 1+nbits/3;
1070 int const4 = 1+nbits/4;
1071 int const5 = 1+nbits/5;
1072 for (i=0; i<20; ++i)
1073 {
1074 x = (x+(x<<const2)) & mask;
1075 x = (x^(x>>const3));
1076 x = (x+(x<<const4)) & mask;
1077 x = (x^(x>>const5));
1078 }
1079 return x;
1080 }
1081
1082 /* initialize scramble[] with distinct random values in 0..smax-1 */
1083 static void scrambleinit(ub4 *scramble, ub4 smax)
1084 // ub4 *scramble; /* hash is a^scramble[tab[b]] */
1085 // ub4 smax; /* scramble values should be in 0..smax-1 */
1086 {
1087 ub4 i;
1088
1089 /* fill scramble[] with distinct random integers in 0..smax-1 */
1090 for (i=0; i<SCRAMBLE_LEN; ++i)
1091 {
1092 scramble[i] = permute(i, log2u(smax));
1093 }
1094 }
1095
1096
1097 /*
1098 * put keys in tabb according to key->b_k
1099 * check if the initial hash might work
1100 */
1101 static int inittab(dyld3::OverflowSafeArray<bstuff>& tabb, dyld3::OverflowSafeArray<key>& keys, int complete)
1102 // bstuff *tabb; /* output, list of keys with b for (a,b) */
1103 // ub4 blen; /* length of tabb */
1104 // key *keys; /* list of keys already hashed */
1105 // int complete; /* TRUE means to complete init despite collisions */
1106 {
1107 int nocollision = TRUE;
1108 ub4 i;
1109
1110 memset((void *)tabb.begin(), 0, (size_t)(sizeof(bstuff)*tabb.maxCount()));
1111
1112 /* Two keys with the same (a,b) guarantees a collision */
1113 for (i = 0; i < keys.count(); i++) {
1114 key *mykey = &keys[i];
1115 key *otherkey;
1116
1117 for (otherkey=tabb[mykey->b_k].list_b;
1118 otherkey;
1119 otherkey=otherkey->nextb_k)
1120 {
1121 if (mykey->a_k == otherkey->a_k)
1122 {
1123 nocollision = FALSE;
1124 if (!complete)
1125 return FALSE;
1126 }
1127 }
1128 ++tabb[mykey->b_k].listlen_b;
1129 mykey->nextb_k = tabb[mykey->b_k].list_b;
1130 tabb[mykey->b_k].list_b = mykey;
1131 }
1132
1133 /* no two keys have the same (a,b) pair */
1134 return nocollision;
1135 }
1136
1137
/* Do the initial hash for normal mode (use lookup and checksum) */
/* Splits each key's 64-bit lookup8() hash into the pair (a,b):
   a_k takes the top log2(alen) bits, b_k the bottom bits masked by blen-1.
   The (loga > 0) and (blen > 1) guards avoid an undefined 64-bit shift and
   a zero mask when alen or blen is 1. */
static void initnorm(dyld3::OverflowSafeArray<key>& keys, ub4 alen, ub4 blen, ub4 smax, ub8 salt)
// key *keys; /* list of all keys */
// ub4 alen; /* (a,b) has a in 0..alen-1, a power of 2 */
// ub4 blen; /* (a,b) has b in 0..blen-1, a power of 2 */
// ub4 smax; /* maximum range of computable hash values (not read here) */
// ub8 salt; /* used to initialize the hash function */
{
    ub4 loga = log2u(alen); /* log base 2 of alen */
#if BUILDING_CACHE_BUILDER
    /* cache builder: hash all keys in parallel; each index touches only
       its own key, so the iterations are independent */
    dispatch_apply(keys.count(), DISPATCH_APPLY_AUTO, ^(size_t index) {
        ub4 i = (ub4)index;
        key *mykey = &keys[i];
        ub8 hash = lookup8(mykey->name_k, mykey->len_k, salt);
        mykey->a_k = (loga > 0) ? (ub4)(hash >> (UB8BITS-loga)) : 0;
        mykey->b_k = (blen > 1) ? (hash & (blen-1)) : 0;
    });
#else
    /* serial path when not building the cache builder: same computation */
    for (size_t index = 0; index != keys.count(); ++index) {
        ub4 i = (ub4)index;
        key *mykey = &keys[i];
        ub8 hash = lookup8(mykey->name_k, mykey->len_k, salt);
        mykey->a_k = (loga > 0) ? (ub4)(hash >> (UB8BITS-loga)) : 0;
        mykey->b_k = (blen > 1) ? (hash & (blen-1)) : 0;
    };
#endif
}
1166
1167
/* Try to apply an augmenting list */
/* Walks the queue path from position tail-1 back to the root (position 0),
   changing each parent's tab[b] value (val_b) so its keys occupy new hash
   slots. If a fresh collision appears partway through, the whole path is
   undone by a recursive call with rollback=TRUE. Returns TRUE on success. */
static int apply(dyld3::OverflowSafeArray<bstuff>& tabb,
                 dyld3::OverflowSafeArray<hstuff>& tabh,
                 dyld3::OverflowSafeArray<qstuff>& tabq,
                 ub4 *scramble, ub4 tail, int rollback)
// bstuff *tabb; /* stuff indexed by b */
// hstuff *tabh; /* key occupying each hash slot */
// qstuff *tabq; /* queue of b values: the augmenting path */
// ub4 *scramble; /* final hash is a^scramble[tab[b]] */
// ub4 tail; /* one past the last queue entry on the path */
// int rollback; /* FALSE applies augmenting path, TRUE rolls back */
{
    ub4 hash;
    key *mykey;
    bstuff *pb;
    ub4 child;
    ub4 parent;
    ub4 stabb; /* scramble[tab[b]] */

    /* walk from child to parent */
    for (child=tail-1; child; child=parent)
    {
        parent = tabq[child].parent_q; /* find child's parent */
        pb = tabq[parent].b_q; /* find parent's list of siblings */

        /* erase old hash values (only slots this b's keys actually own) */
        stabb = scramble[pb->val_b];
        for (mykey=pb->list_b; mykey; mykey=mykey->nextb_k)
        {
            hash = mykey->a_k^stabb;
            if (mykey == tabh[hash].key_h)
            { /* erase hash for all of child's siblings */
                tabh[hash].key_h = (key *)0;
            }
        }

        /* change pb->val_b, which will change the hashes of all parent siblings */
        pb->val_b = (rollback ? tabq[child].oldval_q : tabq[child].newval_q);

        /* set new hash values */
        stabb = scramble[pb->val_b];
        for (mykey=pb->list_b; mykey; mykey=mykey->nextb_k)
        {
            hash = mykey->a_k^stabb;
            if (rollback)
            {
                if (parent == 0) continue; /* root never had a hash */
            }
            else if (tabh[hash].key_h)
            {
                /* very rare: roll back any changes */
                apply(tabb, tabh, tabq, scramble, tail, TRUE);
                return FALSE; /* failure, collision */
            }
            tabh[hash].key_h = mykey;
        }
    }
    return TRUE;
}
1228
1229
1230 /*
1231 -------------------------------------------------------------------------------
1232 augment(): Add item to the mapping.
1233
1234 Construct a spanning tree of *b*s with *item* as root, where each
1235 parent can have all its hashes changed (by some new val_b) with
1236 at most one collision, and each child is the b of that collision.
1237
1238 I got this from Tarjan's "Data Structures and Network Algorithms". The
1239 path from *item* to a *b* that can be remapped with no collision is
1240 an "augmenting path". Change values of tab[b] along the path so that
1241 the unmapped key gets mapped and the unused hash value gets used.
1242
1243 Assuming 1 key per b, if m out of n hash values are still unused,
1244 you should expect the transitive closure to cover n/m nodes before
1245 an unused node is found. Sum(i=1..n)(n/i) is about nlogn, so expect
1246 this approach to take about nlogn time to map all single-key b's.
1247 -------------------------------------------------------------------------------
1248 */
/* Breadth-first search over the (at most 256) candidate val_b values,
   building the spanning tree in tabq; see the block comment above for the
   augmenting-path idea. Returns TRUE once apply() commits a collision-free
   assignment for *item*, FALSE if no augmenting path exists. */
static int augment(dyld3::OverflowSafeArray<bstuff>& tabb,
                   dyld3::OverflowSafeArray<hstuff>& tabh,
                   dyld3::OverflowSafeArray<qstuff>& tabq,
                   ub4 *scramble, ub4 smax, bstuff *item, ub4 nkeys,
                   ub4 highwater)
// bstuff *tabb; /* stuff indexed by b */
// hstuff *tabh; /* which key is associated with which hash, indexed by hash */
// qstuff *tabq; /* queue of *b* values, this is the spanning tree */
// ub4 *scramble; /* final hash is a^scramble[tab[b]] */
// ub4 smax; /* highest value in scramble */
// bstuff *item; /* &tabb[b] for the b to be mapped */
// ub4 nkeys; /* final hash must be in 0..nkeys-1 */
// ub4 highwater; /* a value higher than any now in tabb[].water_b */
{
    ub4 q; /* current position walking through the queue */
    ub4 tail; /* tail of the queue. 0 is the head of the queue. */
    ub4 limit=UB1MAXVAL+1; /* number of candidate val_b values to try */
    ub4 highhash = smax;

    /* initialize the root of the spanning tree */
    tabq[0].b_q = item;
    tail = 1;

    /* construct the spanning tree by walking the queue, add children to tail */
    for (q=0; q<tail; ++q)
    {
        bstuff *myb = tabq[q].b_q; /* the b for this node */
        ub4 i; /* possible value for myb->val_b */

        if (q == 1)
            break; /* don't do transitive closure */

        for (i=0; i<limit; ++i)
        {
            bstuff *childb = (bstuff *)0; /* the b that this i maps to */
            key *mykey; /* for walking through myb's keys */

            /* would val_b==i work? it must displace at most one other b */
            for (mykey = myb->list_b; mykey; mykey=mykey->nextb_k)
            {
                key *childkey;
                ub4 hash = mykey->a_k^scramble[i];

                if (hash >= highhash) break; /* out of bounds */
                childkey = tabh[hash].key_h;

                if (childkey)
                {
                    bstuff *hitb = &tabb[childkey->b_k];

                    if (childb)
                    {
                        if (childb != hitb) break; /* hit at most one child b */
                    }
                    else
                    {
                        childb = hitb; /* remember this as childb */
                        if (childb->water_b == highwater) break; /* already explored */
                    }
                }
            }
            if (mykey) continue; /* myb with i has multiple collisions */

            /* add childb to the queue of reachable things */
            if (childb) childb->water_b = highwater; /* mark visited for this search */
            tabq[tail].b_q = childb;
            tabq[tail].newval_q = i; /* how to make parent (myb) use this hash */
            tabq[tail].oldval_q = myb->val_b; /* need this for rollback */
            tabq[tail].parent_q = q;
            ++tail;

            if (!childb)
            { /* found an *i* with no collisions? */
                /* try to apply the augmenting path */
                if (apply(tabb, tabh, tabq, scramble, tail, FALSE))
                    return TRUE; /* success, item was added to the perfect hash */

                --tail; /* don't know how to handle such a child! */
            }
        }
    }
    return FALSE;
}
1331
1332
1333 /* find a mapping that makes this a perfect hash */
1334 static int perfect(dyld3::OverflowSafeArray<bstuff>& tabb,
1335 dyld3::OverflowSafeArray<hstuff>& tabh,
1336 dyld3::OverflowSafeArray<qstuff>& tabq,
1337 ub4 smax, ub4 *scramble, ub4 nkeys)
1338 {
1339 ub4 maxkeys; /* maximum number of keys for any b */
1340 ub4 i, j;
1341
1342 const ub4 blen = (ub4)tabb.count();
1343
1344 #if SELOPT_DEBUG
1345 fprintf(stderr, " blen %d smax %d nkeys %d\n", blen, smax, nkeys);
1346 #endif
1347
1348 /* clear any state from previous attempts */
1349 memset((void *)tabh.begin(), 0, sizeof(hstuff)*smax);
1350 memset((void *)tabq.begin(), 0, sizeof(qstuff)*(blen+1));
1351
1352 for (maxkeys=0,i=0; i<blen; ++i)
1353 if (tabb[i].listlen_b > maxkeys)
1354 maxkeys = tabb[i].listlen_b;
1355
1356 /* In descending order by number of keys, map all *b*s */
1357 for (j=maxkeys; j>0; --j)
1358 for (i=0; i<blen; ++i)
1359 if (tabb[i].listlen_b == j)
1360 if (!augment(tabb, tabh, tabq, scramble, smax, &tabb[i], nkeys,
1361 i+1))
1362 {
1363 return FALSE;
1364 }
1365
1366 /* Success! We found a perfect hash of all keys into 0..nkeys-1. */
1367 return TRUE;
1368 }
1369
1370
1371 /* guess initial values for alen and blen */
1372 static void initalen(ub4 *alen, ub4 *blen, ub4 smax, ub4 nkeys)
1373 // ub4 *alen; /* output, initial alen */
1374 // ub4 *blen; /* output, initial blen */
1375 // ub4 smax; /* input, power of two greater or equal to max hash value */
1376 // ub4 nkeys; /* number of keys being hashed */
1377 {
1378 /*
1379 * Find initial *alen, *blen
1380 * Initial alen and blen values were found empirically. Some factors:
1381 *
1382 * If smax<256 there is no scramble, so tab[b] needs to cover 0..smax-1.
1383 *
1384 * alen and blen must be powers of 2 because the values in 0..alen-1 and
1385 * 0..blen-1 are produced by applying a bitmask to the initial hash function.
1386 *
1387 * alen must be less than smax, in fact less than nkeys, because otherwise
1388 * there would often be no i such that a^scramble[i] is in 0..nkeys-1 for
1389 * all the *a*s associated with a given *b*, so there would be no legal
1390 * value to assign to tab[b]. This only matters when we're doing a minimal
1391 * perfect hash.
1392 *
1393 * It takes around 800 trials to find distinct (a,b) with nkey=smax*(5/8)
1394 * and alen*blen = smax*smax/32.
1395 *
1396 * Values of blen less than smax/4 never work, and smax/2 always works.
1397 *
1398 * We want blen as small as possible because it is the number of bytes in
1399 * the huge array we must create for the perfect hash.
1400 *
1401 * When nkey <= smax*(5/8), blen=smax/4 works much more often with
1402 * alen=smax/8 than with alen=smax/4. Above smax*(5/8), blen=smax/4
1403 * doesn't seem to care whether alen=smax/8 or alen=smax/4. I think it
1404 * has something to do with 5/8 = 1/8 * 5. For example examine 80000,
1405 * 85000, and 90000 keys with different values of alen. This only matters
1406 * if we're doing a minimal perfect hash.
1407 *
1408 * When alen*blen <= 1<<UB4BITS, the initial hash must produce one integer.
1409 * Bigger than that it must produce two integers, which increases the
1410 * cost of the hash per character hashed.
1411 */
1412 *alen = smax; /* no reason to restrict alen to smax/2 */
1413 *blen = ((nkeys <= smax*0.6) ? smax/16 :
1414 (nkeys <= smax*0.8) ? smax/8 : smax/4);
1415
1416 if (*alen < 1) *alen = 1;
1417 if (*blen < 1) *blen = 1;
1418
1419 #if SELOPT_DEBUG
1420 fprintf(stderr, "alen %d blen %d smax %d nkeys %d\n", *alen, *blen, smax, nkeys);
1421 #endif
1422 }
1423
1424 /*
1425 ** Try to find a perfect hash function.
1426 ** Return the successful initializer for the initial hash.
1427 ** Return 0 if no perfect hash could be found.
1428 */
1429 static int findhash(dyld3::OverflowSafeArray<bstuff>& tabb,
1430 ub4 *alen, ub8 *salt,
1431 ub4 *scramble, ub4 smax, dyld3::OverflowSafeArray<key>& keys)
1432 // bstuff **tabb; /* output, tab[] of the perfect hash, length *blen */
1433 // ub4 *alen; /* output, 0..alen-1 is range for a of (a,b) */
1434 // ub4 *blen; /* output, 0..blen-1 is range for b of (a,b) */
1435 // ub4 *salt; /* output, initializes initial hash */
1436 // ub4 *scramble; /* input, hash = a^scramble[tab[b]] */
1437 // ub4 smax; /* input, scramble[i] in 0..smax-1 */
1438 // key *keys; /* input, keys to hash */
1439 // ub4 nkeys; /* input, number of keys being hashed */
1440 {
1441 ub4 bad_initkey; /* how many times did initkey fail? */
1442 ub4 bad_perfect; /* how many times did perfect fail? */
1443 ub4 si; /* trial initializer for initial hash */
1444 ub4 maxalen;
1445 dyld3::OverflowSafeArray<hstuff>tabh; /* table of keys indexed by hash value */
1446 dyld3::OverflowSafeArray<qstuff>tabq; /* table of stuff indexed by queue value, used by augment */
1447
1448 /* guess initial values for alen and blen */
1449 ub4 blen = 0;
1450 initalen(alen, &blen, smax, (ub4)keys.count());
1451
1452 scrambleinit(scramble, smax);
1453
1454 maxalen = smax;
1455
1456 /* allocate working memory */
1457 tabb.resize(blen);
1458 tabq.resize(blen+1);
1459 tabh.resize(smax);
1460
1461 /* Actually find the perfect hash */
1462 *salt = 0;
1463 bad_initkey = 0;
1464 bad_perfect = 0;
1465 for (si=1; ; ++si)
1466 {
1467 ub4 rslinit;
1468 /* Try to find distinct (A,B) for all keys */
1469 *salt = si * 0x9e3779b97f4a7c13LL; /* golden ratio (arbitrary value) */
1470 initnorm(keys, *alen, blen, smax, *salt);
1471 rslinit = inittab(tabb, keys, FALSE);
1472 if (rslinit == 0)
1473 {
1474 /* didn't find distinct (a,b) */
1475 if (++bad_initkey >= RETRY_INITKEY)
1476 {
1477 /* Try to put more bits in (A,B) to make distinct (A,B) more likely */
1478 if (*alen < maxalen)
1479 {
1480 *alen *= 2;
1481 }
1482 else if (blen < smax)
1483 {
1484 blen *= 2;
1485 tabb.resize(blen);
1486 tabq.resize(blen+1);
1487 }
1488 bad_initkey = 0;
1489 bad_perfect = 0;
1490 }
1491 continue; /* two keys have same (a,b) pair */
1492 }
1493
1494 /* Given distinct (A,B) for all keys, build a perfect hash */
1495 if (!perfect(tabb, tabh, tabq, smax, scramble, (ub4)keys.count()))
1496 {
1497 if (++bad_perfect >= RETRY_PERFECT)
1498 {
1499 if (blen < smax)
1500 {
1501 blen *= 2;
1502 tabb.resize(blen);
1503 tabq.resize(blen+1);
1504 --si; /* we know this salt got distinct (A,B) */
1505 }
1506 else
1507 {
1508 return 0;
1509 }
1510 bad_perfect = 0;
1511 }
1512 continue;
1513 }
1514
1515 break;
1516 }
1517
1518 return 1;
1519 }
1520
1521 /*
1522 ------------------------------------------------------------------------------
1523 Input/output type routines
1524 ------------------------------------------------------------------------------
1525 */
1526
1527
1528 static void
1529 make_perfect(dyld3::OverflowSafeArray<key>& keys, perfect_hash& result)
1530 {
1531 dyld3::OverflowSafeArray<bstuff> tab; /* table indexed by b */
1532 ub4 smax; /* scramble[] values in 0..smax-1, a power of 2 */
1533 ub4 alen; /* a in 0..alen-1, a power of 2 */
1534 ub8 salt; /* a parameter to the hash function */
1535 ub4 scramble[SCRAMBLE_LEN]; /* used in final hash function */
1536 int ok;
1537 uint32_t i;
1538
1539 /* find the hash */
1540 smax = ((ub4)1<<log2u((ub4)keys.count()));
1541 ok = findhash(tab, &alen, &salt,
1542 scramble, smax, keys);
1543 if (!ok) {
1544 smax = 2 * ((ub4)1<<log2u((ub4)keys.count()));
1545 ok = findhash(tab, &alen, &salt,
1546 scramble, smax, keys);
1547 }
1548 if (!ok) {
1549 bzero(&result, sizeof(result));
1550 } else {
1551 /* build the tables */
1552 result.capacity = smax;
1553 result.occupied = (ub4)keys.count();
1554 result.shift = UB8BITS - log2u(alen);
1555 result.mask = (ub4)tab.count() - 1;
1556 result.salt = salt;
1557
1558 result.tab.resize(tab.count());
1559 for (i = 0; i < tab.count(); i++) {
1560 result.tab[i] = tab[i].val_b;
1561 }
1562 for (i = 0; i < 256; i++) {
1563 result.scramble[i] = scramble[i];
1564 }
1565 }
1566 }
1567
1568 // SELOPT_WRITE || CLOSURE_SELOPT_WRITE
1569 #endif
1570
1571 #ifdef SELOPT_WRITE
1572
1573 static void
1574 make_perfect(const string_map& strings, perfect_hash& phash)
1575 {
1576 dyld3::OverflowSafeArray<key> keys;
1577
1578 /* read in the list of keywords */
1579 keys.reserve(strings.size());
1580 size_t i;
1581 string_map::const_iterator s;
1582 for (i = 0, s = strings.begin(); s != strings.end(); ++s, ++i) {
1583 key mykey;
1584 mykey.name_k = (ub1 *)s->first;
1585 mykey.len_k = (ub4)strlen(s->first);
1586 keys.push_back(mykey);
1587 }
1588
1589 make_perfect(keys, phash);
1590 }
1591
1592 // SELOPT_WRITE
1593 #endif
1594
1595 #ifdef CLOSURE_SELOPT_WRITE
1596
1597 static void
1598 make_perfect(const dyld3::OverflowSafeArray<const char*>& strings, perfect_hash& phash)
1599 {
1600 dyld3::OverflowSafeArray<key> keys;
1601
1602 /* read in the list of keywords */
1603 keys.reserve(strings.count());
1604 for (const char* s : strings) {
1605 key mykey;
1606 mykey.name_k = (ub1 *)s;
1607 mykey.len_k = (ub4)strlen(s);
1608 keys.push_back(mykey);
1609 }
1610
1611 make_perfect(keys, phash);
1612 }
1613
1614 // CLOSURE_SELOPT_WRITE
1615 #endif
1616
1617 // namespace objc_selopt
1618 };
1619
1620 #undef S32
1621 #undef S64
1622
1623 #endif