apple/cf CF-368.25: Collections.subproj/CFSet.c
1 /*
2 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /* CFSet.c
24 Copyright 1998-2002, Apple, Inc. All rights reserved.
25 Responsibility: Christopher Kane
26 */
27
28 #include <CoreFoundation/CFSet.h>
29 #include "CFInternal.h"
30
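/* Example of typical client use of the CFSet API implemented below (illustrative
 * sketch only, using the public CFSet.h interface):
 *
 *     CFMutableSetRef s = CFSetCreateMutable(kCFAllocatorDefault, 0, &kCFTypeSetCallBacks);
 *     CFSetAddValue(s, CFSTR("hello"));
 *     Boolean present = CFSetContainsValue(s, CFSTR("hello"));   // true
 *     CFIndex count = CFSetGetCount(s);                          // 1
 *     CFSetRemoveValue(s, CFSTR("hello"));
 *     CFRelease(s);
 */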
31 const CFSetCallBacks kCFTypeSetCallBacks = {0, __CFTypeCollectionRetain, __CFTypeCollectionRelease, CFCopyDescription, CFEqual, CFHash};
32 const CFSetCallBacks kCFCopyStringSetCallBacks = {0, (void *)CFStringCreateCopy, __CFTypeCollectionRelease, CFCopyDescription, CFEqual, CFHash};
33 static const CFSetCallBacks __kCFNullSetCallBacks = {0, NULL, NULL, NULL, NULL, NULL};
34
35 static const uint32_t __CFSetCapacities[42] = {
36 4, 8, 17, 29, 47, 76, 123, 199, 322, 521, 843, 1364, 2207, 3571, 5778, 9349,
37 15127, 24476, 39603, 64079, 103682, 167761, 271443, 439204, 710647, 1149851, 1860498,
38 3010349, 4870847, 7881196, 12752043, 20633239, 33385282, 54018521, 87403803, 141422324,
39 228826127, 370248451, 599074578, 969323029, 1568397607, 2537720636U
40 };
41
42 static const uint32_t __CFSetBuckets[42] = { // primes
43 5, 11, 23, 41, 67, 113, 199, 317, 521, 839, 1361, 2207, 3571, 5779, 9349, 15121,
44 24473, 39607, 64081, 103681, 167759, 271429, 439199, 710641, 1149857, 1860503, 3010349,
45 4870843, 7881193, 12752029, 20633237, 33385273, 54018521, 87403763, 141422317, 228826121,
46 370248451, 599074561, 969323023, 1568397599, 2537720629U, 4106118251U
47 };
48
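/* Successive entries in both tables grow by roughly the golden ratio (about 1.618x),
 * and for the larger entries each capacity is about 61.8% of the corresponding prime
 * bucket count, so a table is regrown before its load factor exceeds roughly 1/phi.
 * The resize-down heuristic in CFSetRemoveValue() relies on this growth ratio. */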
49 CF_INLINE CFIndex __CFSetRoundUpCapacity(CFIndex capacity) {
50 CFIndex idx;
51 for (idx = 0; idx < 42 && __CFSetCapacities[idx] < (uint32_t)capacity; idx++);
52 if (42 <= idx) HALT;
53 return __CFSetCapacities[idx];
54 }
55
56 CF_INLINE CFIndex __CFSetNumBucketsForCapacity(CFIndex capacity) {
57 CFIndex idx;
58 for (idx = 0; idx < 42 && __CFSetCapacities[idx] < (uint32_t)capacity; idx++);
59 if (42 <= idx) HALT;
60 return __CFSetBuckets[idx];
61 }
62
63 enum { /* Bits 1-0 */
64     __kCFSetImmutable = 0,        /* unchangeable and fixed capacity */
65 __kCFSetMutable = 1, /* changeable and variable capacity */
66 __kCFSetFixedMutable = 3 /* changeable and fixed capacity */
67 };
68
69 enum { /* Bits 5-4 (value), 3-2 (key) */
70 __kCFSetHasNullCallBacks = 0,
71 __kCFSetHasCFTypeCallBacks = 1,
72 __kCFSetHasCustomCallBacks = 3 /* callbacks are at end of header */
73 };
74
75 // Under GC, we fudge the key/value memory in two ways
76 // First, if we had null callbacks or null for both retain/release, we use unscanned memory
77 // This means that if people were doing addValue:[xxx new] and never removing, well, that doesn't work
78 //
79 // Second, if we notice standard retain/release implementations we substitute scanned memory
80 // and zero out the retain/release callbacks. This is fine, but when copying we need to restore them
81
82 enum {
83 __kCFSetRestoreKeys = (1 << 0),
84 __kCFSetRestoreValues = (1 << 1),
85 __kCFSetRestoreStringKeys = (1 << 2),
86 __kCFSetWeakKeys = (1 << 3)
87 };
88
89 struct __CFSet {
90 CFRuntimeBase _base;
91 CFIndex _count; /* number of values */
92 CFIndex _capacity; /* maximum number of values */
93 CFIndex _bucketsNum; /* number of slots */
94 uintptr_t _marker;
95 void *_context; /* private */
96 CFIndex _deletes;
97 CFOptionFlags _xflags; /* bits for GC */
98 const void **_keys; /* can be NULL if not allocated yet */
99 };
100
101 /* Bits 1-0 of the base reserved bits are used for mutability variety */
102 /* Bits 3-2 of the base reserved bits are used for key callback indicator bits */
103 /* Bits 5-4 of the base reserved bits are used for value callback indicator bits */
104 /* Bit 6 is special KVO actions bit */
105
106 CF_INLINE CFIndex __CFSetGetType(CFSetRef set) {
107 return __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 1, 0);
108 }
109
110 CF_INLINE CFIndex __CFSetGetSizeOfType(CFIndex t) {
111 CFIndex size = sizeof(struct __CFSet);
112 if (__CFBitfieldGetValue(t, 3, 2) == __kCFSetHasCustomCallBacks) {
113 size += sizeof(CFSetCallBacks);
114 }
115 return size;
116 }
117
118 CF_INLINE const CFSetCallBacks *__CFSetGetKeyCallBacks(CFSetRef set) {
119 CFSetCallBacks *result = NULL;
120 switch (__CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
121 case __kCFSetHasNullCallBacks:
122 return &__kCFNullSetCallBacks;
123 case __kCFSetHasCFTypeCallBacks:
124 return &kCFTypeSetCallBacks;
125 case __kCFSetHasCustomCallBacks:
126 break;
127 }
128 result = (CFSetCallBacks *)((uint8_t *)set + sizeof(struct __CFSet));
129 return result;
130 }
131
132 CF_INLINE bool __CFSetCallBacksMatchNull(const CFSetCallBacks *c) {
133 return (NULL == c ||
134 (c->retain == __kCFNullSetCallBacks.retain &&
135 c->release == __kCFNullSetCallBacks.release &&
136 c->copyDescription == __kCFNullSetCallBacks.copyDescription &&
137 c->equal == __kCFNullSetCallBacks.equal &&
138 c->hash == __kCFNullSetCallBacks.hash));
139 }
140
141 CF_INLINE bool __CFSetCallBacksMatchCFType(const CFSetCallBacks *c) {
142 return (&kCFTypeSetCallBacks == c ||
143 (c->retain == kCFTypeSetCallBacks.retain &&
144 c->release == kCFTypeSetCallBacks.release &&
145 c->copyDescription == kCFTypeSetCallBacks.copyDescription &&
146 c->equal == kCFTypeSetCallBacks.equal &&
147 c->hash == kCFTypeSetCallBacks.hash));
148 }
149
150 #define CF_OBJC_KVO_WILLCHANGE(obj, sel)
151 #define CF_OBJC_KVO_DIDCHANGE(obj, sel)
152
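/* Bucket probing. The table is open-addressed with linear probing (probeskip == 1);
 * a slot holding _marker is empty and a slot holding ~_marker is deleted.
 * __CFSetFindBuckets1a is the fast path for null callbacks: the key pointer is its
 * own hash and keys compare by pointer equality. __CFSetFindBuckets1b goes through
 * the hash/equal callbacks. __CFSetFindBuckets2 additionally reports the first
 * reusable slot in *nomatch so callers can insert when no match exists. */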
153 static CFIndex __CFSetFindBuckets1a(CFSetRef set, const void *key) {
154 CFHashCode keyHash = (CFHashCode)key;
155 const void **keys = set->_keys;
156 uintptr_t marker = set->_marker;
157 CFIndex probe = keyHash % set->_bucketsNum;
158 CFIndex probeskip = 1; // See RemoveValue() for notes before changing this value
159 CFIndex start = probe;
160 for (;;) {
161 uintptr_t currKey = (uintptr_t)keys[probe];
162 if (marker == currKey) { /* empty */
163 return kCFNotFound;
164 } else if (~marker == currKey) { /* deleted */
165 /* do nothing */
166 } else if (currKey == (uintptr_t)key) {
167 return probe;
168 }
169 probe = probe + probeskip;
170 // This alternative to probe % buckets assumes that
171 // probeskip is always positive and less than the
172 // number of buckets.
173 if (set->_bucketsNum <= probe) {
174 probe -= set->_bucketsNum;
175 }
176 if (start == probe) {
177 return kCFNotFound;
178 }
179 }
180 }
181
182 static CFIndex __CFSetFindBuckets1b(CFSetRef set, const void *key) {
183 const CFSetCallBacks *cb = __CFSetGetKeyCallBacks(set);
184 CFHashCode keyHash = cb->hash ? (CFHashCode)INVOKE_CALLBACK2(((CFHashCode (*)(const void *, void *))cb->hash), key, set->_context) : (CFHashCode)key;
185 const void **keys = set->_keys;
186 uintptr_t marker = set->_marker;
187 CFIndex probe = keyHash % set->_bucketsNum;
188 CFIndex probeskip = 1; // See RemoveValue() for notes before changing this value
189 CFIndex start = probe;
190 for (;;) {
191 uintptr_t currKey = (uintptr_t)keys[probe];
192 if (marker == currKey) { /* empty */
193 return kCFNotFound;
194 } else if (~marker == currKey) { /* deleted */
195 /* do nothing */
196 } else if (currKey == (uintptr_t)key || (cb->equal && INVOKE_CALLBACK3((Boolean (*)(const void *, const void *, void*))cb->equal, (void *)currKey, key, set->_context))) {
197 return probe;
198 }
199 probe = probe + probeskip;
200 // This alternative to probe % buckets assumes that
201 // probeskip is always positive and less than the
202 // number of buckets.
203 if (set->_bucketsNum <= probe) {
204 probe -= set->_bucketsNum;
205 }
206 if (start == probe) {
207 return kCFNotFound;
208 }
209 }
210 }
211
212 static void __CFSetFindBuckets2(CFSetRef set, const void *key, CFIndex *match, CFIndex *nomatch) {
213 const CFSetCallBacks *cb = __CFSetGetKeyCallBacks(set);
214 CFHashCode keyHash = cb->hash ? (CFHashCode)INVOKE_CALLBACK2(((CFHashCode (*)(const void *, void *))cb->hash), key, set->_context) : (CFHashCode)key;
215 const void **keys = set->_keys;
216 uintptr_t marker = set->_marker;
217 CFIndex probe = keyHash % set->_bucketsNum;
218 CFIndex probeskip = 1; // See RemoveValue() for notes before changing this value
219 CFIndex start = probe;
220 *match = kCFNotFound;
221 *nomatch = kCFNotFound;
222 for (;;) {
223 uintptr_t currKey = (uintptr_t)keys[probe];
224 if (marker == currKey) { /* empty */
225 if (nomatch) *nomatch = probe;
226 return;
227 } else if (~marker == currKey) { /* deleted */
228 if (nomatch) {
229 *nomatch = probe;
230 nomatch = NULL;
231 }
232 } else if (currKey == (uintptr_t)key || (cb->equal && INVOKE_CALLBACK3((Boolean (*)(const void *, const void *, void*))cb->equal, (void *)currKey, key, set->_context))) {
233 *match = probe;
234 return;
235 }
236 probe = probe + probeskip;
237 // This alternative to probe % buckets assumes that
238 // probeskip is always positive and less than the
239 // number of buckets.
240 if (set->_bucketsNum <= probe) {
241 probe -= set->_bucketsNum;
242 }
243 if (start == probe) {
244 return;
245 }
246 }
247 }
248
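/* The marker is an arbitrary bit pattern that tags empty (_marker) and deleted
 * (~_marker) slots. When a key about to be stored collides with either pattern,
 * this routine decrements the marker until neither the candidate nor its complement
 * matches anything currently in the bucket array, then rewrites all empty and
 * deleted slots in terms of the new marker. */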
249 static void __CFSetFindNewMarker(CFSetRef set) {
250 const void **keys = set->_keys;
251 uintptr_t newMarker;
252 CFIndex idx, nbuckets;
253 bool hit;
254
255 nbuckets = set->_bucketsNum;
256 newMarker = set->_marker;
257 do {
258 newMarker--;
259 hit = false;
260 for (idx = 0; idx < nbuckets; idx++) {
261 if (newMarker == (uintptr_t)keys[idx] || ~newMarker == (uintptr_t)keys[idx]) {
262 hit = true;
263 break;
264 }
265 }
266 } while (hit);
267 for (idx = 0; idx < nbuckets; idx++) {
268 if (set->_marker == (uintptr_t)keys[idx]) {
269 keys[idx] = (const void *)newMarker;
270 } else if (~set->_marker == (uintptr_t)keys[idx]) {
271 keys[idx] = (const void *)~newMarker;
272 }
273 }
274 ((struct __CFSet *)set)->_marker = newMarker;
275 }
276
277 static bool __CFSetEqual(CFTypeRef cf1, CFTypeRef cf2) {
278 CFSetRef set1 = (CFSetRef)cf1;
279 CFSetRef set2 = (CFSetRef)cf2;
280 const CFSetCallBacks *cb1, *cb2;
281 const void **keys;
282 CFIndex idx, nbuckets;
283 if (set1 == set2) return true;
284 if (set1->_count != set2->_count) return false;
285 cb1 = __CFSetGetKeyCallBacks(set1);
286 cb2 = __CFSetGetKeyCallBacks(set2);
287 if (cb1->equal != cb2->equal) return false;
288 if (0 == set1->_count) return true; /* after function comparison! */
289 keys = set1->_keys;
290 nbuckets = set1->_bucketsNum;
291 for (idx = 0; idx < nbuckets; idx++) {
292 if (set1->_marker != (uintptr_t)keys[idx] && ~set1->_marker != (uintptr_t)keys[idx]) {
293 const void *value;
294 if (!CFSetGetValueIfPresent(set2, keys[idx], &value)) return false;
295 }
296 }
297 return true;
298 }
299
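/* A set hashes to its element count: cheap, order-independent, and consistent with
 * __CFSetEqual (equal sets necessarily have equal counts), at the cost of frequent
 * collisions between distinct sets of the same size. */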
300 static CFHashCode __CFSetHash(CFTypeRef cf) {
301 CFSetRef set = (CFSetRef)cf;
302 return set->_count;
303 }
304
305 static CFStringRef __CFSetCopyDescription(CFTypeRef cf) {
306 CFSetRef set = (CFSetRef)cf;
307 CFAllocatorRef allocator;
308 const CFSetCallBacks *cb;
309 const void **keys;
310 CFIndex idx, nbuckets;
311 CFMutableStringRef result;
312 cb = __CFSetGetKeyCallBacks(set);
313 keys = set->_keys;
314 nbuckets = set->_bucketsNum;
315 allocator = CFGetAllocator(set);
316 result = CFStringCreateMutable(allocator, 0);
317 switch (__CFSetGetType(set)) {
318 case __kCFSetImmutable:
319 CFStringAppendFormat(result, NULL, CFSTR("<CFSet %p [%p]>{type = immutable, count = %u, capacity = %u, pairs = (\n"), cf, allocator, set->_count, set->_capacity);
320 break;
321 case __kCFSetFixedMutable:
322 CFStringAppendFormat(result, NULL, CFSTR("<CFSet %p [%p]>{type = fixed-mutable, count = %u, capacity = %u, pairs = (\n"), cf, allocator, set->_count, set->_capacity);
323 break;
324 case __kCFSetMutable:
325 CFStringAppendFormat(result, NULL, CFSTR("<CFSet %p [%p]>{type = mutable, count = %u, capacity = %u, pairs = (\n"), cf, allocator, set->_count, set->_capacity);
326 break;
327 }
328 for (idx = 0; idx < nbuckets; idx++) {
329 if (set->_marker != (uintptr_t)keys[idx] && ~set->_marker != (uintptr_t)keys[idx]) {
330 CFStringRef kDesc = NULL;
331 if (NULL != cb->copyDescription) {
332 kDesc = (CFStringRef)INVOKE_CALLBACK2(((CFStringRef (*)(const void *, void *))cb->copyDescription), keys[idx], set->_context);
333 }
334 if (NULL != kDesc) {
335 CFStringAppendFormat(result, NULL, CFSTR("\t%u : %@\n"), idx, kDesc);
336 CFRelease(kDesc);
337 } else {
338 CFStringAppendFormat(result, NULL, CFSTR("\t%u : <%p>\n"), idx, keys[idx]);
339 }
340 }
341 }
342 CFStringAppend(result, CFSTR(")}"));
343 return result;
344 }
345
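/* Finalization. Under GC, a set whose key callbacks define neither retain nor
 * release is left intact so the collector can reclaim its contents. Otherwise the
 * release callback is invoked for every live key, and a variable-capacity set also
 * frees the bucket array it allocated in __CFSetGrow(). */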
346 static void __CFSetDeallocate(CFTypeRef cf) {
347 CFMutableSetRef set = (CFMutableSetRef)cf;
348 CFAllocatorRef allocator = __CFGetAllocator(set);
349 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
350 const CFSetCallBacks *kcb = __CFSetGetKeyCallBacks(set);
351 if (kcb->retain == NULL && kcb->release == NULL)
352 return; // XXX_PCB keep set intact during finalization.
353 }
354 if (__CFSetGetType(set) == __kCFSetImmutable) {
355 __CFBitfieldSetValue(((CFRuntimeBase *)set)->_info, 1, 0, __kCFSetFixedMutable);
356 }
357
358 const CFSetCallBacks *cb = __CFSetGetKeyCallBacks(set);
359 if (cb->release) {
360 const void **keys = set->_keys;
361 CFIndex idx, nbuckets = set->_bucketsNum;
362 for (idx = 0; idx < nbuckets; idx++) {
363 if (set->_marker != (uintptr_t)keys[idx] && ~set->_marker != (uintptr_t)keys[idx]) {
364 const void *oldkey = keys[idx];
365 INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))cb->release), allocator, oldkey, set->_context);
366 }
367 }
368 }
369
370 if (__CFSetGetType(set) == __kCFSetMutable && set->_keys) {
371 _CFAllocatorDeallocateGC(allocator, set->_keys);
372 set->_keys = NULL;
373 }
374 }
375
376 /*
377 * When running under GC, we suss up sets with standard string copy to hold
378 * onto everything, including the copies of incoming keys, in strong memory without retain counts.
379 * This is the routine that makes that copy.
380  * Note that for inputs of constant strings we'll get a constant string back, and so the result
381 * is not guaranteed to be from the auto zone, hence the call to CFRelease since it will figure
382 * out where the refcount really is.
383 */
384 static CFStringRef _CFStringCreateCopyCollected(CFAllocatorRef allocator, CFStringRef theString) {
385 return CFMakeCollectable(CFStringCreateCopy(NULL, theString));
386 }
387
388 static CFTypeID __kCFSetTypeID = _kCFRuntimeNotATypeID;
389
390 static const CFRuntimeClass __CFSetClass = {
391 _kCFRuntimeScannedObject,
392 "CFSet",
393 NULL, // init
394 NULL, // copy
395 __CFSetDeallocate,
396 (void *)__CFSetEqual,
397 __CFSetHash,
398 NULL, //
399 __CFSetCopyDescription
400 };
401
402 __private_extern__ void __CFSetInitialize(void) {
403 __kCFSetTypeID = _CFRuntimeRegisterClass(&__CFSetClass);
404 }
405
406 CFTypeID CFSetGetTypeID(void) {
407 return __kCFSetTypeID;
408 }
409
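/* Common creation path. Encodes the mutability variety and the callback indicator
 * bits into the instance info bits, copies custom callbacks to the space after the
 * struct header, and allocates the bucket array inline for immutable and
 * fixed-mutable sets; fully mutable sets defer bucket allocation to the first
 * insertion (see __CFSetGrow). Under GC, standard retain/release callbacks may be
 * replaced so that scanned memory does the work (see the _xflags notes above). */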
410 static CFSetRef __CFSetInit(CFAllocatorRef allocator, uint32_t flags, CFIndex capacity, const CFSetCallBacks *keyCallBacks) {
411 struct __CFSet *memory;
412 uint32_t size;
413 CFIndex idx;
414 __CFBitfieldSetValue(flags, 31, 2, 0);
415 CFSetCallBacks nonRetainingKeyCallbacks;
416 CFOptionFlags xflags = 0;
417 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
418 // preserve NULL for key or value CB, otherwise fix up.
419 if (!keyCallBacks || (keyCallBacks->retain == NULL && keyCallBacks->release == NULL)) {
420 xflags = __kCFSetWeakKeys;
421 }
422 else {
423 if (keyCallBacks->retain == __CFTypeCollectionRetain && keyCallBacks->release == __CFTypeCollectionRelease) {
424 // copy everything
425 nonRetainingKeyCallbacks = *keyCallBacks;
426 nonRetainingKeyCallbacks.retain = NULL;
427 nonRetainingKeyCallbacks.release = NULL;
428 keyCallBacks = &nonRetainingKeyCallbacks;
429 xflags = __kCFSetRestoreKeys;
430 }
431 else if (keyCallBacks->retain == CFStringCreateCopy && keyCallBacks->release == __CFTypeCollectionRelease) {
432 // copy everything
433 nonRetainingKeyCallbacks = *keyCallBacks;
434 nonRetainingKeyCallbacks.retain = (void *)_CFStringCreateCopyCollected; // XXX fix with better cast
435 nonRetainingKeyCallbacks.release = NULL;
436 keyCallBacks = &nonRetainingKeyCallbacks;
437 xflags = (__kCFSetRestoreKeys | __kCFSetRestoreStringKeys);
438 }
439 }
440 }
441 if (__CFSetCallBacksMatchNull(keyCallBacks)) {
442 __CFBitfieldSetValue(flags, 3, 2, __kCFSetHasNullCallBacks);
443 } else if (__CFSetCallBacksMatchCFType(keyCallBacks)) {
444 __CFBitfieldSetValue(flags, 3, 2, __kCFSetHasCFTypeCallBacks);
445 } else {
446 __CFBitfieldSetValue(flags, 3, 2, __kCFSetHasCustomCallBacks);
447 }
448 size = __CFSetGetSizeOfType(flags) - sizeof(CFRuntimeBase);
449 switch (__CFBitfieldGetValue(flags, 1, 0)) {
450 case __kCFSetImmutable:
451 case __kCFSetFixedMutable:
452 size += __CFSetNumBucketsForCapacity(capacity) * sizeof(const void *);
453 break;
454 case __kCFSetMutable:
455 break;
456 }
457 memory = (struct __CFSet *)_CFRuntimeCreateInstance(allocator, __kCFSetTypeID, size, NULL);
458 if (NULL == memory) {
459 return NULL;
460 }
461 __CFBitfieldSetValue(memory->_base._info, 6, 0, flags);
462 memory->_count = 0;
463 memory->_marker = (uintptr_t)0xa1b1c1d3;
464 memory->_context = NULL;
465 memory->_deletes = 0;
466 memory->_xflags = xflags;
467 switch (__CFBitfieldGetValue(flags, 1, 0)) {
468 case __kCFSetImmutable:
469 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator) && (xflags & __kCFSetWeakKeys)) { // if weak, don't scan
470 auto_zone_set_layout_type(__CFCollectableZone, memory, AUTO_OBJECT_UNSCANNED);
471 }
472 if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (immutable)");
473 memory->_capacity = capacity; /* Don't round up capacity */
474 memory->_bucketsNum = __CFSetNumBucketsForCapacity(memory->_capacity);
475 memory->_keys = (const void **)((uint8_t *)memory + __CFSetGetSizeOfType(flags));
476 for (idx = memory->_bucketsNum; idx--;) {
477 memory->_keys[idx] = (const void *)memory->_marker;
478 }
479 break;
480 case __kCFSetFixedMutable:
481 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator) && (xflags & __kCFSetWeakKeys)) { // if weak, don't scan
482 auto_zone_set_layout_type(__CFCollectableZone, memory, AUTO_OBJECT_UNSCANNED);
483 }
484 if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (mutable-fixed)");
485 memory->_capacity = capacity; /* Don't round up capacity */
486 memory->_bucketsNum = __CFSetNumBucketsForCapacity(memory->_capacity);
487 memory->_keys = (const void **)((uint8_t *)memory + __CFSetGetSizeOfType(flags));
488 for (idx = memory->_bucketsNum; idx--;) {
489 memory->_keys[idx] = (const void *)memory->_marker;
490 }
491 break;
492 case __kCFSetMutable:
493 if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (mutable-variable)");
494 memory->_capacity = __CFSetRoundUpCapacity(1);
495 memory->_bucketsNum = 0;
496 memory->_keys = NULL;
497 break;
498 }
499 if (__kCFSetHasCustomCallBacks == __CFBitfieldGetValue(flags, 3, 2)) {
500 CFSetCallBacks *cb = (CFSetCallBacks *)__CFSetGetKeyCallBacks((CFSetRef)memory);
501 *cb = *keyCallBacks;
502 FAULT_CALLBACK((void **)&(cb->retain));
503 FAULT_CALLBACK((void **)&(cb->release));
504 FAULT_CALLBACK((void **)&(cb->copyDescription));
505 FAULT_CALLBACK((void **)&(cb->equal));
506 FAULT_CALLBACK((void **)&(cb->hash));
507 }
508 return (CFSetRef)memory;
509 }
510
511 CFSetRef CFSetCreate(CFAllocatorRef allocator, const void **keys, CFIndex numValues, const CFSetCallBacks *keyCallBacks) {
512 CFSetRef result;
513 uint32_t flags;
514 CFIndex idx;
515 CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);
516 result = __CFSetInit(allocator, __kCFSetImmutable, numValues, keyCallBacks);
517 flags = __CFBitfieldGetValue(((const CFRuntimeBase *)result)->_info, 1, 0);
518 if (flags == __kCFSetImmutable) {
519 // tweak flags so that we can add our immutable values
520 __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, __kCFSetFixedMutable);
521 }
522 for (idx = 0; idx < numValues; idx++) {
523 CFSetAddValue((CFMutableSetRef)result, keys[idx]);
524 }
525 __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, flags);
526 return result;
527 }
528
529 CFMutableSetRef CFSetCreateMutable(CFAllocatorRef allocator, CFIndex capacity, const CFSetCallBacks *keyCallBacks) {
530 CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
531 return (CFMutableSetRef)__CFSetInit(allocator, (0 == capacity) ? __kCFSetMutable : __kCFSetFixedMutable, capacity, keyCallBacks);
532 }
533
534
535 static void __CFSetGrow(CFMutableSetRef set, CFIndex numNewValues);
536
537 // This creates a set which is for CFTypes or NSObjects, with an ownership transfer --
538 // the set does not take a retain, and the caller does not need to release the inserted objects.
539 // The incoming objects must also be collectable if allocated out of a collectable allocator.
540 CFSetRef _CFSetCreate_ex(CFAllocatorRef allocator, bool mutable, const void **keys, CFIndex numValues) {
541 CFSetRef result;
542 void *bucketsBase;
543 uint32_t flags;
544 CFIndex idx;
545 result = __CFSetInit(allocator, mutable ? __kCFSetMutable : __kCFSetImmutable, numValues, &kCFTypeSetCallBacks);
546 flags = __CFBitfieldGetValue(((const CFRuntimeBase *)result)->_info, 1, 0);
547 if (!mutable) {
548 __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, __kCFSetFixedMutable);
549 }
550 if (mutable) {
551 if (result->_count == result->_capacity || NULL == result->_keys) {
552 __CFSetGrow((CFMutableSetRef)result, numValues);
553 }
554 }
555 // GC: since kCFTypeSetCallBacks are used, the keys
556 // and values will be allocated contiguously.
557 bool collectableContainer = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
558 bucketsBase = (collectableContainer ? (void *)auto_zone_base_pointer(__CFCollectableZone, result->_keys) : NULL);
559 for (idx = 0; idx < numValues; idx++) {
560 CFIndex match, nomatch;
561 const void *newKey;
562 __CFSetFindBuckets2(result, keys[idx], &match, &nomatch);
563 if (kCFNotFound != match) {
564 } else {
565 newKey = keys[idx];
566 if (result->_marker == (uintptr_t)newKey || ~result->_marker == (uintptr_t)newKey) {
567 __CFSetFindNewMarker(result);
568 }
569 CF_WRITE_BARRIER_BASE_ASSIGN(allocator, bucketsBase, result->_keys[nomatch], newKey);
570 // GC: generation(_keys/_values) <= generation(keys/values), but added for completeness.
571 ((CFMutableSetRef)result)->_count++;
572 }
573 }
574 __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, flags);
575 return result;
576 }
577
578 CFSetRef CFSetCreateCopy(CFAllocatorRef allocator, CFSetRef set) {
579 CFSetRef result;
580 const CFSetCallBacks *cb;
581 CFIndex numValues = CFSetGetCount(set);
582 const void **list, *buffer[256];
583 list = (numValues <= 256) ? buffer : CFAllocatorAllocate(allocator, numValues * sizeof(void *), 0); // XXX_PCB GC OK
584 if (list != buffer && __CFOASafe) __CFSetLastAllocationEventName(list, "CFSet (temp)");
585 CFSetGetValues(set, list);
586 CFSetCallBacks patchedKeyCB;
587 if (CF_IS_OBJC(__kCFSetTypeID, set)) {
588 cb = &kCFTypeSetCallBacks;
589 }
590 else {
591 cb = __CFSetGetKeyCallBacks(set);
592 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
593 if (set->_xflags & __kCFSetRestoreKeys) {
594 patchedKeyCB = *cb; // copy
595 cb = &patchedKeyCB; // reset to copy
596 patchedKeyCB.retain = (set->_xflags & __kCFSetRestoreStringKeys) ? CFStringCreateCopy : __CFTypeCollectionRetain;
597 patchedKeyCB.release = __CFTypeCollectionRelease;
598 }
599 }
600 }
601 result = CFSetCreate(allocator, list, numValues, cb);
602 if (list != buffer) CFAllocatorDeallocate(allocator, list); // GC OK
603 return result;
604 }
605
606 CFMutableSetRef CFSetCreateMutableCopy(CFAllocatorRef allocator, CFIndex capacity, CFSetRef set) {
607 CFMutableSetRef result;
608 const CFSetCallBacks *cb;
609 CFIndex idx, numValues = CFSetGetCount(set);
610 const void **list, *buffer[256];
611 CFAssert3(0 == capacity || numValues <= capacity, __kCFLogAssertion, "%s(): for fixed-mutable sets, capacity (%d) must be greater than or equal to initial number of values (%d)", __PRETTY_FUNCTION__, capacity, numValues);
612 list = (numValues <= 256) ? buffer : CFAllocatorAllocate(allocator, numValues * sizeof(void *), 0); // XXX_PCB GC OK
613 if (list != buffer && __CFOASafe) __CFSetLastAllocationEventName(list, "CFSet (temp)");
614 CFSetGetValues(set, list);
615 CFSetCallBacks patchedKeyCB;
616 if (CF_IS_OBJC(__kCFSetTypeID, set)) {
617 cb = &kCFTypeSetCallBacks;
618 }
619 else {
620 cb = __CFSetGetKeyCallBacks(set);
621 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
622 if (set->_xflags & __kCFSetRestoreKeys) {
623 patchedKeyCB = *cb; // copy
624 cb = &patchedKeyCB; // reset to copy
625 patchedKeyCB.retain = (set->_xflags & __kCFSetRestoreStringKeys) ? CFStringCreateCopy : __CFTypeCollectionRetain;
626 patchedKeyCB.release = __CFTypeCollectionRelease;
627 }
628 }
629 }
630 result = CFSetCreateMutable(allocator, capacity, cb);
631 if (0 == capacity) _CFSetSetCapacity(result, numValues);
632 for (idx = 0; idx < numValues; idx++) {
633 CFSetAddValue(result, list[idx]);
634 }
635 if (list != buffer) CFAllocatorDeallocate(allocator, list); // XXX_PCB GC OK
636 return result;
637 }
638
639 // Used by NSHashTables and KVO
640 void _CFSetSetContext(CFSetRef set, void *context) {
641 CFAllocatorRef allocator = CFGetAllocator(set);
642 CF_WRITE_BARRIER_BASE_ASSIGN(allocator, set, set->_context, context);
643 }
644
645 void *_CFSetGetContext(CFSetRef set) {
646 return ((struct __CFSet *)set)->_context;
647 }
648
649 CFIndex CFSetGetCount(CFSetRef set) {
650 CF_OBJC_FUNCDISPATCH0(__kCFSetTypeID, CFIndex, set, "count");
651 __CFGenericValidateType(set, __kCFSetTypeID);
652 return set->_count;
653 }
654
655 CFIndex CFSetGetCountOfValue(CFSetRef set, const void *key) {
656 CFIndex match;
657 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, CFIndex, set, "countForObject:", key);
658 __CFGenericValidateType(set, __kCFSetTypeID);
659 if (0 == set->_count) return 0;
660 if (__kCFSetHasNullCallBacks == __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
661 match = __CFSetFindBuckets1a(set, key);
662 } else {
663 match = __CFSetFindBuckets1b(set, key);
664 }
665 return (kCFNotFound != match ? 1 : 0);
666 }
667
668 Boolean CFSetContainsValue(CFSetRef set, const void *key) {
669 CFIndex match;
670 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, char, set, "containsObject:", key);
671 __CFGenericValidateType(set, __kCFSetTypeID);
672 if (0 == set->_count) return false;
673 if (__kCFSetHasNullCallBacks == __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
674 match = __CFSetFindBuckets1a(set, key);
675 } else {
676 match = __CFSetFindBuckets1b(set, key);
677 }
678 return (kCFNotFound != match ? true : false);
679 }
680
681 const void *CFSetGetValue(CFSetRef set, const void *key) {
682 CFIndex match;
683 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, const void *, set, "member:", key);
684 __CFGenericValidateType(set, __kCFSetTypeID);
685 if (0 == set->_count) return NULL;
686 if (__kCFSetHasNullCallBacks == __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
687 match = __CFSetFindBuckets1a(set, key);
688 } else {
689 match = __CFSetFindBuckets1b(set, key);
690 }
691 return (kCFNotFound != match ? set->_keys[match] : NULL);
692 }
693
694 Boolean CFSetGetValueIfPresent(CFSetRef set, const void *key, const void **actualkey) {
695 CFIndex match;
696 CF_OBJC_FUNCDISPATCH2(__kCFSetTypeID, char, set, "_getValue:forObj:", (void * *) actualkey, key);
697 __CFGenericValidateType(set, __kCFSetTypeID);
698 if (0 == set->_count) return false;
699 if (__kCFSetHasNullCallBacks == __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
700 match = __CFSetFindBuckets1a(set, key);
701 } else {
702 match = __CFSetFindBuckets1b(set, key);
703 }
704 return (kCFNotFound != match ? ((actualkey ? __CFObjCStrongAssign(set->_keys[match], actualkey) : NULL), true) : false);
705 }
706
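/* Copies every member into the caller-supplied buffer, in bucket order. Under GC,
 * a write barrier is speculatively issued over the destination range first, since
 * the buffer may live in scanned memory. */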
707 void CFSetGetValues(CFSetRef set, const void **keys) {
708 CFIndex idx, cnt, nbuckets;
709 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "getObjects:", (void * *) keys);
710 __CFGenericValidateType(set, __kCFSetTypeID);
711 if (CF_USING_COLLECTABLE_MEMORY) {
712 // GC: speculatively issue a write-barrier on the copied to buffers (3743553).
713 __CFObjCWriteBarrierRange(keys, set->_count * sizeof(void *));
714 }
715 nbuckets = set->_bucketsNum;
716 for (idx = 0; idx < nbuckets; idx++) {
717 if (set->_marker != (uintptr_t)set->_keys[idx] && ~set->_marker != (uintptr_t)set->_keys[idx]) {
718 for (cnt = 1; cnt--;) {
719 if (keys) CF_WRITE_BARRIER_ASSIGN(NULL, *keys++, set->_keys[idx]);
720 }
721 }
722 }
723 }
724
725 void CFSetApplyFunction(CFSetRef set, CFSetApplierFunction applier, void *context) {
726 const void **keys;
727 CFIndex idx, cnt, nbuckets;
728 FAULT_CALLBACK((void **)&(applier));
729 CF_OBJC_FUNCDISPATCH2(__kCFSetTypeID, void, set, "_applyValues:context:", applier, context);
730 __CFGenericValidateType(set, __kCFSetTypeID);
731 keys = set->_keys;
732 nbuckets = set->_bucketsNum;
733 for (idx = 0; idx < nbuckets; idx++) {
734 if (set->_marker != (uintptr_t)keys[idx] && ~set->_marker != (uintptr_t)keys[idx]) {
735 for (cnt = 1; cnt--;) {
736 INVOKE_CALLBACK2(applier, keys[idx], context);
737 }
738 }
739 }
740 }
741
742
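/* Rehashes into a freshly allocated bucket array sized for _count + numNewValues
 * entries and resets the tombstone count. Passing 0 (as CFSetRemoveValue() does)
 * shrinks a sparse table back down. HALTs if the new bucket array cannot be
 * allocated. */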
743 static void __CFSetGrow(CFMutableSetRef set, CFIndex numNewValues) {
744 const void **oldkeys = set->_keys;
745 CFIndex idx, oldnbuckets = set->_bucketsNum;
746 CFIndex oldCount = set->_count;
747 CFAllocatorRef allocator = __CFGetAllocator(set), keysAllocator;
748 void *keysBase;
749 set->_capacity = __CFSetRoundUpCapacity(oldCount + numNewValues);
750 set->_bucketsNum = __CFSetNumBucketsForCapacity(set->_capacity);
751 set->_deletes = 0;
752 void *buckets = _CFAllocatorAllocateGC(allocator, set->_bucketsNum * sizeof(const void *), (set->_xflags & __kCFSetWeakKeys) ? AUTO_MEMORY_UNSCANNED : AUTO_MEMORY_SCANNED);
753 CF_WRITE_BARRIER_BASE_ASSIGN(allocator, set, set->_keys, buckets);
754 keysAllocator = allocator;
755 keysBase = set->_keys;
756 if (NULL == set->_keys) HALT;
757 if (__CFOASafe) __CFSetLastAllocationEventName(set->_keys, "CFSet (store)");
758 for (idx = set->_bucketsNum; idx--;) {
759 set->_keys[idx] = (const void *)set->_marker;
760 }
761 if (NULL == oldkeys) return;
762 for (idx = 0; idx < oldnbuckets; idx++) {
763 if (set->_marker != (uintptr_t)oldkeys[idx] && ~set->_marker != (uintptr_t)oldkeys[idx]) {
764 CFIndex match, nomatch;
765 __CFSetFindBuckets2(set, oldkeys[idx], &match, &nomatch);
766 CFAssert3(kCFNotFound == match, __kCFLogAssertion, "%s(): two values (%p, %p) now hash to the same slot; mutable value changed while in table or hash value is not immutable", __PRETTY_FUNCTION__, oldkeys[idx], set->_keys[match]);
767 if (kCFNotFound != nomatch) {
768 CF_WRITE_BARRIER_BASE_ASSIGN(keysAllocator, keysBase, set->_keys[nomatch], oldkeys[idx]);
769 }
770 }
771 }
772 CFAssert1(set->_count == oldCount, __kCFLogAssertion, "%s(): set count differs after rehashing; error", __PRETTY_FUNCTION__);
773 _CFAllocatorDeallocateGC(allocator, oldkeys);
774 }
775
776 // This function is for Foundation's benefit; no one else should use it.
777 void _CFSetSetCapacity(CFMutableSetRef set, CFIndex cap) {
778 if (CF_IS_OBJC(__kCFSetTypeID, set)) return;
779 #if defined(DEBUG)
780 __CFGenericValidateType(set, __kCFSetTypeID);
781 CFAssert1(__CFSetGetType(set) != __kCFSetImmutable && __CFSetGetType(set) != __kCFSetFixedMutable, __kCFLogAssertion, "%s(): set is immutable or fixed-mutable", __PRETTY_FUNCTION__);
782 CFAssert3(set->_count <= cap, __kCFLogAssertion, "%s(): desired capacity (%d) is less than count (%d)", __PRETTY_FUNCTION__, cap, set->_count);
783 #endif
784 __CFSetGrow(set, cap - set->_count);
785 }
786
787
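/* Insertion: grow a full variable-capacity table, probe for the key, and if it is
 * not already present retain it through the key callback, pick a new marker if the
 * stored pointer would collide with the current one, then store it with a write
 * barrier between the KVO will/did-change hooks. Adding a value that is already in
 * the set is a no-op. __CFSetAddValueAndReturn() below follows the same path but
 * returns the pointer actually stored. */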
788 void CFSetAddValue(CFMutableSetRef set, const void *key) {
789 CFIndex match, nomatch;
790 const CFSetCallBacks *cb;
791 const void *newKey;
792 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "addObject:", key);
793 __CFGenericValidateType(set, __kCFSetTypeID);
794 switch (__CFSetGetType(set)) {
795 case __kCFSetMutable:
796 if (set->_count == set->_capacity || NULL == set->_keys) {
797 __CFSetGrow(set, 1);
798 }
799 break;
800 case __kCFSetFixedMutable:
801 CFAssert3(set->_count < set->_capacity, __kCFLogAssertion, "%s(): capacity exceeded on fixed-capacity set %p (capacity = %d)", __PRETTY_FUNCTION__, set, set->_capacity);
802 break;
803 default:
804 CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
805 break;
806 }
807 __CFSetFindBuckets2(set, key, &match, &nomatch);
808 if (kCFNotFound != match) {
809 } else {
810 CFAllocatorRef allocator = __CFGetAllocator(set);
811 CFAllocatorRef keysAllocator = (set->_xflags & __kCFSetWeakKeys) ? kCFAllocatorNull : allocator;
812 cb = __CFSetGetKeyCallBacks(set);
813 if (cb->retain) {
814 newKey = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))cb->retain), allocator, key, set->_context);
815 } else {
816 newKey = key;
817 }
818 if (set->_marker == (uintptr_t)newKey || ~set->_marker == (uintptr_t)newKey) {
819 __CFSetFindNewMarker(set);
820 }
821 CF_OBJC_KVO_WILLCHANGE(set, key);
822 CF_WRITE_BARRIER_ASSIGN(keysAllocator, set->_keys[nomatch], newKey);
823 set->_count++;
824 CF_OBJC_KVO_DIDCHANGE(set, key);
825 }
826 }
827
828 __private_extern__ const void *__CFSetAddValueAndReturn(CFMutableSetRef set, const void *key) {
829 CFIndex match, nomatch;
830 const CFSetCallBacks *cb;
831 const void *newKey;
832 // #warning not toll-free bridged, but internal
833 __CFGenericValidateType(set, __kCFSetTypeID);
834 switch (__CFSetGetType(set)) {
835 case __kCFSetMutable:
836 if (set->_count == set->_capacity || NULL == set->_keys) {
837 __CFSetGrow(set, 1);
838 }
839 break;
840 case __kCFSetFixedMutable:
841 CFAssert3(set->_count < set->_capacity, __kCFLogAssertion, "%s(): capacity exceeded on fixed-capacity set %p (capacity = %d)", __PRETTY_FUNCTION__, set, set->_capacity);
842 break;
843 default:
844 CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
845 break;
846 }
847 __CFSetFindBuckets2(set, key, &match, &nomatch);
848 if (kCFNotFound != match) {
849 return set->_keys[match];
850 } else {
851 CFAllocatorRef allocator = __CFGetAllocator(set);
852 CFAllocatorRef keysAllocator = (set->_xflags & __kCFSetWeakKeys) ? kCFAllocatorNull : allocator;
853 cb = __CFSetGetKeyCallBacks(set);
854 if (cb->retain) {
855 newKey = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))cb->retain), allocator, key, set->_context);
856 } else {
857 newKey = key;
858 }
859 if (set->_marker == (uintptr_t)newKey || ~set->_marker == (uintptr_t)newKey) {
860 __CFSetFindNewMarker(set);
861 }
862 CF_OBJC_KVO_WILLCHANGE(set, key);
863 CF_WRITE_BARRIER_ASSIGN(keysAllocator, set->_keys[nomatch], newKey);
864 set->_count++;
865 CF_OBJC_KVO_DIDCHANGE(set, key);
866 return newKey;
867 }
868 }
869
870 void CFSetReplaceValue(CFMutableSetRef set, const void *key) {
871 CFIndex match;
872 const CFSetCallBacks *cb;
873 const void *newKey;
874 CFAllocatorRef allocator;
875 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "_replaceObject:", key);
876 __CFGenericValidateType(set, __kCFSetTypeID);
877 switch (__CFSetGetType(set)) {
878 case __kCFSetMutable:
879 case __kCFSetFixedMutable:
880 break;
881 default:
882 CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
883 break;
884 }
885 if (0 == set->_count) return;
886 if (__kCFSetHasNullCallBacks == __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
887 match = __CFSetFindBuckets1a(set, key);
888 } else {
889 match = __CFSetFindBuckets1b(set, key);
890 }
891 if (kCFNotFound == match) return;
892 cb = __CFSetGetKeyCallBacks(set);
893 allocator = (set->_xflags & __kCFSetWeakKeys) ? kCFAllocatorNull : __CFGetAllocator(set);
894 if (cb->retain) {
895 newKey = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))cb->retain), allocator, key, set->_context);
896 } else {
897 newKey = key;
898 }
899 CF_OBJC_KVO_WILLCHANGE(set, key);
900 if (cb->release) {
901 INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))cb->release), allocator, set->_keys[match], set->_context);
902 }
903 CF_WRITE_BARRIER_ASSIGN(allocator, set->_keys[match], newKey);
904 CF_OBJC_KVO_DIDCHANGE(set, key);
905 }
906
907 void CFSetSetValue(CFMutableSetRef set, const void *key) {
908 CFIndex match, nomatch;
909 const CFSetCallBacks *cb;
910 const void *newKey;
911 CFAllocatorRef allocator;
912 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "_setObject:", key);
913 __CFGenericValidateType(set, __kCFSetTypeID);
914 switch (__CFSetGetType(set)) {
915 case __kCFSetMutable:
916 if (set->_count == set->_capacity || NULL == set->_keys) {
917 __CFSetGrow(set, 1);
918 }
919 break;
920 case __kCFSetFixedMutable:
921 break;
922 default:
923 CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
924 break;
925 }
926 __CFSetFindBuckets2(set, key, &match, &nomatch);
927 cb = __CFSetGetKeyCallBacks(set);
928 allocator = (set->_xflags & __kCFSetWeakKeys) ? kCFAllocatorNull : __CFGetAllocator(set);
929 if (cb->retain) {
930 newKey = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))cb->retain), allocator, key, set->_context);
931 } else {
932 newKey = key;
933 }
934 if (kCFNotFound != match) {
935 CF_OBJC_KVO_WILLCHANGE(set, key);
936 if (cb->release) {
937 INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))cb->release), allocator, set->_keys[match], set->_context);
938 }
939 CF_WRITE_BARRIER_ASSIGN(allocator, set->_keys[match], newKey);
940 CF_OBJC_KVO_DIDCHANGE(set, key);
941 } else {
942 CFAssert3(__kCFSetFixedMutable != __CFSetGetType(set) || set->_count < set->_capacity, __kCFLogAssertion, "%s(): capacity exceeded on fixed-capacity set %p (capacity = %d)", __PRETTY_FUNCTION__, set, set->_capacity);
943 if (set->_marker == (uintptr_t)newKey || ~set->_marker == (uintptr_t)newKey) {
944 __CFSetFindNewMarker(set);
945 }
946 CF_OBJC_KVO_WILLCHANGE(set, key);
947 CF_WRITE_BARRIER_ASSIGN(allocator, set->_keys[nomatch], newKey);
948 set->_count++;
949 CF_OBJC_KVO_DIDCHANGE(set, key);
950 }
951 }
952
953 void CFSetRemoveValue(CFMutableSetRef set, const void *key) {
954 CFIndex match;
955 const CFSetCallBacks *cb;
956 CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "removeObject:", key);
957 __CFGenericValidateType(set, __kCFSetTypeID);
958 switch (__CFSetGetType(set)) {
959 case __kCFSetMutable:
960 case __kCFSetFixedMutable:
961 break;
962 default:
963 CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
964 break;
965 }
966 if (0 == set->_count) return;
967 if (__kCFSetHasNullCallBacks == __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
968 match = __CFSetFindBuckets1a(set, key);
969 } else {
970 match = __CFSetFindBuckets1b(set, key);
971 }
972 if (kCFNotFound == match) return;
973 cb = __CFSetGetKeyCallBacks(set);
974 if (1) {
975 const void *oldkey = set->_keys[match];
976 CF_OBJC_KVO_WILLCHANGE(set, oldkey);
977 set->_keys[match] = (const void *)~set->_marker;
978 set->_count--;
979 CF_OBJC_KVO_DIDCHANGE(set, oldkey);
980 if (cb->release) {
981 INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))cb->release), __CFGetAllocator(set), oldkey, set->_context);
982 }
983 set->_deletes++;
984 if ((__kCFSetMutable == __CFSetGetType(set)) && (set->_bucketsNum < 4 * set->_deletes || (512 < set->_capacity && 3.236067 * set->_count < set->_capacity))) {
985 // 3.236067 == 2 * golden_mean; this comes about because we're trying to resize down
986 // when the count is less than 2 capacities smaller, but not right away when count
987 // is just less than 2 capacities smaller, because an add would then force growth;
988 // well, the ratio between one capacity and the previous is close to the golden
989 // mean currently, so (cap / m / m) would be two smaller; but so we're not close,
990 // we take the average of that and the prior cap (cap / m / m / m). Well, after one
991 // does the algebra, and uses the convenient fact that m^(x+2) = m^(x+1) + m^x if m
992 // is the golden mean, this reduces to cap / 2m for the threshold. In general, the
993 // possible threshold constant is 1 / (2 * m^k), k = 0, 1, 2, ... under this scheme.
994 // Rehash; currently only for mutable-variable sets
995 __CFSetGrow(set, 0);
996 } else {
997 // When the probeskip == 1 always and only, a DELETED slot followed by an EMPTY slot
998 // can be converted to an EMPTY slot. By extension, a chain of DELETED slots followed
999 // by an EMPTY slot can be converted to EMPTY slots, which is what we do here.
1000 // _CFSetDecrementValue() below has this same code.
1001 if (match < set->_bucketsNum - 1 && set->_keys[match + 1] == (const void *)set->_marker) {
1002 while (0 <= match && set->_keys[match] == (const void *)~set->_marker) {
1003 set->_keys[match] = (const void *)set->_marker;
1004 set->_deletes--;
1005 match--;
1006 }
1007 }
1008 }
1009 }
1010 }
1011
1012 void CFSetRemoveAllValues(CFMutableSetRef set) {
1013 const void **keys;
1014 const CFSetCallBacks *cb;
1015 CFAllocatorRef allocator;
1016 CFIndex idx, nbuckets;
1017 CF_OBJC_FUNCDISPATCH0(__kCFSetTypeID, void, set, "removeAllObjects");
1018 __CFGenericValidateType(set, __kCFSetTypeID);
1019 switch (__CFSetGetType(set)) {
1020 case __kCFSetMutable:
1021 case __kCFSetFixedMutable:
1022 break;
1023 default:
1024 CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
1025 break;
1026 }
1027 if (0 == set->_count) return;
1028 keys = set->_keys;
1029 nbuckets = set->_bucketsNum;
1030 cb = __CFSetGetKeyCallBacks(set);
1031 allocator = __CFGetAllocator(set);
1032 for (idx = 0; idx < nbuckets; idx++) {
1033 if (set->_marker != (uintptr_t)keys[idx] && ~set->_marker != (uintptr_t)keys[idx]) {
1034 const void *oldkey = keys[idx];
1035 CF_OBJC_KVO_WILLCHANGE(set, oldkey);
1036 keys[idx] = (const void *)~set->_marker;
1037 set->_count--;
1038 CF_OBJC_KVO_DIDCHANGE(set, oldkey);
1039 if (cb->release) {
1040 INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))cb->release), allocator, oldkey, set->_context);
1041 }
1042 }
1043 }
1044 // XXX need memset here
1045 for (idx = 0; idx < nbuckets; idx++) {
1046 keys[idx] = (const void *)set->_marker;
1047 }
1048 set->_count = 0;
1049 set->_deletes = 0;
1050 if ((__kCFSetMutable == __CFSetGetType(set)) && (512 < set->_capacity)) {
1051 __CFSetGrow(set, 256);
1052 }
1053 }
1054