/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*  CFBitVector.c
    Copyright 1998-2002, Apple, Inc. All rights reserved.
    Responsibility: Christopher Kane
*/

#include <CoreFoundation/CFBitVector.h>
#include "CFInternal.h"
#include <string.h>

/* The bucket type must be unsigned, at least one byte in size, and
   a power of 2 in number of bits; bits are numbered from 0 from left
   to right (bit 0 is the most significant) */
typedef uint8_t __CFBitVectorBucket;

enum {
    __CF_BITS_PER_BYTE = 8
};

enum {
    __CF_BITS_PER_BUCKET = (__CF_BITS_PER_BYTE * sizeof(__CFBitVectorBucket))
};

CF_INLINE CFIndex __CFBitVectorRoundUpCapacity(CFIndex capacity) {
    return ((capacity + 63) / 64) * 64;
}

CF_INLINE CFIndex __CFBitVectorNumBucketsForCapacity(CFIndex capacity) {
    return (capacity + __CF_BITS_PER_BUCKET - 1) / __CF_BITS_PER_BUCKET;
}
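
/* Illustrative sketch (not part of the library source): with the uint8_t
   bucket type above, __CF_BITS_PER_BUCKET is 8, so capacities round up to a
   multiple of 64 bits and one bucket covers 8 bits. A minimal worked example,
   assuming a hypothetical request for 70 bits:

       __CFBitVectorRoundUpCapacity(70)         == ((70 + 63) / 64) * 64 == 128
       __CFBitVectorNumBucketsForCapacity(128)  == (128 + 8 - 1) / 8     == 16
*/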

struct __CFBitVector {
    CFRuntimeBase _base;
    CFIndex _count;         /* number of bits */
    CFIndex _capacity;      /* maximum number of bits */
    __CFBitVectorBucket *_buckets;
};

CF_INLINE UInt32 __CFBitVectorMutableVariety(const void *cf) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 3, 2);
}

CF_INLINE void __CFBitVectorSetMutableVariety(void *cf, UInt32 v) {
    __CFBitfieldSetValue(((CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 3, 2, v);
}

CF_INLINE UInt32 __CFBitVectorMutableVarietyFromFlags(UInt32 flags) {
    return __CFBitfieldGetValue(flags, 1, 0);
}

// ensure that uses of these inlines are correct, bytes vs. buckets vs. bits
CF_INLINE CFIndex __CFBitVectorCount(CFBitVectorRef bv) {
    return bv->_count;
}

CF_INLINE void __CFBitVectorSetCount(CFMutableBitVectorRef bv, CFIndex v) {
    bv->_count = v;
}

CF_INLINE CFIndex __CFBitVectorCapacity(CFBitVectorRef bv) {
    return bv->_capacity;
}

CF_INLINE void __CFBitVectorSetCapacity(CFMutableBitVectorRef bv, CFIndex v) {
    bv->_capacity = v;
}

CF_INLINE CFIndex __CFBitVectorNumBucketsUsed(CFBitVectorRef bv) {
    return bv->_count / __CF_BITS_PER_BUCKET + 1;
}

CF_INLINE void __CFBitVectorSetNumBucketsUsed(CFMutableBitVectorRef bv, CFIndex v) {
    /* for a CFBitVector, _bucketsUsed == _count / __CF_BITS_PER_BUCKET + 1 */
}

CF_INLINE CFIndex __CFBitVectorNumBuckets(CFBitVectorRef bv) {
    return bv->_capacity / __CF_BITS_PER_BUCKET + 1;
}

CF_INLINE void __CFBitVectorSetNumBuckets(CFMutableBitVectorRef bv, CFIndex v) {
    /* for a CFBitVector, _bucketsNum == _capacity / __CF_BITS_PER_BUCKET + 1 */
}

static __CFBitVectorBucket __CFBitBucketMask(CFIndex bottomBit, CFIndex topBit) {
    CFIndex shiftL = __CF_BITS_PER_BUCKET - topBit + bottomBit - 1;
    __CFBitVectorBucket result = ~(__CFBitVectorBucket)0;
    result = (result << shiftL);
    result = (result >> bottomBit);
    return result;
}
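
/* Illustrative sketch (not part of the library source): __CFBitBucketMask
   builds a mask whose set bits cover bottomBit..topBit inclusive, in the
   MSB-first numbering used throughout this file. A minimal worked example
   for an 8-bit bucket, assuming bottomBit = 2 and topBit = 5:

       shiftL = 8 - 5 + 2 - 1 = 4
       0xFF << 4 = 0xF0;  0xF0 >> 2 = 0x3C = 0b00111100   (bits 2..5 set)
*/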

CF_INLINE CFBit __CFBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    return (buckets[bucketIdx] >> (__CF_BITS_PER_BUCKET - 1 - bitOfBucket)) & 0x1;
}

CF_INLINE void __CFSetBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx, CFBit value) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    if (value) {
        buckets[bucketIdx] |= (1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
    } else {
        buckets[bucketIdx] &= ~(1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
    }
}

CF_INLINE void __CFFlipBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    buckets[bucketIdx] ^= (1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
}
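
/* Illustrative sketch (not part of the library source): these inlines map a
   global bit index to (bucket, bit-within-bucket), with bit 0 as the most
   significant bit of bucket 0. A minimal worked example, assuming idx = 13
   and 8-bit buckets:

       bucketIdx   = 13 / 8 = 1
       bitOfBucket = 13 & 7 = 5
       bit value   = (buckets[1] >> (8 - 1 - 5)) & 0x1 = (buckets[1] >> 2) & 0x1
*/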

#if defined(DEBUG)
CF_INLINE void __CFBitVectorValidateRange(CFBitVectorRef bv, CFRange range, const char *func) {
    CFAssert2(0 <= range.location && range.location < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): range.location index (%d) out of bounds", func, range.location);
    CFAssert2(0 <= range.length, __kCFLogAssertion, "%s(): range.length (%d) cannot be less than zero", func, range.length);
    CFAssert2(range.location + range.length <= __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): ending index (%d) out of bounds", func, range.location + range.length);
}
#else
#define __CFBitVectorValidateRange(bf,r,f)
#endif

static Boolean __CFBitVectorEqual(CFTypeRef cf1, CFTypeRef cf2) {
    CFBitVectorRef bv1 = (CFBitVectorRef)cf1;
    CFBitVectorRef bv2 = (CFBitVectorRef)cf2;
    CFIndex idx, cnt;
    cnt = __CFBitVectorCount(bv1);
    if (cnt != __CFBitVectorCount(bv2)) return false;
    if (0 == cnt) return true;
    for (idx = 0; idx < (cnt / __CF_BITS_PER_BUCKET) + 1; idx++) {
        __CFBitVectorBucket val1 = bv1->_buckets[idx];
        __CFBitVectorBucket val2 = bv2->_buckets[idx];
        if (val1 != val2) return false;
    }
    return true;
}

static CFHashCode __CFBitVectorHash(CFTypeRef cf) {
    CFBitVectorRef bv = (CFBitVectorRef)cf;
    return __CFBitVectorCount(bv);
}

static CFStringRef __CFBitVectorCopyDescription(CFTypeRef cf) {
    CFBitVectorRef bv = (CFBitVectorRef)cf;
    CFMutableStringRef result;
    CFIndex idx, cnt;
    __CFBitVectorBucket *buckets;
    cnt = __CFBitVectorCount(bv);
    buckets = bv->_buckets;
    result = CFStringCreateMutable(kCFAllocatorSystemDefault, 0);
    CFStringAppendFormat(result, NULL, CFSTR("<CFBitVector %p [%p]>{count = %u, capacity = %u, objects = (\n"), cf, CFGetAllocator(bv), cnt, __CFBitVectorCapacity(bv));
    for (idx = 0; idx < (cnt / 64); idx++) {    /* Print groups of 64 */
        CFIndex idx2;
        CFStringAppendFormat(result, NULL, CFSTR("\t%u : "), (idx * 64));
        for (idx2 = 0; idx2 < 64; idx2 += 4) {
            CFIndex bucketIdx = (idx << 6) + idx2;
            CFStringAppendFormat(result, NULL, CFSTR("%d%d%d%d"),
                __CFBitVectorBit(buckets, bucketIdx + 0),
                __CFBitVectorBit(buckets, bucketIdx + 1),
                __CFBitVectorBit(buckets, bucketIdx + 2),
                __CFBitVectorBit(buckets, bucketIdx + 3));
        }
        CFStringAppend(result, CFSTR("\n"));
    }
    if (idx * 64 < cnt) {
        CFStringAppendFormat(result, NULL, CFSTR("\t%u : "), (idx * 64));
        for (idx = (idx * 64); idx < cnt; idx++) {    /* Print remainder */
            CFStringAppendFormat(result, NULL, CFSTR("%d"), __CFBitVectorBit(buckets, idx));
        }
    }
    CFStringAppend(result, CFSTR("\n)}"));
    return result;
}

enum {
    kCFBitVectorImmutable = 0x0,        /* unchangeable and fixed capacity; default */
    kCFBitVectorMutable = 0x1,          /* changeable and variable capacity */
    kCFBitVectorFixedMutable = 0x3      /* changeable and fixed capacity */
};

static void __CFBitVectorDeallocate(CFTypeRef cf) {
    CFMutableBitVectorRef bv = (CFMutableBitVectorRef)cf;
    CFAllocatorRef allocator = CFGetAllocator(bv);
    if (__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable) {
        _CFAllocatorDeallocateGC(allocator, bv->_buckets);
    }
}

static CFTypeID __kCFBitVectorTypeID = _kCFRuntimeNotATypeID;

static const CFRuntimeClass __CFBitVectorClass = {
    _kCFRuntimeScannedObject,
    "CFBitVector",
    NULL,    // init
    NULL,    // copy
    __CFBitVectorDeallocate,
    __CFBitVectorEqual,
    __CFBitVectorHash,
    NULL,    //
    __CFBitVectorCopyDescription
};

__private_extern__ void __CFBitVectorInitialize(void) {
    __kCFBitVectorTypeID = _CFRuntimeRegisterClass(&__CFBitVectorClass);
}

CFTypeID CFBitVectorGetTypeID(void) {
    return __kCFBitVectorTypeID;
}

static CFMutableBitVectorRef __CFBitVectorInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex numBits) {
    CFMutableBitVectorRef memory;
    CFIndex size;
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert3(kCFBitVectorFixedMutable != __CFBitVectorMutableVarietyFromFlags(flags) || numBits <= capacity, __kCFLogAssertion, "%s(): for fixed mutable bit vectors, capacity (%d) must be greater than or equal to number of initial elements (%d)", __PRETTY_FUNCTION__, capacity, numBits);
    CFAssert2(0 <= numBits, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numBits);
    size = sizeof(struct __CFBitVector) - sizeof(CFRuntimeBase);
    if (__CFBitVectorMutableVarietyFromFlags(flags) != kCFBitVectorMutable)
        size += sizeof(__CFBitVectorBucket) * __CFBitVectorNumBucketsForCapacity(capacity);
    memory = (CFMutableBitVectorRef)_CFRuntimeCreateInstance(allocator, __kCFBitVectorTypeID, size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    switch (__CFBitVectorMutableVarietyFromFlags(flags)) {
    case kCFBitVectorMutable:
        __CFBitVectorSetCapacity(memory, __CFBitVectorRoundUpCapacity(1));
        __CFBitVectorSetNumBuckets(memory, __CFBitVectorNumBucketsForCapacity(__CFBitVectorRoundUpCapacity(1)));
        CF_WRITE_BARRIER_BASE_ASSIGN(allocator, memory, memory->_buckets, _CFAllocatorAllocateGC(allocator, __CFBitVectorNumBuckets(memory) * sizeof(__CFBitVectorBucket), 0));
        if (__CFOASafe) __CFSetLastAllocationEventName(memory->_buckets, "CFBitVector (store)");
        if (NULL == memory->_buckets) {
            CFRelease(memory);
            return NULL;
        }
        break;
    case kCFBitVectorFixedMutable:
    case kCFBitVectorImmutable:
        /* Don't round up capacity */
        __CFBitVectorSetCapacity(memory, capacity);
        __CFBitVectorSetNumBuckets(memory, __CFBitVectorNumBucketsForCapacity(capacity));
        memory->_buckets = (__CFBitVectorBucket *)((int8_t *)memory + sizeof(struct __CFBitVector));
        break;
    }
    __CFBitVectorSetNumBucketsUsed(memory, numBits / __CF_BITS_PER_BUCKET + 1);
    __CFBitVectorSetCount(memory, numBits);
    if (bytes) {
        /* This move is possible because bits are numbered from 0 on the left */
        memmove(memory->_buckets, bytes, (numBits + __CF_BITS_PER_BYTE - 1) / __CF_BITS_PER_BYTE);
    }
    __CFBitVectorSetMutableVariety(memory, __CFBitVectorMutableVarietyFromFlags(flags));
    return memory;
}

CFBitVectorRef CFBitVectorCreate(CFAllocatorRef allocator, const uint8_t *bytes, CFIndex numBits) {
    return __CFBitVectorInit(allocator, kCFBitVectorImmutable, numBits, bytes, numBits);
}

CFMutableBitVectorRef CFBitVectorCreateMutable(CFAllocatorRef allocator, CFIndex capacity) {
    return __CFBitVectorInit(allocator, (0 == capacity) ? kCFBitVectorMutable : kCFBitVectorFixedMutable, capacity, NULL, 0);
}

CFBitVectorRef CFBitVectorCreateCopy(CFAllocatorRef allocator, CFBitVectorRef bv) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    return __CFBitVectorInit(allocator, kCFBitVectorImmutable, __CFBitVectorCount(bv), (const uint8_t *)bv->_buckets, __CFBitVectorCount(bv));
}

CFMutableBitVectorRef CFBitVectorCreateMutableCopy(CFAllocatorRef allocator, CFIndex capacity, CFBitVectorRef bv) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    return __CFBitVectorInit(allocator, (0 == capacity) ? kCFBitVectorMutable : kCFBitVectorFixedMutable, capacity, (const uint8_t *)bv->_buckets, __CFBitVectorCount(bv));
}
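
/* Illustrative usage sketch (not part of the library source), using only the
   public CFBitVector API declared in CFBitVector.h; the byte values are
   hypothetical:

       const uint8_t bytes[] = { 0xA5, 0x0F };          // 10100101 00001111
       CFBitVectorRef bv = CFBitVectorCreate(kCFAllocatorDefault, bytes, 16);
       CFBit first = CFBitVectorGetBitAtIndex(bv, 0);    // 1 (MSB of bytes[0])
       CFIndex ones = CFBitVectorGetCountOfBit(bv, CFRangeMake(0, 16), 1);   // 8
       CFMutableBitVectorRef mv = CFBitVectorCreateMutableCopy(kCFAllocatorDefault, 0, bv);
       CFRelease(bv);
       CFRelease(mv);
*/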

CFIndex CFBitVectorGetCount(CFBitVectorRef bv) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    return __CFBitVectorCount(bv);
}

typedef __CFBitVectorBucket (*__CFInternalMapper)(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context);

static void __CFBitVectorInternalMap(CFMutableBitVectorRef bv, CFRange range, __CFInternalMapper mapper, void *context) {
    CFIndex bucketIdx, bitOfBucket;
    CFIndex nBuckets;
    __CFBitVectorBucket bucketValMask, newBucketVal;
    if (0 == range.length) return;
    bucketIdx = range.location / __CF_BITS_PER_BUCKET;
    bitOfBucket = range.location & (__CF_BITS_PER_BUCKET - 1);
    /* Follow usual pattern of ramping up to a bit bucket boundary ...*/
    if (bitOfBucket + range.length < __CF_BITS_PER_BUCKET) {
        bucketValMask = __CFBitBucketMask(bitOfBucket, bitOfBucket + range.length - 1);
        range.length = 0;
    } else {
        bucketValMask = __CFBitBucketMask(bitOfBucket, __CF_BITS_PER_BUCKET - 1);
        range.length -= __CF_BITS_PER_BUCKET - bitOfBucket;
    }
    newBucketVal = mapper(bv->_buckets[bucketIdx], bucketValMask, context);
    bv->_buckets[bucketIdx] = (bv->_buckets[bucketIdx] & ~bucketValMask) | (newBucketVal & bucketValMask);
    bucketIdx++;
    /* ... clipping along with entire bit buckets ... */
    nBuckets = range.length / __CF_BITS_PER_BUCKET;
    range.length -= nBuckets * __CF_BITS_PER_BUCKET;
    while (nBuckets--) {
        newBucketVal = mapper(bv->_buckets[bucketIdx], ~0, context);
        bv->_buckets[bucketIdx] = newBucketVal;
        bucketIdx++;
    }
    /* ... and ramping down with the last fragmentary bit bucket. */
    if (0 != range.length) {
        bucketValMask = __CFBitBucketMask(0, range.length - 1);
        newBucketVal = mapper(bv->_buckets[bucketIdx], bucketValMask, context);
        bv->_buckets[bucketIdx] = (bv->_buckets[bucketIdx] & ~bucketValMask) | (newBucketVal & bucketValMask);
    }
}
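
/* Illustrative sketch (not part of the library source): __CFBitVectorInternalMap
   applies a mapper to an arbitrary bit range in three phases: a partial leading
   bucket, any number of whole buckets, and a partial trailing bucket. A minimal
   worked example, assuming range = {5, 20} and 8-bit buckets:

       ramp up:   bucket 0, mask for bits 5..7   (3 bits), 17 bits remain
       whole:     buckets 1 and 2, mask ~0       (16 bits), 1 bit remains
       ramp down: bucket 3, mask for bit 0       (1 bit)
*/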

struct _occursContext {
    CFBit value;
    CFIndex count;
};

static __CFBitVectorBucket __CFBitVectorCountBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, struct _occursContext *context) {
    static const __CFBitVectorBucket __CFNibbleBitCount[16] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
    __CFBitVectorBucket val;
    CFIndex idx;
    val = (context->value) ? (bucketValue & bucketValueMask) : (~bucketValue & bucketValueMask);
    for (idx = 0; idx < (CFIndex)sizeof(__CFBitVectorBucket) * 2; idx++) {
        context->count += __CFNibbleBitCount[val & 0xF];
        val = val >> 4;
    }
    return bucketValue;
}
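
/* Illustrative sketch (not part of the library source): the counter above is a
   nibble-table popcount over the masked bucket; when searching for 0 bits the
   bucket is complemented first, so set bits in ~bucketValue stand for zero bits
   in the range. A minimal worked example, assuming val = 0xB6:

       low nibble  0x6 = 0110  ->  __CFNibbleBitCount[0x6] = 2
       high nibble 0xB = 1011  ->  __CFNibbleBitCount[0xB] = 3
       context->count += 5
*/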

CFIndex CFBitVectorGetCountOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    struct _occursContext context;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    if (0 == range.length) return 0;
    context.value = value;
    context.count = 0;
    __CFBitVectorInternalMap((CFMutableBitVectorRef)bv, range, (__CFInternalMapper)__CFBitVectorCountBits, &context);
    return context.count;
}

Boolean CFBitVectorContainsBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    return (CFBitVectorGetCountOfBit(bv, range, value) != 0) ? true : false;
}

CFBit CFBitVectorGetBitAtIndex(CFBitVectorRef bv, CFIndex idx) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    return __CFBitVectorBit(bv->_buckets, idx);
}

struct _getBitsContext {
    uint8_t *curByte;
    CFIndex initBits;      /* Bits to extract off the front for the prev. byte */
    CFIndex totalBits;     /* This is for stopping at the end */
    bool ignoreFirstInitBits;
};

static __CFBitVectorBucket __CFBitVectorGetBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *ctx) {
    struct _getBitsContext *context = (struct _getBitsContext *)ctx;
    __CFBitVectorBucket val;
    CFIndex nBits;
    val = bucketValue & bucketValueMask;
    nBits = __CFMin(__CF_BITS_PER_BUCKET - context->initBits, context->totalBits);
    /* First initBits bits go in *curByte ... */
    if (0 < context->initBits) {
        if (!context->ignoreFirstInitBits) {
            *context->curByte |= (uint8_t)(val >> (__CF_BITS_PER_BUCKET - context->initBits));
            context->curByte++;
            context->totalBits -= context->initBits;
            context->ignoreFirstInitBits = false;
        }
        val <<= context->initBits;
    }
    /* ... then next groups of __CF_BITS_PER_BYTE go in *curByte ... */
    while (__CF_BITS_PER_BYTE <= nBits) {
        *context->curByte = (uint8_t)(val >> (__CF_BITS_PER_BUCKET - __CF_BITS_PER_BYTE));
        context->curByte++;
        context->totalBits -= context->initBits;
        nBits -= __CF_BITS_PER_BYTE;
        val <<= __CF_BITS_PER_BYTE;
    }
    /* ... then remaining bits go in *curByte */
    if (0 < nBits) {
        *context->curByte = (uint8_t)(val >> (__CF_BITS_PER_BUCKET - __CF_BITS_PER_BYTE));
        context->totalBits -= nBits;
    }
    return bucketValue;
}

void CFBitVectorGetBits(CFBitVectorRef bv, CFRange range, uint8_t *bytes) {
    struct _getBitsContext context;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    context.curByte = bytes;
    context.initBits = range.location & (__CF_BITS_PER_BUCKET - 1);
    context.totalBits = range.length;
    context.ignoreFirstInitBits = true;
    __CFBitVectorInternalMap((CFMutableBitVectorRef)bv, range, __CFBitVectorGetBits, &context);
}
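
/* Illustrative usage sketch (not part of the library source) of
   CFBitVectorGetBits, which packs the requested bits into bytes MSB-first;
   the counts and values are hypothetical:

       CFMutableBitVectorRef mv = CFBitVectorCreateMutable(kCFAllocatorDefault, 0);
       CFBitVectorSetCount(mv, 12);
       CFBitVectorSetBits(mv, CFRangeMake(4, 4), 1);     // set bits 4..7
       uint8_t out[2] = { 0, 0 };
       CFBitVectorGetBits(mv, CFRangeMake(0, 12), out);  // out[0] == 0x0F
       CFRelease(mv);
*/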

CFIndex CFBitVectorGetFirstIndexOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    CFIndex idx;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    for (idx = 0; idx < range.length; idx++) {
        if (value == CFBitVectorGetBitAtIndex(bv, range.location + idx)) {
            return range.location + idx;
        }
    }
    return kCFNotFound;
}

CFIndex CFBitVectorGetLastIndexOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    CFIndex idx;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    for (idx = range.length; idx--;) {
        if (value == CFBitVectorGetBitAtIndex(bv, range.location + idx)) {
            return range.location + idx;
        }
    }
    return kCFNotFound;
}

static void __CFBitVectorGrow(CFMutableBitVectorRef bv, CFIndex numNewValues) {
    CFIndex oldCount = __CFBitVectorCount(bv);
    CFIndex capacity = __CFBitVectorRoundUpCapacity(oldCount + numNewValues);
    CFAllocatorRef allocator = CFGetAllocator(bv);
    __CFBitVectorSetCapacity(bv, capacity);
    __CFBitVectorSetNumBuckets(bv, __CFBitVectorNumBucketsForCapacity(capacity));
    CF_WRITE_BARRIER_BASE_ASSIGN(allocator, bv, bv->_buckets, CFAllocatorReallocate(allocator, bv->_buckets, __CFBitVectorNumBuckets(bv) * sizeof(__CFBitVectorBucket), 0));
    if (__CFOASafe) __CFSetLastAllocationEventName(bv->_buckets, "CFBitVector (store)");
    if (NULL == bv->_buckets) HALT;
}

static __CFBitVectorBucket __CFBitVectorZeroBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return 0;
}

static __CFBitVectorBucket __CFBitVectorOneBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return ~(__CFBitVectorBucket)0;
}

void CFBitVectorSetCount(CFMutableBitVectorRef bv, CFIndex count) {
    CFIndex cnt;
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable || __CFBitVectorMutableVariety(bv) == kCFBitVectorFixedMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    cnt = __CFBitVectorCount(bv);
    switch (__CFBitVectorMutableVariety(bv)) {
    case kCFBitVectorMutable:
        if (cnt < count) {
            __CFBitVectorGrow(bv, count - cnt);
        }
        break;
    case kCFBitVectorFixedMutable:
        CFAssert1(count <= __CFBitVectorCapacity(bv), __kCFLogAssertion, "%s(): fixed-capacity bit vector is full", __PRETTY_FUNCTION__);
        break;
    }
    if (cnt < count) {
        CFRange range = CFRangeMake(cnt, count - cnt);
        __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
    }
    __CFBitVectorSetNumBucketsUsed(bv, count / __CF_BITS_PER_BUCKET + 1);
    __CFBitVectorSetCount(bv, count);
}

void CFBitVectorFlipBitAtIndex(CFMutableBitVectorRef bv, CFIndex idx) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable || __CFBitVectorMutableVariety(bv) == kCFBitVectorFixedMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    __CFFlipBitVectorBit(bv->_buckets, idx);
}

static __CFBitVectorBucket __CFBitVectorFlipBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return (~(__CFBitVectorBucket)0) ^ bucketValue;
}

void CFBitVectorFlipBits(CFMutableBitVectorRef bv, CFRange range) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable || __CFBitVectorMutableVariety(bv) == kCFBitVectorFixedMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    __CFBitVectorInternalMap(bv, range, __CFBitVectorFlipBits, NULL);
}

void CFBitVectorSetBitAtIndex(CFMutableBitVectorRef bv, CFIndex idx, CFBit value) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable || __CFBitVectorMutableVariety(bv) == kCFBitVectorFixedMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    __CFSetBitVectorBit(bv->_buckets, idx, value);
}

void CFBitVectorSetBits(CFMutableBitVectorRef bv, CFRange range, CFBit value) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable || __CFBitVectorMutableVariety(bv) == kCFBitVectorFixedMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    if (value) {
        __CFBitVectorInternalMap(bv, range, __CFBitVectorOneBits, NULL);
    } else {
        __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
    }
}

void CFBitVectorSetAllBits(CFMutableBitVectorRef bv, CFBit value) {
    CFIndex nBuckets, leftover;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable || __CFBitVectorMutableVariety(bv) == kCFBitVectorFixedMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    nBuckets = __CFBitVectorCount(bv) / __CF_BITS_PER_BUCKET;
    leftover = __CFBitVectorCount(bv) - nBuckets * __CF_BITS_PER_BUCKET;
    if (0 < leftover) {
        CFRange range = CFRangeMake(nBuckets * __CF_BITS_PER_BUCKET, leftover);
        if (value) {
            __CFBitVectorInternalMap(bv, range, __CFBitVectorOneBits, NULL);
        } else {
            __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
        }
    }
    memset(bv->_buckets, (value ? ~0 : 0), nBuckets);
}
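
/* Illustrative usage sketch (not part of the library source) of the mutation
   functions above; the counts are hypothetical:

       CFMutableBitVectorRef mv = CFBitVectorCreateMutable(kCFAllocatorDefault, 0);
       CFBitVectorSetCount(mv, 10);                      // ten bits, initially 0
       CFBitVectorSetAllBits(mv, 1);                     // all ten bits set
       CFBitVectorFlipBits(mv, CFRangeMake(0, 5));       // clear bits 0..4
       CFIndex idx = CFBitVectorGetFirstIndexOfBit(mv, CFRangeMake(0, 10), 1);  // 5
       CFRelease(mv);
*/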

#undef __CFBitVectorValidateRange