/*
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*	CFBitVector.c
	Copyright (c) 1998-2014, Apple Inc. All rights reserved.
	Responsibility: Christopher Kane
*/

#include <CoreFoundation/CFBitVector.h>
#include "CFInternal.h"
/* The bucket type must be unsigned, at least one byte in size, and
   a power of 2 in number of bits; bits are numbered from 0 from left
   to right (bit 0 is the most significant) */
typedef uint8_t __CFBitVectorBucket;

#define __CFBITVECTORBUCKET_SIZE 8
#define __CF_BITS_PER_BYTE 8

enum {
    __CF_BITS_PER_BYTE_MASK = 0x07
};

enum {
    __CF_BITS_PER_BUCKET = (__CF_BITS_PER_BYTE * sizeof(__CFBitVectorBucket)),
    __CF_BITS_PER_BUCKET_MASK = 0x07	// __CF_BITS_PER_BYTE_MASK * lg2(sizeof(__CFBitVectorBucket))
};
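
/* Worked example (illustrative, not part of the original source): with the 8-bit bucket
   above, vector bit idx lives in buckets[idx / __CF_BITS_PER_BUCKET], and because bit 0
   is the most significant bit of a bucket, its in-bucket position is
   __CF_BITS_PER_BUCKET - 1 - (idx & __CF_BITS_PER_BUCKET_MASK). For idx == 10 that is
   buckets[1] with mask 1 << 5 (0x20), which matches __CFBitVectorBit() below. */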
CF_INLINE CFIndex __CFBitVectorRoundUpCapacity(CFIndex capacity) {
    if (0 == capacity) capacity = 1;
    return ((capacity + 63) / 64) * 64;
}
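
/* Example (illustrative): a requested capacity of 0 is bumped to 1 and rounds up to 64;
   65 rounds up to ((65 + 63) / 64) * 64 == 128. Capacities are always multiples of 64 bits. */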
CF_INLINE CFIndex __CFBitVectorNumBucketsForCapacity(CFIndex capacity) {
    return capacity / __CF_BITS_PER_BUCKET + ((capacity & __CF_BITS_PER_BUCKET_MASK) ? 1 : 0);
}
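
/* Example (illustrative): with 8-bit buckets, a capacity of 64 needs exactly 8 buckets,
   while 65 needs 65 / 8 + 1 == 9 buckets for the one leftover bit. */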
struct __CFBitVector {
    CFRuntimeBase _base;
    CFIndex _count;		/* number of bits */
    CFIndex _capacity;		/* maximum number of bits */
    __CFBitVectorBucket *_buckets;
};
CF_INLINE UInt32 __CFBitVectorMutableVariety(const void *cf) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 3, 2);
}
CF_INLINE void __CFBitVectorSetMutableVariety(void *cf, UInt32 v) {
    __CFBitfieldSetValue(((CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 3, 2, v);
}
CF_INLINE UInt32 __CFBitVectorMutableVarietyFromFlags(UInt32 flags) {
    return __CFBitfieldGetValue(flags, 1, 0);
}

// ensure that uses of these inlines are correct, bytes vs. buckets vs. bits
CF_INLINE CFIndex __CFBitVectorCount(CFBitVectorRef bv) {
    return bv->_count;
}
CF_INLINE void __CFBitVectorSetCount(CFMutableBitVectorRef bv, CFIndex v) {
    bv->_count = v;
}
CF_INLINE CFIndex __CFBitVectorCapacity(CFBitVectorRef bv) {
    return bv->_capacity;
}
CF_INLINE void __CFBitVectorSetCapacity(CFMutableBitVectorRef bv, CFIndex v) {
    bv->_capacity = v;
}
CF_INLINE void __CFBitVectorSetNumBucketsUsed(CFMutableBitVectorRef bv, CFIndex v) {
    /* for a CFBitVector, _bucketsUsed == _count / __CF_BITS_PER_BUCKET + 1 */
}
CF_INLINE CFIndex __CFBitVectorNumBuckets(CFBitVectorRef bv) {
    return bv->_capacity / __CF_BITS_PER_BUCKET + 1;
}
CF_INLINE void __CFBitVectorSetNumBuckets(CFMutableBitVectorRef bv, CFIndex v) {
    /* for a CFBitVector, _bucketsNum == _capacity / __CF_BITS_PER_BUCKET + 1 */
}
static __CFBitVectorBucket __CFBitBucketMask(CFIndex bottomBit, CFIndex topBit) {
    CFIndex shiftL = __CF_BITS_PER_BUCKET - topBit + bottomBit - 1;
    __CFBitVectorBucket result = ~(__CFBitVectorBucket)0;
    result = (result << shiftL);
    result = (result >> bottomBit);
    return result;
}
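
/* Worked example (illustrative): for 8-bit buckets, __CFBitBucketMask(2, 5) computes
   shiftL = 8 - 5 + 2 - 1 = 4, so result becomes 0xF0 after the left shift (truncated to the
   8-bit bucket) and 0x3C (binary 0011 1100) after the right shift, i.e. exactly bits 2
   through 5 set, counting bit 0 as the most significant bit. */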
CF_INLINE CFBit __CFBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    return (buckets[bucketIdx] >> (__CF_BITS_PER_BUCKET - 1 - bitOfBucket)) & 0x1;
}
CF_INLINE void __CFSetBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx, CFBit value) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    if (value) {
        buckets[bucketIdx] |= (1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
    } else {
        buckets[bucketIdx] &= ~(1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
    }
}
CF_INLINE void __CFFlipBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    buckets[bucketIdx] ^= (1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
}
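
/* Example (illustrative): starting from an all-zero bucket, __CFSetBitVectorBit(buckets, 3, 1)
   ORs in 1 << (8 - 1 - 3) == 0x10, so buckets[0] becomes 0x10 and __CFBitVectorBit(buckets, 3)
   then returns 1; flipping the same index XORs 0x10 back out. */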
#if defined(DEBUG)
CF_INLINE void __CFBitVectorValidateRange(CFBitVectorRef bv, CFRange range, const char *func) {
    CFAssert2(0 <= range.location && range.location < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): range.location index (%d) out of bounds", func, range.location);
    CFAssert2(0 <= range.length, __kCFLogAssertion, "%s(): range.length (%d) cannot be less than zero", func, range.length);
    CFAssert2(range.location + range.length <= __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): ending index (%d) out of bounds", func, range.location + range.length);
}
#else
#define __CFBitVectorValidateRange(bf,r,f)
#endif
static Boolean __CFBitVectorEqual(CFTypeRef cf1, CFTypeRef cf2) {
    CFBitVectorRef bv1 = (CFBitVectorRef)cf1;
    CFBitVectorRef bv2 = (CFBitVectorRef)cf2;
    CFIndex idx, cnt;
    cnt = __CFBitVectorCount(bv1);
    if (cnt != __CFBitVectorCount(bv2)) return false;
    if (0 == cnt) return true;
    for (idx = 0; idx < (cnt / __CF_BITS_PER_BUCKET) + 1; idx++) {
        __CFBitVectorBucket val1 = bv1->_buckets[idx];
        __CFBitVectorBucket val2 = bv2->_buckets[idx];
        if (val1 != val2) return false;
    }
    return true;
}
static CFHashCode __CFBitVectorHash(CFTypeRef cf) {
    CFBitVectorRef bv = (CFBitVectorRef)cf;
    return __CFBitVectorCount(bv);
}
static CFStringRef __CFBitVectorCopyDescription(CFTypeRef cf) {
    CFBitVectorRef bv = (CFBitVectorRef)cf;
    CFMutableStringRef result;
    CFIndex idx, cnt;
    __CFBitVectorBucket *buckets;
    cnt = __CFBitVectorCount(bv);
    buckets = bv->_buckets;
    result = CFStringCreateMutable(kCFAllocatorSystemDefault, 0);
    CFStringAppendFormat(result, NULL, CFSTR("<CFBitVector %p [%p]>{count = %lu, capacity = %lu, objects = (\n"), cf, CFGetAllocator(bv), (unsigned long)cnt, (unsigned long)__CFBitVectorCapacity(bv));
    for (idx = 0; idx < (cnt / 64); idx++) {	/* Print groups of 64 */
        CFIndex idx2;
        CFStringAppendFormat(result, NULL, CFSTR("\t%lu : "), (unsigned long)(idx * 64));
        for (idx2 = 0; idx2 < 64; idx2 += 4) {
            CFIndex bucketIdx = (idx << 6) + idx2;
            CFStringAppendFormat(result, NULL, CFSTR("%u%u%u%u"),
                (unsigned int)__CFBitVectorBit(buckets, bucketIdx + 0),
                (unsigned int)__CFBitVectorBit(buckets, bucketIdx + 1),
                (unsigned int)__CFBitVectorBit(buckets, bucketIdx + 2),
                (unsigned int)__CFBitVectorBit(buckets, bucketIdx + 3));
        }
        CFStringAppend(result, CFSTR("\n"));
    }
    if (idx * 64 < cnt) {
        CFStringAppendFormat(result, NULL, CFSTR("\t%lu : "), (unsigned long)(idx * 64));
        for (idx = (idx * 64); idx < cnt; idx++) {	/* Print remainder */
            CFStringAppendFormat(result, NULL, CFSTR("%u"), (unsigned int)__CFBitVectorBit(buckets, idx));
        }
    }
    CFStringAppend(result, CFSTR("\n)}"));
    return result;
}
enum {
    kCFBitVectorImmutable = 0x0,	/* unchangeable and fixed capacity; default */
    kCFBitVectorMutable = 0x1,		/* changeable and variable capacity */
};
static void __CFBitVectorDeallocate(CFTypeRef cf) {
    CFMutableBitVectorRef bv = (CFMutableBitVectorRef)cf;
    CFAllocatorRef allocator = CFGetAllocator(bv);
    if (bv->_buckets) _CFAllocatorDeallocateGC(allocator, bv->_buckets);
}
static CFTypeID __kCFBitVectorTypeID = _kCFRuntimeNotATypeID;
static const CFRuntimeClass __CFBitVectorClass = {
    _kCFRuntimeScannedObject,
    "CFBitVector",
    NULL,	// init
    NULL,	// copy
    __CFBitVectorDeallocate,
    __CFBitVectorEqual,
    __CFBitVectorHash,
    NULL,	//
    __CFBitVectorCopyDescription
};
CFTypeID CFBitVectorGetTypeID(void) {
    static dispatch_once_t initOnce;
    dispatch_once(&initOnce, ^{ __kCFBitVectorTypeID = _CFRuntimeRegisterClass(&__CFBitVectorClass); });
    return __kCFBitVectorTypeID;
}
static CFMutableBitVectorRef __CFBitVectorInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex numBits) {
    CFMutableBitVectorRef memory;
    CFIndex size;
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert2(0 <= numBits, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numBits);
    size = sizeof(struct __CFBitVector) - sizeof(CFRuntimeBase);
    memory = (CFMutableBitVectorRef)_CFRuntimeCreateInstance(allocator, CFBitVectorGetTypeID(), size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    __CFBitVectorSetCapacity(memory, __CFBitVectorRoundUpCapacity(numBits));
    __CFBitVectorSetNumBuckets(memory, __CFBitVectorNumBucketsForCapacity(__CFBitVectorRoundUpCapacity(numBits)));
    __CFAssignWithWriteBarrier((void **)&memory->_buckets, _CFAllocatorAllocateGC(allocator, __CFBitVectorNumBuckets(memory) * sizeof(__CFBitVectorBucket), 0));
    if (__CFOASafe) __CFSetLastAllocationEventName(memory->_buckets, "CFBitVector (store)");
    if (NULL == memory->_buckets) {
        CFRelease(memory);
        return NULL;
    }
    memset(memory->_buckets, 0, __CFBitVectorNumBuckets(memory) * sizeof(__CFBitVectorBucket));
    __CFBitVectorSetNumBucketsUsed(memory, numBits / __CF_BITS_PER_BUCKET + 1);
    __CFBitVectorSetCount(memory, numBits);
    if (NULL != bytes) {
        /* This move is possible because bits are numbered from 0 on the left */
        memmove(memory->_buckets, bytes, numBits / __CF_BITS_PER_BYTE + (numBits & __CF_BITS_PER_BYTE_MASK ? 1 : 0));
    }
    __CFBitVectorSetMutableVariety(memory, __CFBitVectorMutableVarietyFromFlags(flags));
    return memory;
}
CFBitVectorRef CFBitVectorCreate(CFAllocatorRef allocator, const uint8_t *bytes, CFIndex numBits) {
    return __CFBitVectorInit(allocator, kCFBitVectorImmutable, numBits, bytes, numBits);
}
CFMutableBitVectorRef CFBitVectorCreateMutable(CFAllocatorRef allocator, CFIndex capacity) {
    return __CFBitVectorInit(allocator, kCFBitVectorMutable, capacity, NULL, 0);
}
CFBitVectorRef CFBitVectorCreateCopy(CFAllocatorRef allocator, CFBitVectorRef bv) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    return __CFBitVectorInit(allocator, kCFBitVectorImmutable, __CFBitVectorCount(bv), (const uint8_t *)bv->_buckets, __CFBitVectorCount(bv));
}
CFMutableBitVectorRef CFBitVectorCreateMutableCopy(CFAllocatorRef allocator, CFIndex capacity, CFBitVectorRef bv) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    return __CFBitVectorInit(allocator, kCFBitVectorMutable, capacity, (const uint8_t *)bv->_buckets, __CFBitVectorCount(bv));
}
CFIndex CFBitVectorGetCount(CFBitVectorRef bv) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    return __CFBitVectorCount(bv);
}
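
/* Illustrative usage of the creation and query API above (not part of this file):

    const uint8_t bytes[] = { 0xA0 };                        // bits: 1010 0000
    CFBitVectorRef bv = CFBitVectorCreate(kCFAllocatorDefault, bytes, 4);
    CFBitVectorGetCount(bv);                                 // 4
    CFBitVectorGetBitAtIndex(bv, 0);                         // 1 (bit 0 is the MSB of bytes[0])
    CFBitVectorGetBitAtIndex(bv, 1);                         // 0
    CFBitVectorGetCountOfBit(bv, CFRangeMake(0, 4), 1);      // 2
    CFRelease(bv);
*/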
typedef __CFBitVectorBucket (*__CFInternalMapper)(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context);
static void __CFBitVectorInternalMap(CFMutableBitVectorRef bv, CFRange range, __CFInternalMapper mapper, void *context) {
    CFIndex bucketIdx, bitOfBucket;
    CFIndex nBuckets;
    __CFBitVectorBucket bucketValMask, newBucketVal;
    if (0 == range.length) return;
    bucketIdx = range.location / __CF_BITS_PER_BUCKET;
    bitOfBucket = range.location & (__CF_BITS_PER_BUCKET - 1);
    /* Follow usual pattern of ramping up to a bit bucket boundary ...*/
    if (bitOfBucket + range.length < __CF_BITS_PER_BUCKET) {
        bucketValMask = __CFBitBucketMask(bitOfBucket, bitOfBucket + range.length - 1);
        range.length = 0;
    } else {
        bucketValMask = __CFBitBucketMask(bitOfBucket, __CF_BITS_PER_BUCKET - 1);
        range.length -= __CF_BITS_PER_BUCKET - bitOfBucket;
    }
    newBucketVal = mapper(bv->_buckets[bucketIdx], bucketValMask, context);
    bv->_buckets[bucketIdx] = (bv->_buckets[bucketIdx] & ~bucketValMask) | (newBucketVal & bucketValMask);
    bucketIdx++;
    /* ... clipping along with entire bit buckets ... */
    nBuckets = range.length / __CF_BITS_PER_BUCKET;
    range.length -= nBuckets * __CF_BITS_PER_BUCKET;
    while (nBuckets--) {
        newBucketVal = mapper(bv->_buckets[bucketIdx], ~0, context);
        bv->_buckets[bucketIdx] = newBucketVal;
        bucketIdx++;
    }
    /* ... and ramping down with the last fragmentary bit bucket. */
    if (0 != range.length) {
        bucketValMask = __CFBitBucketMask(0, range.length - 1);
        newBucketVal = mapper(bv->_buckets[bucketIdx], bucketValMask, context);
        bv->_buckets[bucketIdx] = (bv->_buckets[bucketIdx] & ~bucketValMask) | (newBucketVal & bucketValMask);
    }
}
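
/* Worked example (illustrative): with 8-bit buckets, mapping the range {location = 5, length = 20}
   touches buckets 0 through 3: a 3-bit head in bucket 0 (mask __CFBitBucketMask(5, 7)), two whole
   buckets (1 and 2) with an all-ones mask, and a 1-bit tail in bucket 3 (mask __CFBitBucketMask(0, 0)),
   for 3 + 16 + 1 = 20 bits in total. */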
struct _occursContext {
    CFBit value;
    CFIndex count;
};
static __CFBitVectorBucket __CFBitVectorCountBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, struct _occursContext *context) {
    static const __CFBitVectorBucket __CFNibbleBitCount[16] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
    __CFBitVectorBucket val;
    CFIndex idx;
    val = (context->value) ? (bucketValue & bucketValueMask) : (~bucketValue & bucketValueMask);
    for (idx = 0; idx < (CFIndex)sizeof(__CFBitVectorBucket) * 2; idx++) {
        context->count += __CFNibbleBitCount[val & 0xF];
        val = val >> 4;
    }
    return bucketValue;
}
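
/* Example (illustrative): the lookup table holds the population count of each nibble, so for a
   masked bucket value of 0xB6 the loop adds __CFNibbleBitCount[0x6] + __CFNibbleBitCount[0xB]
   = 2 + 3 = 5 set bits; when counting zero bits, the bucket is inverted first. Returning
   bucketValue unchanged makes this mapper a read-only pass for __CFBitVectorInternalMap(). */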
CFIndex CFBitVectorGetCountOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    struct _occursContext context;
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    if (0 == range.length) return 0;
    context.value = value;
    context.count = 0;
    __CFBitVectorInternalMap((CFMutableBitVectorRef)bv, range, (__CFInternalMapper)__CFBitVectorCountBits, &context);
    return context.count;
}
Boolean CFBitVectorContainsBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    return (CFBitVectorGetCountOfBit(bv, range, value) != 0) ? true : false;
}
CFBit CFBitVectorGetBitAtIndex(CFBitVectorRef bv, CFIndex idx) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    return __CFBitVectorBit(bv->_buckets, idx);
}
struct _getBitsContext {
    uint8_t *curByte;
    CFIndex initBits;		/* Bits to extract off the front for the prev. byte */
    CFIndex totalBits;		/* This is for stopping at the end */
    bool ignoreFirstInitBits;
};
static __CFBitVectorBucket __CFBitVectorGetBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *ctx) {
    struct _getBitsContext *context = (struct _getBitsContext *)ctx;
    __CFBitVectorBucket val;
    CFIndex nBits;
    val = bucketValue & bucketValueMask;
    nBits = __CFMin(__CF_BITS_PER_BUCKET - context->initBits, context->totalBits);
    /* First initBits bits go in *curByte ... */
    if (0 < context->initBits) {
        if (!context->ignoreFirstInitBits) {
            *context->curByte |= (uint8_t)(val >> (__CF_BITS_PER_BUCKET - context->initBits));
            context->curByte++;
            context->totalBits -= context->initBits;
        }
        context->ignoreFirstInitBits = false;
        val <<= context->initBits;
    }
    /* ... then next groups of __CF_BITS_PER_BYTE go in *curByte ... */
    while (__CF_BITS_PER_BYTE <= nBits) {
        *context->curByte = (uint8_t)(val >> (__CF_BITS_PER_BUCKET - __CF_BITS_PER_BYTE));
        context->curByte++;
        context->totalBits -= __CF_BITS_PER_BYTE;
        nBits -= __CF_BITS_PER_BYTE;
#if __CFBITVECTORBUCKET_SIZE > __CF_BITS_PER_BYTE
        val <<= __CF_BITS_PER_BYTE;
#endif
    }
    /* ... then remaining bits go in *curByte */
    if (0 < nBits && 0 < context->totalBits) {
        *context->curByte = (uint8_t)(val >> (__CF_BITS_PER_BUCKET - __CF_BITS_PER_BYTE));
        context->totalBits -= nBits;
    }
    return bucketValue;
}
void CFBitVectorGetBits(CFBitVectorRef bv, CFRange range, uint8_t *bytes) {
    struct _getBitsContext context;
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    context.curByte = bytes;
    context.initBits = range.location & (__CF_BITS_PER_BUCKET - 1);
    context.totalBits = range.length;
    context.ignoreFirstInitBits = true;
    __CFBitVectorInternalMap((CFMutableBitVectorRef)bv, range, __CFBitVectorGetBits, &context);
}
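
/* Illustrative usage (not part of this file): copying out bits 3..10 of a vector packs them
   left-aligned into the output, so bit 3 of the vector becomes the most significant bit of out[0]:

    uint8_t out[1] = { 0 };
    CFBitVectorGetBits(bv, CFRangeMake(3, 8), out);
*/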
CFIndex CFBitVectorGetFirstIndexOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    CFIndex idx;
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    for (idx = 0; idx < range.length; idx++) {
        if (value == CFBitVectorGetBitAtIndex(bv, range.location + idx)) {
            return range.location + idx;
        }
    }
    return kCFNotFound;
}
CFIndex CFBitVectorGetLastIndexOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    CFIndex idx;
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    for (idx = range.length; idx--;) {
        if (value == CFBitVectorGetBitAtIndex(bv, range.location + idx)) {
            return range.location + idx;
        }
    }
    return kCFNotFound;
}
static void __CFBitVectorGrow(CFMutableBitVectorRef bv, CFIndex numNewValues) {
    CFIndex oldCount = __CFBitVectorCount(bv);
    CFIndex capacity = __CFBitVectorRoundUpCapacity(oldCount + numNewValues);
    CFAllocatorRef allocator = CFGetAllocator(bv);
    __CFBitVectorSetCapacity(bv, capacity);
    __CFBitVectorSetNumBuckets(bv, __CFBitVectorNumBucketsForCapacity(capacity));
    __CFAssignWithWriteBarrier((void **)&bv->_buckets, CFAllocatorReallocate(allocator, bv->_buckets, __CFBitVectorNumBuckets(bv) * sizeof(__CFBitVectorBucket), 0));
    if (__CFOASafe) __CFSetLastAllocationEventName(bv->_buckets, "CFBitVector (store)");
    if (NULL == bv->_buckets) HALT;
}
static __CFBitVectorBucket __CFBitVectorZeroBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return 0;
}
static __CFBitVectorBucket __CFBitVectorOneBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return ~(__CFBitVectorBucket)0;
}
void CFBitVectorSetCount(CFMutableBitVectorRef bv, CFIndex count) {
    CFIndex cnt;
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    cnt = __CFBitVectorCount(bv);
    switch (__CFBitVectorMutableVariety(bv)) {
    case kCFBitVectorMutable:
        if (cnt < count) {
            __CFBitVectorGrow(bv, count - cnt);
        }
        break;
    }
    if (cnt < count) {
        CFRange range = CFRangeMake(cnt, count - cnt);
        __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
    }
    __CFBitVectorSetNumBucketsUsed(bv, count / __CF_BITS_PER_BUCKET + 1);
    __CFBitVectorSetCount(bv, count);
}
void CFBitVectorFlipBitAtIndex(CFMutableBitVectorRef bv, CFIndex idx) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    __CFFlipBitVectorBit(bv->_buckets, idx);
}
static __CFBitVectorBucket __CFBitVectorFlipBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return (~(__CFBitVectorBucket)0) ^ bucketValue;
}
void CFBitVectorFlipBits(CFMutableBitVectorRef bv, CFRange range) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    __CFBitVectorInternalMap(bv, range, __CFBitVectorFlipBits, NULL);
}
void CFBitVectorSetBitAtIndex(CFMutableBitVectorRef bv, CFIndex idx, CFBit value) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    __CFSetBitVectorBit(bv->_buckets, idx, value);
}
void CFBitVectorSetBits(CFMutableBitVectorRef bv, CFRange range, CFBit value) {
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    if (value) {
        __CFBitVectorInternalMap(bv, range, __CFBitVectorOneBits, NULL);
    } else {
        __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
    }
}
void CFBitVectorSetAllBits(CFMutableBitVectorRef bv, CFBit value) {
    CFIndex nBuckets, leftover;
    __CFGenericValidateType(bv, CFBitVectorGetTypeID());
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    nBuckets = __CFBitVectorCount(bv) / __CF_BITS_PER_BUCKET;
    leftover = __CFBitVectorCount(bv) - nBuckets * __CF_BITS_PER_BUCKET;
    if (0 < leftover) {
        CFRange range = CFRangeMake(nBuckets * __CF_BITS_PER_BUCKET, leftover);
        if (value) {
            __CFBitVectorInternalMap(bv, range, __CFBitVectorOneBits, NULL);
        } else {
            __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
        }
    }
    memset(bv->_buckets, (value ? ~0 : 0), nBuckets);
}
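
/* Illustrative usage of the mutation API above (not part of this file):

    CFMutableBitVectorRef mbv = CFBitVectorCreateMutable(kCFAllocatorDefault, 0);
    CFBitVectorSetCount(mbv, 16);                            // grow; new bits are zero
    CFBitVectorSetBits(mbv, CFRangeMake(4, 8), 1);           // set bits 4..11
    CFBitVectorFlipBitAtIndex(mbv, 4);                       // clear bit 4 again
    CFBitVectorGetCountOfBit(mbv, CFRangeMake(0, 16), 1);    // 7
    CFRelease(mbv);
*/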
#undef __CFBitVectorValidateRange