/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*  CFBitVector.c
    Copyright (c) 1998-2012, Apple Inc. All rights reserved.
    Responsibility: Christopher Kane
*/

#include <CoreFoundation/CFBitVector.h>
#include "CFInternal.h"
#include <string.h>     /* memset, memmove */

/* The bucket type must be unsigned, at least one byte in size, and
   a power of 2 in number of bits; bits are numbered from 0 from left
   to right (bit 0 is the most significant) */
typedef uint8_t __CFBitVectorBucket;

#define __CFBITVECTORBUCKET_SIZE 8
#define __CF_BITS_PER_BYTE 8

enum {
    __CF_BITS_PER_BYTE_MASK = 0x07
};

enum {
    __CF_BITS_PER_BUCKET = (__CF_BITS_PER_BYTE * sizeof(__CFBitVectorBucket)),
    __CF_BITS_PER_BUCKET_MASK = 0x07    // __CF_BITS_PER_BYTE_MASK * lg2(sizeof(__CFBitVectorBucket))
};

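/* With the one-byte bucket above, bit 0 of a bucket is the 0x80 position and
   bit 7 is the 0x01 position; a bucket whose only set bit is bit 0 holds 0x80. */
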
CF_INLINE CFIndex __CFBitVectorRoundUpCapacity(CFIndex capacity) {
    if (0 == capacity) capacity = 1;
    return ((capacity + 63) / 64) * 64;
}

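/* For example, requested capacities 1 through 64 all round up to 64 bits,
   and 65 rounds up to 128. */
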
CF_INLINE CFIndex __CFBitVectorNumBucketsForCapacity(CFIndex capacity) {
    return capacity / __CF_BITS_PER_BUCKET + ((capacity & __CF_BITS_PER_BUCKET_MASK) ? 1 : 0);
}

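/* For example, 64 bits fit exactly in 8 one-byte buckets, while 65 bits need 9. */
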
struct __CFBitVector {
    CFRuntimeBase _base;
    CFIndex _count;                 /* number of bits */
    CFIndex _capacity;              /* maximum number of bits */
    __CFBitVectorBucket *_buckets;
};

CF_INLINE UInt32 __CFBitVectorMutableVariety(const void *cf) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 3, 2);
}

CF_INLINE void __CFBitVectorSetMutableVariety(void *cf, UInt32 v) {
    __CFBitfieldSetValue(((CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 3, 2, v);
}

CF_INLINE UInt32 __CFBitVectorMutableVarietyFromFlags(UInt32 flags) {
    return __CFBitfieldGetValue(flags, 1, 0);
}

// ensure that uses of these inlines are correct, bytes vs. buckets vs. bits

CF_INLINE CFIndex __CFBitVectorCount(CFBitVectorRef bv) {
    return bv->_count;
}

CF_INLINE void __CFBitVectorSetCount(CFMutableBitVectorRef bv, CFIndex v) {
    bv->_count = v;
}

CF_INLINE CFIndex __CFBitVectorCapacity(CFBitVectorRef bv) {
    return bv->_capacity;
}

CF_INLINE void __CFBitVectorSetCapacity(CFMutableBitVectorRef bv, CFIndex v) {
    bv->_capacity = v;
}

CF_INLINE CFIndex __CFBitVectorNumBucketsUsed(CFBitVectorRef bv) {
    return bv->_count / __CF_BITS_PER_BUCKET + 1;
}

CF_INLINE void __CFBitVectorSetNumBucketsUsed(CFMutableBitVectorRef bv, CFIndex v) {
    /* for a CFBitVector, _bucketsUsed == _count / __CF_BITS_PER_BUCKET + 1 */
}

CF_INLINE CFIndex __CFBitVectorNumBuckets(CFBitVectorRef bv) {
    return bv->_capacity / __CF_BITS_PER_BUCKET + 1;
}

CF_INLINE void __CFBitVectorSetNumBuckets(CFMutableBitVectorRef bv, CFIndex v) {
    /* for a CFBitVector, _bucketsNum == _capacity / __CF_BITS_PER_BUCKET + 1 */
}

static __CFBitVectorBucket __CFBitBucketMask(CFIndex bottomBit, CFIndex topBit) {
    CFIndex shiftL = __CF_BITS_PER_BUCKET - topBit + bottomBit - 1;
    __CFBitVectorBucket result = ~(__CFBitVectorBucket)0;
    result = (result << shiftL);
    result = (result >> bottomBit);
    return result;
}

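/* For an 8-bit bucket, __CFBitBucketMask(2, 5) produces 0x3C (binary 00111100):
   bits 2 through 5, numbered from the most significant end, are set. */
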
CF_INLINE CFBit __CFBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    return (buckets[bucketIdx] >> (__CF_BITS_PER_BUCKET - 1 - bitOfBucket)) & 0x1;
}

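/* For example, global bit index 11 lives in bucket 1 as bit 3 of that bucket,
   i.e. at the 1 << (8 - 1 - 3) == 0x10 position. */
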
CF_INLINE void __CFSetBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx, CFBit value) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    if (value) {
        buckets[bucketIdx] |= (1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
    } else {
        buckets[bucketIdx] &= ~(1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
    }
}

CF_INLINE void __CFFlipBitVectorBit(__CFBitVectorBucket *buckets, CFIndex idx) {
    CFIndex bucketIdx = idx / __CF_BITS_PER_BUCKET;
    CFIndex bitOfBucket = idx & (__CF_BITS_PER_BUCKET - 1);
    buckets[bucketIdx] ^= (1 << (__CF_BITS_PER_BUCKET - 1 - bitOfBucket));
}

#if defined(DEBUG)
CF_INLINE void __CFBitVectorValidateRange(CFBitVectorRef bv, CFRange range, const char *func) {
    CFAssert2(0 <= range.location && range.location < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): range.location index (%d) out of bounds", func, range.location);
    CFAssert2(0 <= range.length, __kCFLogAssertion, "%s(): range.length (%d) cannot be less than zero", func, range.length);
    CFAssert2(range.location + range.length <= __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): ending index (%d) out of bounds", func, range.location + range.length);
}
#else
#define __CFBitVectorValidateRange(bf,r,f)
#endif

static Boolean __CFBitVectorEqual(CFTypeRef cf1, CFTypeRef cf2) {
    CFBitVectorRef bv1 = (CFBitVectorRef)cf1;
    CFBitVectorRef bv2 = (CFBitVectorRef)cf2;
    CFIndex idx, cnt;
    cnt = __CFBitVectorCount(bv1);
    if (cnt != __CFBitVectorCount(bv2)) return false;
    if (0 == cnt) return true;
    for (idx = 0; idx < (cnt / __CF_BITS_PER_BUCKET) + 1; idx++) {
        __CFBitVectorBucket val1 = bv1->_buckets[idx];
        __CFBitVectorBucket val2 = bv2->_buckets[idx];
        if (val1 != val2) return false;
    }
    return true;
}

static CFHashCode __CFBitVectorHash(CFTypeRef cf) {
    CFBitVectorRef bv = (CFBitVectorRef)cf;
    return __CFBitVectorCount(bv);
}

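/* The hash is simply the bit count, so equal bit vectors hash identically;
   vectors of the same length but different contents share a hash value. */
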
static CFStringRef __CFBitVectorCopyDescription(CFTypeRef cf) {
    CFBitVectorRef bv = (CFBitVectorRef)cf;
    CFMutableStringRef result;
    CFIndex idx, idx2, cnt;
    __CFBitVectorBucket *buckets;
    cnt = __CFBitVectorCount(bv);
    buckets = bv->_buckets;
    result = CFStringCreateMutable(kCFAllocatorSystemDefault, 0);
    CFStringAppendFormat(result, NULL, CFSTR("<CFBitVector %p [%p]>{count = %u, capacity = %u, objects = (\n"), cf, CFGetAllocator(bv), cnt, __CFBitVectorCapacity(bv));
    for (idx = 0; idx < (cnt / 64); idx++) {    /* Print groups of 64 */
        CFStringAppendFormat(result, NULL, CFSTR("\t%u : "), (idx * 64));
        for (idx2 = 0; idx2 < 64; idx2 += 4) {
            CFIndex bucketIdx = (idx << 6) + idx2;
            CFStringAppendFormat(result, NULL, CFSTR("%d%d%d%d"),
                __CFBitVectorBit(buckets, bucketIdx + 0),
                __CFBitVectorBit(buckets, bucketIdx + 1),
                __CFBitVectorBit(buckets, bucketIdx + 2),
                __CFBitVectorBit(buckets, bucketIdx + 3));
        }
        CFStringAppend(result, CFSTR("\n"));
    }
    if (idx * 64 < cnt) {
        CFStringAppendFormat(result, NULL, CFSTR("\t%u : "), (idx * 64));
        for (idx = (idx * 64); idx < cnt; idx++) {  /* Print remainder */
            CFStringAppendFormat(result, NULL, CFSTR("%d"), __CFBitVectorBit(buckets, idx));
        }
    }
    CFStringAppend(result, CFSTR("\n)}"));
    return result;
}

enum {
    kCFBitVectorImmutable = 0x0,    /* unchangeable and fixed capacity; default */
    kCFBitVectorMutable = 0x1,      /* changeable and variable capacity */
};

static void __CFBitVectorDeallocate(CFTypeRef cf) {
    CFMutableBitVectorRef bv = (CFMutableBitVectorRef)cf;
    CFAllocatorRef allocator = CFGetAllocator(bv);
    if (bv->_buckets) _CFAllocatorDeallocateGC(allocator, bv->_buckets);
}

static CFTypeID __kCFBitVectorTypeID = _kCFRuntimeNotATypeID;

static const CFRuntimeClass __CFBitVectorClass = {
    _kCFRuntimeScannedObject,
    "CFBitVector",
    NULL,   // init
    NULL,   // copy
    __CFBitVectorDeallocate,
    __CFBitVectorEqual,
    __CFBitVectorHash,
    NULL,   // copyFormattingDesc
    __CFBitVectorCopyDescription
};

__private_extern__ void __CFBitVectorInitialize(void) {
    __kCFBitVectorTypeID = _CFRuntimeRegisterClass(&__CFBitVectorClass);
}

CFTypeID CFBitVectorGetTypeID(void) {
    return __kCFBitVectorTypeID;
}

static CFMutableBitVectorRef __CFBitVectorInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex numBits) {
    CFMutableBitVectorRef memory;
    CFIndex size;
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert2(0 <= numBits, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numBits);
    size = sizeof(struct __CFBitVector) - sizeof(CFRuntimeBase);
    memory = (CFMutableBitVectorRef)_CFRuntimeCreateInstance(allocator, __kCFBitVectorTypeID, size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    __CFBitVectorSetCapacity(memory, __CFBitVectorRoundUpCapacity(numBits));
    __CFBitVectorSetNumBuckets(memory, __CFBitVectorNumBucketsForCapacity(__CFBitVectorRoundUpCapacity(numBits)));
    __CFAssignWithWriteBarrier((void **)&memory->_buckets, _CFAllocatorAllocateGC(allocator, __CFBitVectorNumBuckets(memory) * sizeof(__CFBitVectorBucket), 0));
    if (__CFOASafe) __CFSetLastAllocationEventName(memory->_buckets, "CFBitVector (store)");
    if (NULL == memory->_buckets) {
        CFRelease(memory);
        return NULL;
    }
    memset(memory->_buckets, 0, __CFBitVectorNumBuckets(memory) * sizeof(__CFBitVectorBucket));
    __CFBitVectorSetNumBucketsUsed(memory, numBits / __CF_BITS_PER_BUCKET + 1);
    __CFBitVectorSetCount(memory, numBits);
    if (bytes) {
        /* This move is possible because bits are numbered from 0 on the left */
        memmove(memory->_buckets, bytes, numBits / __CF_BITS_PER_BYTE + (numBits & __CF_BITS_PER_BYTE_MASK ? 1 : 0));
    }
    __CFBitVectorSetMutableVariety(memory, __CFBitVectorMutableVarietyFromFlags(flags));
    return memory;
}

CFBitVectorRef CFBitVectorCreate(CFAllocatorRef allocator, const uint8_t *bytes, CFIndex numBits) {
    return __CFBitVectorInit(allocator, kCFBitVectorImmutable, numBits, bytes, numBits);
}

CFMutableBitVectorRef CFBitVectorCreateMutable(CFAllocatorRef allocator, CFIndex capacity) {
    return __CFBitVectorInit(allocator, kCFBitVectorMutable, capacity, NULL, 0);
}

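/* Illustrative use of the public API implemented in this file (a minimal
   sketch for readers, not part of the implementation):

       CFMutableBitVectorRef bv = CFBitVectorCreateMutable(kCFAllocatorDefault, 0);
       CFBitVectorSetCount(bv, 16);               // 16 bits, all initially 0
       CFBitVectorSetBitAtIndex(bv, 3, 1);        // set bit 3
       CFBit b = CFBitVectorGetBitAtIndex(bv, 3); // b == 1
       CFRelease(bv);
*/
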
CFBitVectorRef CFBitVectorCreateCopy(CFAllocatorRef allocator, CFBitVectorRef bv) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    return __CFBitVectorInit(allocator, kCFBitVectorImmutable, __CFBitVectorCount(bv), (const uint8_t *)bv->_buckets, __CFBitVectorCount(bv));
}

CFMutableBitVectorRef CFBitVectorCreateMutableCopy(CFAllocatorRef allocator, CFIndex capacity, CFBitVectorRef bv) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    return __CFBitVectorInit(allocator, kCFBitVectorMutable, capacity, (const uint8_t *)bv->_buckets, __CFBitVectorCount(bv));
}

CFIndex CFBitVectorGetCount(CFBitVectorRef bv) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    return __CFBitVectorCount(bv);
}

typedef __CFBitVectorBucket (*__CFInternalMapper)(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context);

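/* A mapper receives the current bucket value and a mask covering the bits of
   that bucket which fall inside the requested range; only the masked portion
   of its return value is written back. Whole interior buckets are passed with
   an all-ones mask and stored unmasked. */
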
static void __CFBitVectorInternalMap(CFMutableBitVectorRef bv, CFRange range, __CFInternalMapper mapper, void *context) {
    CFIndex bucketIdx, bitOfBucket;
    CFIndex nBuckets;
    __CFBitVectorBucket bucketValMask, newBucketVal;
    if (0 == range.length) return;
    bucketIdx = range.location / __CF_BITS_PER_BUCKET;
    bitOfBucket = range.location & (__CF_BITS_PER_BUCKET - 1);
    /* Follow usual pattern of ramping up to a bit bucket boundary ...*/
    if (bitOfBucket + range.length < __CF_BITS_PER_BUCKET) {
        bucketValMask = __CFBitBucketMask(bitOfBucket, bitOfBucket + range.length - 1);
        range.length = 0;
    } else {
        bucketValMask = __CFBitBucketMask(bitOfBucket, __CF_BITS_PER_BUCKET - 1);
        range.length -= __CF_BITS_PER_BUCKET - bitOfBucket;
    }
    newBucketVal = mapper(bv->_buckets[bucketIdx], bucketValMask, context);
    bv->_buckets[bucketIdx] = (bv->_buckets[bucketIdx] & ~bucketValMask) | (newBucketVal & bucketValMask);
    bucketIdx++;
    /* ... clipping along with entire bit buckets ... */
    nBuckets = range.length / __CF_BITS_PER_BUCKET;
    range.length -= nBuckets * __CF_BITS_PER_BUCKET;
    while (nBuckets--) {
        newBucketVal = mapper(bv->_buckets[bucketIdx], ~0, context);
        bv->_buckets[bucketIdx] = newBucketVal;
        bucketIdx++;
    }
    /* ... and ramping down with the last fragmentary bit bucket. */
    if (0 != range.length) {
        bucketValMask = __CFBitBucketMask(0, range.length - 1);
        newBucketVal = mapper(bv->_buckets[bucketIdx], bucketValMask, context);
        bv->_buckets[bucketIdx] = (bv->_buckets[bucketIdx] & ~bucketValMask) | (newBucketVal & bucketValMask);
    }
}

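/* Worked example: with 8-bit buckets, mapping the range {location 5, length 20}
   touches bits 5-7 of bucket 0 (masked), all of buckets 1 and 2 (unmasked), and
   bit 0 of bucket 3 (masked), for 3 + 16 + 1 = 20 bits in total. */
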
struct _occursContext {
    CFBit value;
    CFIndex count;
};

static __CFBitVectorBucket __CFBitVectorCountBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, struct _occursContext *context) {
    static const __CFBitVectorBucket __CFNibbleBitCount[16] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
    __CFBitVectorBucket val;
    CFIndex idx;
    val = (context->value) ? (bucketValue & bucketValueMask) : (~bucketValue & bucketValueMask);
    for (idx = 0; idx < (CFIndex)sizeof(__CFBitVectorBucket) * 2; idx++) {
        context->count += __CFNibbleBitCount[val & 0xF];
        val = val >> 4;     /* advance to the next nibble */
    }
    return bucketValue;     /* counting only; leave the bucket unchanged */
}

CFIndex CFBitVectorGetCountOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    struct _occursContext context;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    if (0 == range.length) return 0;
    context.value = value;
    context.count = 0;
    __CFBitVectorInternalMap((CFMutableBitVectorRef)bv, range, (__CFInternalMapper)__CFBitVectorCountBits, &context);
    return context.count;
}

Boolean CFBitVectorContainsBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    return (CFBitVectorGetCountOfBit(bv, range, value) != 0) ? true : false;
}

CFBit CFBitVectorGetBitAtIndex(CFBitVectorRef bv, CFIndex idx) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    return __CFBitVectorBit(bv->_buckets, idx);
}

struct _getBitsContext {
    uint8_t *curByte;           /* Next output byte to fill */
    CFIndex initBits;           /* Bits to extract off the front for the prev. byte */
    CFIndex totalBits;          /* This is for stopping at the end */
    bool ignoreFirstInitBits;
};

static __CFBitVectorBucket __CFBitVectorGetBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *ctx) {
    struct _getBitsContext *context = (struct _getBitsContext *)ctx;
    __CFBitVectorBucket val;
    CFIndex nBits;
    val = bucketValue & bucketValueMask;
    nBits = __CFMin(__CF_BITS_PER_BUCKET - context->initBits, context->totalBits);
    /* First initBits bits go in *curByte ... */
    if (0 < context->initBits) {
        if (!context->ignoreFirstInitBits) {
            *context->curByte |= (uint8_t)(val >> (__CF_BITS_PER_BUCKET - context->initBits));
            context->curByte++;
            context->totalBits -= context->initBits;
            context->ignoreFirstInitBits = false;
        }
        val <<= context->initBits;
    }
    /* ... then next groups of __CF_BITS_PER_BYTE go in *curByte ... */
    while (__CF_BITS_PER_BYTE <= nBits) {
        *context->curByte = (uint8_t)(val >> (__CF_BITS_PER_BUCKET - __CF_BITS_PER_BYTE));
        context->curByte++;
        context->totalBits -= context->initBits;
        nBits -= __CF_BITS_PER_BYTE;
#if __CFBITVECTORBUCKET_SIZE > __CF_BITS_PER_BYTE
        val <<= __CF_BITS_PER_BYTE;
#endif
    }
    /* ... then remaining bits go in *curByte */
    if (0 < nBits) {
        *context->curByte = (uint8_t)(val >> (__CF_BITS_PER_BUCKET - __CF_BITS_PER_BYTE));
        context->totalBits -= nBits;
    }
    return bucketValue;
}

void CFBitVectorGetBits(CFBitVectorRef bv, CFRange range, uint8_t *bytes) {
    struct _getBitsContext context;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    context.curByte = bytes;
    context.initBits = range.location & (__CF_BITS_PER_BUCKET - 1);
    context.totalBits = range.length;
    context.ignoreFirstInitBits = true;
    __CFBitVectorInternalMap((CFMutableBitVectorRef)bv, range, __CFBitVectorGetBits, &context);
}

CFIndex CFBitVectorGetFirstIndexOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    CFIndex idx;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    for (idx = 0; idx < range.length; idx++) {
        if (value == CFBitVectorGetBitAtIndex(bv, range.location + idx)) {
            return range.location + idx;
        }
    }
    return kCFNotFound;
}

CFIndex CFBitVectorGetLastIndexOfBit(CFBitVectorRef bv, CFRange range, CFBit value) {
    CFIndex idx;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    for (idx = range.length; idx--;) {
        if (value == CFBitVectorGetBitAtIndex(bv, range.location + idx)) {
            return range.location + idx;
        }
    }
    return kCFNotFound;
}

static void __CFBitVectorGrow(CFMutableBitVectorRef bv, CFIndex numNewValues) {
    CFIndex oldCount = __CFBitVectorCount(bv);
    CFIndex capacity = __CFBitVectorRoundUpCapacity(oldCount + numNewValues);
    CFAllocatorRef allocator = CFGetAllocator(bv);
    __CFBitVectorSetCapacity(bv, capacity);
    __CFBitVectorSetNumBuckets(bv, __CFBitVectorNumBucketsForCapacity(capacity));
    __CFAssignWithWriteBarrier((void **)&bv->_buckets, CFAllocatorReallocate(allocator, bv->_buckets, __CFBitVectorNumBuckets(bv) * sizeof(__CFBitVectorBucket), 0));
    if (__CFOASafe) __CFSetLastAllocationEventName(bv->_buckets, "CFBitVector (store)");
    if (NULL == bv->_buckets) HALT;
}

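/* Note: the reallocated tail is not zeroed here; CFBitVectorSetCount() below
   zeroes any newly exposed bits via __CFBitVectorZeroBits before updating the
   count. */
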
static __CFBitVectorBucket __CFBitVectorZeroBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return 0;
}

static __CFBitVectorBucket __CFBitVectorOneBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return ~(__CFBitVectorBucket)0;
}

void CFBitVectorSetCount(CFMutableBitVectorRef bv, CFIndex count) {
    CFIndex cnt;
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    cnt = __CFBitVectorCount(bv);
    switch (__CFBitVectorMutableVariety(bv)) {
    case kCFBitVectorMutable:
        if (cnt < count) {
            __CFBitVectorGrow(bv, count - cnt);
        }
        break;
    }
    if (cnt < count) {
        CFRange range = CFRangeMake(cnt, count - cnt);
        __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
    }
    __CFBitVectorSetNumBucketsUsed(bv, count / __CF_BITS_PER_BUCKET + 1);
    __CFBitVectorSetCount(bv, count);
}

void CFBitVectorFlipBitAtIndex(CFMutableBitVectorRef bv, CFIndex idx) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    __CFFlipBitVectorBit(bv->_buckets, idx);
}

static __CFBitVectorBucket __CFBitVectorFlipBits(__CFBitVectorBucket bucketValue, __CFBitVectorBucket bucketValueMask, void *context) {
    return (~(__CFBitVectorBucket)0) ^ bucketValue;
}

void CFBitVectorFlipBits(CFMutableBitVectorRef bv, CFRange range) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    __CFBitVectorInternalMap(bv, range, __CFBitVectorFlipBits, NULL);
}

void CFBitVectorSetBitAtIndex(CFMutableBitVectorRef bv, CFIndex idx, CFBit value) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert2(0 <= idx && idx < __CFBitVectorCount(bv), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    __CFSetBitVectorBit(bv->_buckets, idx, value);
}

void CFBitVectorSetBits(CFMutableBitVectorRef bv, CFRange range, CFBit value) {
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    __CFBitVectorValidateRange(bv, range, __PRETTY_FUNCTION__);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    if (0 == range.length) return;
    if (value) {
        __CFBitVectorInternalMap(bv, range, __CFBitVectorOneBits, NULL);
    } else {
        __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
    }
}

void CFBitVectorSetAllBits(CFMutableBitVectorRef bv, CFBit value) {
    CFIndex nBuckets, leftover;
    __CFGenericValidateType(bv, __kCFBitVectorTypeID);
    CFAssert1(__CFBitVectorMutableVariety(bv) == kCFBitVectorMutable, __kCFLogAssertion, "%s(): bit vector is immutable", __PRETTY_FUNCTION__);
    nBuckets = __CFBitVectorCount(bv) / __CF_BITS_PER_BUCKET;
    leftover = __CFBitVectorCount(bv) - nBuckets * __CF_BITS_PER_BUCKET;
    if (0 < leftover) {
        CFRange range = CFRangeMake(nBuckets * __CF_BITS_PER_BUCKET, leftover);
        if (value) {
            __CFBitVectorInternalMap(bv, range, __CFBitVectorOneBits, NULL);
        } else {
            __CFBitVectorInternalMap(bv, range, __CFBitVectorZeroBits, NULL);
        }
    }
    memset(bv->_buckets, (value ? ~0 : 0), nBuckets);
}

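/* The memset covers only the whole buckets; passing nBuckets as the byte count
   relies on the bucket type being exactly one byte. The leftover bits of the
   final partial bucket were handled by the masked map above. */
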
#undef __CFBitVectorValidateRange