/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
    Copyright (c) 1999-2011, Apple Inc. All rights reserved.
    Responsibility: Ali Ozer

    2-3 tree storing arbitrary sized values.

    ??? Currently elementSize cannot be greater than storage->maxLeafCapacity, which is less than or equal to __CFStorageMaxLeafCapacity

    CFStorage is thread-safe for multiple readers, but not thread safe for simultaneous reading and writing.
*/

/* pammon notes on FrozenStorage
   A CFStorage can be frozen, or more specifically, some or all of the nodes can be frozen. Frozen nodes can be safely shared between CFStorage instances. CFStorages containing frozen nodes are still considered mutable: frozen nodes are discarded and recreated on demand. It is a form of copy-on-write.

   Freezing is usually one-way: there is no going back. However, if a node's reference count is 1, we know that no other CFStorage can reference it; and if we are in a mutating function, we know that it can be safely unfrozen.

   If a node is frozen, all of its descendants should be considered frozen.

   The root node, which is defined inline within the CFStorage struct itself, can never be frozen.
*/
49 #define NO_SHIFTER ((uint32_t)(-1))
51 #include <CoreFoundation/CFStorage.h>
52 #include "CFInternal.h"
53 #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_WINDOWS
54 #include <dispatch/dispatch.h>
57 #if DEPLOYMENT_TARGET_WINDOWS
62 #define bzero(dst, size) ZeroMemory(dst, size)
#if !defined(PAGE_SIZE)
#define PAGE_SIZE 4096
#endif

// Above 15K, malloc switches (??? or used to in early Leopard) to using vm allocates for blocks; it seems best to avoid that.
// Also, tests with StorageTimer.c done in 4/07 indicate that 4096 * 3 is better than smaller or larger node sizes.
#define __CFStorageMaxLeafCapacity (4096 * 3)

#define COPYMEM(src,dst,n) objc_memmove_collectable((dst), (src), (n))
#define PAGE_LIMIT ((CFIndex)PAGE_SIZE / 2)

/* Round num up to the next multiple of PAGE_SIZE.  Relies on PAGE_SIZE being a
   power of two: add (PAGE_SIZE - 1), then mask off the low bits. */
static inline int32_t roundToPage(int32_t num) {
    return (num + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}
81 typedef const struct __CFStorage
*ConstCFStorageRef
;
83 /* Each node in the storage. isLeaf determines whether the node is a leaf node or a node inside the tree. If latter, number of children are determined by the number of non-NULL entries in child[]. (NULL entries are at the end.)
85 typedef struct __CFStorageNode
{
86 CFIndex numBytes
; /* Number of actual bytes in this node and all its children */
87 uint32_t refCount
; /* Refcount of the node. Is always at least 1 for a normal node. For root nodes, which are stored directly in the CFStorage, this is 0. CFStorageRetain/ReleaseNode checks for a refcount of 0 and does not retain/release such nodes. */
88 bool isFrozen
; /* Indicates that the node is frozen, i.e. may be shared. */
92 CFIndex capacityInBytes
; // capacityInBytes is capacity of memory; this is either 0, or >= numBytes
94 CFRange cachedRange
; //the absolute range of this node, in "value" units. This is valid only if this node is referenced by storage->cacheNode, and used by the cache. In general this is not valid, and the offset needs to be passed down from the tree
97 struct __CFStorageNode
*child
[3];
103 /* A struct used for insertion into frozen nodes, which may need to return a new version of themselves, and a new friend! The child field is a replacement for the child that was inserted into; if the child does not need to be replaced (i.e. it was unfrozen and there was space to perform the insertion) then the child that was inserted into is returned here. The sibling field is a new child that should also be inserted (or NULL if none). */
105 CFStorageNode
*child
;
106 CFStorageNode
*sibling
;
107 } CFStorageDoubleNodeReturn
;
109 CF_INLINE CFStorageDoubleNodeReturn
CFStorageDoubleNodeReturnMake(CFStorageNode
*child
, CFStorageNode
*sibling
) {
110 CFStorageDoubleNodeReturn v
;
116 /* The CFStorage object.
121 uint32_t byteToValueShifter
;
122 CFSpinLock_t cacheReaderMemoryAllocationLock
;
124 CFStorageNode
* volatile cacheNode
;
125 CFIndex maxLeafCapacity
; // In terms of bytes
126 CFStorageNode rootNode
;
127 CFOptionFlags nodeHint
; // __kCFAllocatorGCScannedMemory or 0.
130 /* Helper function to return the intersection of two ranges */
131 static inline CFRange
intersectionRange(CFRange a
, CFRange b
) {
132 CFIndex start
= __CFMax(a
.location
, b
.location
);
133 CFIndex end
= __CFMin(a
.location
+ a
.length
, b
.location
+ b
.length
);
135 return CFRangeMake(0, 0);
138 return CFRangeMake(start
, end
- start
);
142 /* Allocates the memory and initializes the capacity in a leaf. This locks not for mutations (mutations are not thread-safe in general), but for lazy allocation of storage during reading.
144 CF_INLINE
void __CFStorageAllocLeafNodeMemory(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex cap
, bool compact
) {
145 if (cap
> PAGE_LIMIT
) {
146 cap
= roundToPage(cap
);
147 if (cap
> storage
->maxLeafCapacity
) cap
= storage
->maxLeafCapacity
;
149 cap
= (((cap
+ 63) / 64) * 64);
151 /* We must be careful here, because another thread may be trying to allocate this memory at the same time (8203146). This may happen if two threads both attempt to read from a lazily-allocated node. */
152 if ((compact
? (cap
!= node
->info
.leaf
.capacityInBytes
) : (cap
> node
->info
.leaf
.capacityInBytes
))) {
153 __CFSpinLock(&(storage
->cacheReaderMemoryAllocationLock
));
154 /* Check again now that we've acquired the lock. We know that we can do this because two simulaneous readers will always pass the same capacity. This is the fix for 8203146. This probably needs a memory barrier. */
155 if ((compact
? (cap
!= node
->info
.leaf
.capacityInBytes
) : (cap
> node
->info
.leaf
.capacityInBytes
))) {
156 __CFAssignWithWriteBarrier((void **)&node
->info
.leaf
.memory
, _CFAllocatorReallocateGC(allocator
, node
->info
.leaf
.memory
, cap
, storage
->nodeHint
)); // This will free...
157 if (__CFOASafe
) __CFSetLastAllocationEventName(node
->info
.leaf
.memory
, "CFStorage (node bytes)");
158 node
->info
.leaf
.capacityInBytes
= cap
;
160 __CFSpinUnlock(&(storage
->cacheReaderMemoryAllocationLock
));
165 #define ASSERT(x) do { if (! (x)) { fprintf(stderr, "Assertion %s failed on line %d\n", #x, __LINE__); abort(); } } while (0)
167 #define ASSERT(x) do { if (0 && ! (x)) { } } while(0)
170 static void __CFStorageCheckIntegrity(CFStorageRef storage
);
171 #define CHECK_INTEGRITY() do { if (0) __CFStorageCheckIntegrity(storage); } while (0)
173 static void __CFStorageCheckNodeIntegrity(ConstCFStorageRef storage
, const CFStorageNode
*node
);
174 #define CHECK_NODE_INTEGRITY(X) do { if (0) __CFStorageCheckNodeIntegrity(storage, X); } while (0)
176 #pragma mark Byte <-> Value Functions
178 CF_INLINE CFIndex
__CFStorageConvertByteToValue(ConstCFStorageRef storage
, CFIndex byte
) {
179 if (storage
->byteToValueShifter
!= NO_SHIFTER
) {
180 return byte
>> storage
->byteToValueShifter
;
182 return byte
/ storage
->valueSize
;
185 CF_INLINE CFRange
__CFStorageConvertBytesToValueRange(ConstCFStorageRef storage
, CFIndex offset
, CFIndex length
) {
186 if (storage
->byteToValueShifter
!= NO_SHIFTER
) {
187 return CFRangeMake(offset
>> storage
->byteToValueShifter
, length
>> storage
->byteToValueShifter
);
189 return CFRangeMake(offset
/ storage
->valueSize
, length
/ storage
->valueSize
);
192 CF_INLINE CFIndex
__CFStorageConvertValueToByte(ConstCFStorageRef storage
, CFIndex value
) {
193 if (storage
->byteToValueShifter
!= NO_SHIFTER
) {
194 return value
<< storage
->byteToValueShifter
;
196 return value
* storage
->valueSize
;
199 CF_INLINE CFRange
__CFStorageConvertValuesToByteRange(ConstCFStorageRef storage
, CFIndex offset
, CFIndex length
) {
200 if (storage
->byteToValueShifter
!= NO_SHIFTER
) {
201 return CFRangeMake(offset
<< storage
->byteToValueShifter
, length
<< storage
->byteToValueShifter
);
203 return CFRangeMake(offset
* storage
->valueSize
, length
* storage
->valueSize
);
207 #pragma mark Node reference counting and freezing
209 CF_INLINE CFStorageNode
*__CFStorageRetainNode(CFStorageNode
*node
) {
210 if (node
->refCount
> 0) OSAtomicIncrement32((int32_t *)&node
->refCount
);
214 /* A faster version of __CFStorageRetainNode that is not thread safe. This can be used from the Unfrozen (aka unshared) calls, or when a node was just allocated and there's no opportunity for it to have escaped yet */
215 CF_INLINE CFStorageNode
*__CFStorageRetainNodeThreadUnsafe(CFStorageNode
*node
) {
216 if (node
->refCount
> 0) node
->refCount
++;
220 static void __CFStorageDeallocateNode(CFStorageRef storage
, CFStorageNode
*node
);
222 CF_INLINE
void __CFStorageReleaseNode(CFStorageRef storage
, CFStorageNode
*node
) {
223 if (node
->refCount
> 0) {
224 uint32_t newRefCount
= OSAtomicDecrement32((int32_t *)&node
->refCount
);
225 if (newRefCount
== 0) {
226 __CFStorageDeallocateNode(storage
, node
);
231 CF_INLINE
void __CFStorageReleaseNodeWithNullCheck(CFStorageRef storage
, CFStorageNode
*node
) {
232 if (node
) __CFStorageReleaseNode(storage
, node
);
235 static void __CFStorageDeallocateNode(CFStorageRef storage
, CFStorageNode
*node
) {
236 CFAllocatorRef allocator
= CFGetAllocator(storage
);
238 if (node
->info
.leaf
.memory
) _CFAllocatorDeallocateGC(allocator
, node
->info
.leaf
.memory
);
240 __CFStorageReleaseNodeWithNullCheck(storage
, node
->info
.notLeaf
.child
[0]);
241 __CFStorageReleaseNodeWithNullCheck(storage
, node
->info
.notLeaf
.child
[1]);
242 __CFStorageReleaseNodeWithNullCheck(storage
, node
->info
.notLeaf
.child
[2]);
244 _CFAllocatorDeallocateGC(allocator
, node
);
247 static inline void __CFStorageFreezeNode(CFStorageNode
*node
) {
248 node
->isFrozen
= true;
251 /* If a node is frozen, but its reference count is 1, then we are the only reference to it and we can unfreeze it. In general, it's unsafe to do this because it may race with other threads that depend on it being frozen (e.g. we are about to copy the node). However, we do not support concurrent access while mutating a CFStorage, so if we are in a mutation function, know we have exclusive access and we can unfreeze the node. */
252 static inline bool __CFStorageThawNodeDuringMutation(CFStorageRef storage
, CFStorageNode
*node
) {
253 if (node
->refCount
== 1) {
254 node
->isFrozen
= false;
260 static inline void __CFStorageSetChild(CFStorageNode
*parentNode
, CFIndex childIndex
, CFStorageNode
*newChild
) {
261 ASSERT(! parentNode
->isLeaf
);
262 ASSERT(childIndex
< 3);
263 __CFAssignWithWriteBarrier((void **)&parentNode
->info
.notLeaf
.child
[childIndex
], newChild
);
266 static inline void __CFStorageGetChildren(const CFStorageNode
*parent
, CFStorageNode
** restrict resultArray
, bool shouldRetain
, bool shouldFreeze
) {
267 ASSERT(! parent
->isLeaf
);
269 for (i
=0; i
< 3; i
++) {
270 CFStorageNode
*node
= parent
->info
.notLeaf
.child
[i
];
271 if (node
!= NULL
&& shouldRetain
) __CFStorageRetainNode(node
);
272 if (node
!= NULL
&& shouldFreeze
) __CFStorageFreezeNode(node
);
273 resultArray
[i
] = node
;
277 #pragma mark Storage cache handling
280 /* Sets the cache to point at the specified node. loc and len are in terms of values, not bytes. To clear the cache set these two to 0.
281 At least one of node or memory should be non-NULL. memory is consulted first when using the cache.
283 CF_INLINE
void __CFStorageSetCache(CFStorageRef storage
, CFStorageNode
*node
, CFIndex locInBytes
) {
285 ASSERT(node
->isLeaf
);
286 node
->info
.leaf
.cachedRange
= __CFStorageConvertBytesToValueRange(storage
, locInBytes
, node
->numBytes
);
288 storage
->cacheNode
= node
;
291 /* Gets the location for the specified absolute loc from the cached info.
292 Returns NULL if the location is not in the cache.
294 CF_INLINE
uint8_t *__CFStorageGetFromCache(CFStorageRef storage
, CFIndex loc
, CFRange
* restrict validConsecutiveValueRange
, bool requireUnfrozenNode
) {
295 CFStorageNode
* const cachedNode
= storage
->cacheNode
; /* It's important we read from this field no more than once, for thread safety with other concurrent reads; that is why the field is marked volatile. */
296 if (! cachedNode
) return NULL
; /* No cache */
298 /* We only allow caching leaf nodes. */
299 ASSERT(cachedNode
->isLeaf
);
301 /* If the node is frozen, and we require an unfrozen node, then return NULL */
302 if (requireUnfrozenNode
&& cachedNode
->isFrozen
) return NULL
;
304 /* If there's no memory allocated yet, then allocate it now*/
305 if (! cachedNode
->info
.leaf
.memory
) {
306 __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage
), storage
, cachedNode
, cachedNode
->numBytes
, false);
309 /* If the node's range starts after loc, or ends before or at loc, return NULL */
310 CFIndex nodeOffset
= cachedNode
->info
.leaf
.cachedRange
.location
;
311 CFIndex nodeLength
= cachedNode
->info
.leaf
.cachedRange
.length
;
312 if (loc
< nodeOffset
|| loc
>= nodeOffset
+ nodeLength
) {
316 /* The cache is valid, so return it */
317 validConsecutiveValueRange
->location
= nodeOffset
;
318 validConsecutiveValueRange
->length
= nodeLength
;
319 uint8_t *result
= cachedNode
->info
.leaf
.memory
+ __CFStorageConvertValueToByte(storage
, loc
- nodeOffset
);
324 /* Returns the number of the child containing the desired value and the relative index of the value in that child.
325 forInsertion = true means that we are looking for the child in which to insert or delete; this changes the behavior when the index is at the end of a child
326 relativeByteNum (not optional, for performance reasons) returns the relative byte number of the specified byte in the child.
327 Don't call with leaf nodes!
329 CF_INLINE CFStorageNode
*__CFStorageFindChild(const CFStorageNode
* restrict node
, CFIndex byteNum
, bool forInsertionOrDeletion
, CFIndex
* restrict childNum
, CFIndex
* restrict relativeByteNum
) {
330 if (forInsertionOrDeletion
) byteNum
--; /* If for insertion, we do <= checks, not <, so this accomplishes the same thing */
331 CFStorageNode
*result
;
332 result
= node
->info
.notLeaf
.child
[0];
333 if (byteNum
< result
->numBytes
) *childNum
= 0;
335 byteNum
-= result
->numBytes
;
336 result
= node
->info
.notLeaf
.child
[1];
337 if (byteNum
< result
->numBytes
) *childNum
= 1;
339 byteNum
-= result
->numBytes
;
341 result
= node
->info
.notLeaf
.child
[2];
344 if (forInsertionOrDeletion
) byteNum
++;
345 *relativeByteNum
= byteNum
;
349 static CFStorageNode
*__CFStorageCopyNode(CFStorageRef storage
, const CFStorageNode
*node
);
351 /* Finds the location where the specified byte is stored. If validConsecutiveByteRange is not NULL, returns
352 the range of bytes that are consecutive with this one.
353 !!! Assumes the byteNum is within the range of this node.
355 static void *__CFStorageFindByte(CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex absoluteByteOffsetOfNode
, CFStorageNode
**resultNode
, CFRange
*validConsecutiveByteRange
, bool requireUnfreezing
) {
357 *validConsecutiveByteRange
= CFRangeMake(absoluteByteOffsetOfNode
, node
->numBytes
);
359 __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage
), storage
, node
, node
->numBytes
, false);
360 return node
->info
.leaf
.memory
+ byteNum
;
363 CFIndex relativeByteNum
;
364 CFStorageNode
*child
= __CFStorageFindChild(node
, byteNum
, false, &childNum
, &relativeByteNum
);
365 if (requireUnfreezing
&& child
->isFrozen
&& ! __CFStorageThawNodeDuringMutation(storage
, child
)) {
366 /* Replace the child with an unfrozen variant */
367 CFStorageNode
*unfrozenReplacement
= __CFStorageCopyNode(storage
, child
);
368 /* Release the old node, set the new one */
369 __CFStorageSetChild(node
, childNum
, unfrozenReplacement
);
370 __CFStorageReleaseNode(storage
, child
);
371 child
= unfrozenReplacement
;
373 return __CFStorageFindByte(storage
, child
, relativeByteNum
, absoluteByteOffsetOfNode
+ (byteNum
- relativeByteNum
), resultNode
, validConsecutiveByteRange
, requireUnfreezing
);
377 /* Guts of CFStorageGetValueAtIndex(); note that validConsecutiveValueRange is not optional.
378 Consults and updates cache.
380 CF_INLINE
void *__CFStorageGetValueAtIndex(CFStorageRef storage
, CFIndex idx
, CFRange
*validConsecutiveValueRange
, bool requireUnfreezing
) {
382 if (!(result
= __CFStorageGetFromCache(storage
, idx
, validConsecutiveValueRange
, requireUnfreezing
))) {
383 CFStorageNode
*resultNode
;
384 CFRange rangeInBytes
;
385 result
= (uint8_t *)__CFStorageFindByte(storage
, &storage
->rootNode
, __CFStorageConvertValueToByte(storage
, idx
), 0, &resultNode
, &rangeInBytes
, requireUnfreezing
);
386 __CFStorageSetCache(storage
, resultNode
, rangeInBytes
.location
);
387 *validConsecutiveValueRange
= __CFStorageConvertBytesToValueRange(storage
, rangeInBytes
.location
, rangeInBytes
.length
);
392 /* Copies data in the range srcRange from srcLeaf to index dstLocation in dstLeaf. Both srcLeaf and dstLeaf must be leaf nodes, and dstLeaf must not be frozen. If srcRange has a nonzero length, then both must have their memory properly allocated. This does not modify the numBytes of srcLeaf or dstLeaf.
394 static void __CFLeafCopyRangeToOffset(const CFStorageNode
*srcLeaf
, CFRange srcRange
, CFStorageNode
*dstLeaf
, CFIndex dstLocation
) {
395 ASSERT(srcLeaf
->isLeaf
);
396 ASSERT(dstLeaf
->isLeaf
);
397 if (srcRange
.length
> 0) {
398 ASSERT(srcLeaf
->info
.leaf
.memory
!= NULL
);
399 ASSERT(dstLeaf
->info
.leaf
.memory
!= NULL
);
400 ASSERT(srcRange
.location
+ srcRange
.length
<= srcLeaf
->numBytes
);
401 ASSERT(dstLocation
+ srcRange
.length
<= dstLeaf
->info
.leaf
.capacityInBytes
);
402 COPYMEM(srcLeaf
->info
.leaf
.memory
+ srcRange
.location
, dstLeaf
->info
.leaf
.memory
+ dstLocation
, srcRange
.length
);
406 #pragma mark Node creation and destruction
408 // returns a node with a refCount of 1, and an auto_zone_retain() under GC
409 static CFStorageNode
*__CFStorageCreateNode(CFAllocatorRef allocator
, CFStorageRef storage
, bool isLeaf
, CFIndex numBytes
) {
410 CFStorageNode
*newNode
= (CFStorageNode
*)CFAllocatorAllocate(allocator
, sizeof(CFStorageNode
), __kCFAllocatorGCScannedMemory
);
411 if (__CFOASafe
) __CFSetLastAllocationEventName(newNode
, "CFStorage (node)");
412 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator
)) {
413 auto_zone_release(objc_collectableZone(), newNode
); //remove the implicit retain so we can be collected
415 newNode
->refCount
= 1;
416 newNode
->isFrozen
= storage
->alwaysFrozen
;
417 newNode
->isLeaf
= isLeaf
;
418 newNode
->numBytes
= numBytes
;
420 newNode
->info
.leaf
.capacityInBytes
= 0;
421 newNode
->info
.leaf
.memory
= NULL
;
423 newNode
->info
.notLeaf
.child
[0] = newNode
->info
.notLeaf
.child
[1] = newNode
->info
.notLeaf
.child
[2] = NULL
;
428 /* Creates an (unfrozen) copy of the given node. This is shallow in the sense that it shares children for branches, but deep in that it copies memory for leaves. */
429 static CFStorageNode
*__CFStorageCopyNode(CFStorageRef storage
, const CFStorageNode
*node
) {
430 CFAllocatorRef allocator
= CFGetAllocator(storage
);
431 CFStorageNode
*result
= __CFStorageCreateNode(allocator
, storage
, node
->isLeaf
, node
->numBytes
);
433 if (node
->info
.leaf
.memory
!= NULL
) {
434 __CFStorageAllocLeafNodeMemory(allocator
, storage
, result
, result
->numBytes
, false);
435 COPYMEM(node
->info
.leaf
.memory
, result
->info
.leaf
.memory
, result
->numBytes
);
439 CFStorageNode
*child
= node
->info
.notLeaf
.child
[0];
440 __CFStorageSetChild(result
, 0, __CFStorageRetainNode(child
));
441 if ((child
= node
->info
.notLeaf
.child
[1])) __CFStorageSetChild(result
, 1, __CFStorageRetainNode(child
));
442 if ((child
= node
->info
.notLeaf
.child
[2])) __CFStorageSetChild(result
, 2, __CFStorageRetainNode(child
));
444 /* If we are copying children from a frozen node to an unfrozen node, we need to freeze the children */
445 if (node
->isFrozen
) {
446 __CFStorageFreezeNode(result
->info
.notLeaf
.child
[0]);
447 if ((child
= result
->info
.notLeaf
.child
[1])) __CFStorageFreezeNode(child
);
448 if ((child
= result
->info
.notLeaf
.child
[2])) __CFStorageFreezeNode(child
);
455 static void __CFStorageDeallocateNode(CFStorageRef storage
, CFStorageNode
*node
);
458 #pragma mark Insertion and Deletion prototypes
459 /* Prototypes for deletion and insertion. The *Frozen and *Unfrozen variants should only be called for nodes that we know are frozen or unfrozen. Frozen nodes may only have frozen children, so it makes sense for the Frozen functions to call other Frozen functions. Unfrozen nodes may have frozen or unfrozen children, so they should call the non-suffixed variants (which dispatch on whether the node is frozen or not).
461 The only acceptable time to directly call the Unfrozen variant is for the root node of a CFStorage, because root nodes may never be frozen. The isRootNode parameter determines whether we are in this case.
463 The Insertion functions return two nodes. As an awful performance hack, if the first node returned from __CFStorageInsert* is the same as the node passed in, that node is *not* retained, so should not be relased. If it is a different node, it is retained.
465 static CFStorageDoubleNodeReturn
__CFStorageInsert(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
);
466 static CFStorageNode
*__CFStorageDelete(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFRange range
, bool compact
);
468 static CFStorageDoubleNodeReturn
__CFStorageInsertFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
);
469 static CFStorageNode
*__CFStorageDeleteFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFRange range
);
471 static CFStorageDoubleNodeReturn
__CFStorageInsertUnfrozen(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
);
472 static CFStorageNode
*__CFStorageDeleteUnfrozen(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFRange range
, bool compact
, bool isRootNode
);
474 #pragma mark Frozen Deletion
476 static CFStorageNode
*__CFStorageDeleteLeafFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFRange range
) {
477 ASSERT(node
->isLeaf
);
478 const CFIndex rangeToDeleteEnd
= range
.location
+ range
.length
;
479 ASSERT(rangeToDeleteEnd
<= node
->numBytes
);
480 CFIndex remainingBytes
= node
->numBytes
- range
.length
;
481 if (remainingBytes
== 0) {
482 /* The range to delete encompasses our entire range of bytes. Return NULL to indicate that we should be deleted. */
486 /* Need to create a new node */
487 CFStorageNode
*newNode
= __CFStorageCreateNode(allocator
, storage
, true, remainingBytes
);
488 if (node
->info
.leaf
.memory
) {
489 /* Our node had memory allocated, so copy in the non-deleted portion */
490 CFRange nonDeletedPrefix
= CFRangeMake(0, range
.location
);
491 CFRange nonDeletedSuffix
= CFRangeMake(rangeToDeleteEnd
, node
->numBytes
- rangeToDeleteEnd
);
492 ASSERT(nonDeletedPrefix
.length
+ nonDeletedSuffix
.length
== remainingBytes
);
493 __CFStorageAllocLeafNodeMemory(allocator
, storage
, newNode
, remainingBytes
, false); // no point in compacting since we're freshly allocated
494 __CFLeafCopyRangeToOffset(node
, nonDeletedPrefix
, newNode
, 0);
495 __CFLeafCopyRangeToOffset(node
, nonDeletedSuffix
, newNode
, nonDeletedPrefix
.length
);
501 /* Helper function for both frozen and unfrozen branch deletion. Walks the children of the node, calling __CFStorageDelete (or __CFStorageDeleteFrozen if childrenAreDefinitelyFrozen is YES), and assigning the results back to newChildren. Returns the number of new children. The newChildren nodes all acquire a reference count!
503 static inline CFIndex
__CFStoragePopulateBranchChildrenAfterDeletion(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFRange range
, CFStorageNode
*newChildren
[3], bool childrenAreDefinitelyFrozen
, bool compact
) {
504 CFIndex newChildIndex
= 0;
505 CFIndex childByteOffset
= 0; //will track the current start byte of this child
506 for (CFIndex existingChildIndex
= 0; existingChildIndex
< 3; existingChildIndex
++) {
507 CFStorageNode
*existingChild
= node
->info
.notLeaf
.child
[existingChildIndex
];
508 if (! existingChild
) break; //no more children
509 const CFIndex existingChildLength
= existingChild
->numBytes
;
510 /* The child's range is {byteOffset, existingChildLength}. Figure out what part of the range to delete is intersected by this child's range */
511 CFRange deletionRangeIntersectedByChild
= intersectionRange(range
, CFRangeMake(childByteOffset
, existingChildLength
));
512 if (! deletionRangeIntersectedByChild
.length
) {
513 /* The range to delete does not overlap this child's range, so preserve the child */
514 newChildren
[newChildIndex
++] = __CFStorageRetainNode(existingChild
); //bump the refcount like we promised we would
515 if (childrenAreDefinitelyFrozen
) {
516 /* Because we are about to add this child from a frozen node to a possibly unfrozen node, mark the child as frozen */
517 __CFStorageFreezeNode(existingChild
);
521 /* We have something from this child to delete */
522 CFRange rangeOfChildToDelete
= CFRangeMake(deletionRangeIntersectedByChild
.location
- childByteOffset
, deletionRangeIntersectedByChild
.length
);
523 CFStorageNode
*newChild
;
524 if (childrenAreDefinitelyFrozen
) {
525 newChild
= __CFStorageDeleteFrozen(allocator
, storage
, existingChild
, rangeOfChildToDelete
);
528 newChild
= __CFStorageDelete(allocator
, storage
, existingChild
, rangeOfChildToDelete
, compact
);
530 /* We may get null back if we deleted the entire child */
531 if (newChild
!= NULL
) {
532 newChildren
[newChildIndex
++] = newChild
; // Transfers the reference count
535 if (rangeOfChildToDelete
.length
== existingChildLength
) {
536 ASSERT(newChild
== NULL
); //should have deleted everything
539 ASSERT(newChild
!= NULL
);
540 ASSERT(newChild
->numBytes
== existingChildLength
- rangeOfChildToDelete
.length
);
543 childByteOffset
+= existingChildLength
;
545 return newChildIndex
;
548 static CFStorageNode
*__CFStorageDeleteBranchFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFRange range
) {
549 ASSERT(! node
->isLeaf
);
550 ASSERT(range
.location
+ range
.length
<= node
->numBytes
);
551 if (range
.length
== node
->numBytes
) {
552 /* They're deleting everything, so return NULL to indicate that this node should be deleted. */
556 /* Construct our new children in this array. */
557 CFStorageNode
*newChildren
[3];
558 CFIndex newChildIndex
= __CFStoragePopulateBranchChildrenAfterDeletion(allocator
, storage
, node
, range
, newChildren
, true/*childrenAreDefinitelyFrozen*/, false/*compact*/);
560 /* We do not have to freeze anything in newChildren. __CFStoragePopulateBranchChildrenAfterDeletion() will properly freeze any existing children, and new children we get should not be marked frozen. */
562 /* Now we have the children of the new node in newChildren. We expect to have at least one child (if we got no children, we should have returned NULL up above because they deleted everything. */
563 ASSERT(newChildIndex
>= 1);
564 if (newChildIndex
== 1) {
565 /* Only one child, so just return it, transferring its retain count */
566 return newChildren
[0];
569 CFStorageNode
*result
= __CFStorageCreateNode(allocator
, storage
, false, 0);
570 while (newChildIndex
--) {
571 __CFStorageSetChild(result
, newChildIndex
, newChildren
[newChildIndex
]); //transfers the reference count
573 result
->numBytes
= node
->numBytes
- range
.length
;
578 /* Returns a new node, or NULL if the entire thing was deleted.
580 static CFStorageNode
*__CFStorageDeleteFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFRange range
) {
582 return __CFStorageDeleteLeafFrozen(allocator
, storage
, node
, range
);
585 return __CFStorageDeleteBranchFrozen(allocator
, storage
, node
, range
);
589 #pragma mark Unfrozen Deletion
591 /* The boolean compact indicates whether leaf nodes that get smaller should be realloced.
593 static CFStorageNode
*__CFStorageDeleteUnfrozen(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFRange range
, bool compact
, bool isRootNode
) {
594 ASSERT(! node
->isFrozen
);
595 ASSERT(range
.location
+ range
.length
<= node
->numBytes
);
596 CHECK_NODE_INTEGRITY(node
);
598 if (range
.length
== node
->numBytes
) {
599 /* We are deleting everything, so return NULL */
604 node
->numBytes
-= range
.length
;
605 // If this node had memory allocated, readjust the bytes...
606 if (node
->info
.leaf
.memory
) {
607 COPYMEM(node
->info
.leaf
.memory
+ range
.location
+ range
.length
, node
->info
.leaf
.memory
+ range
.location
, node
->numBytes
- range
.location
);
608 if (compact
) __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, node
->numBytes
, true);
610 CHECK_NODE_INTEGRITY(node
);
611 return __CFStorageRetainNodeThreadUnsafe(node
); //we can use the ThreadUnsafe calls because this is the Unfrozen variant, so we are not shared
613 CFStorageNode
*newChildren
[3] = {NULL
, NULL
, NULL
};
614 CFIndex newChildIndex
= __CFStoragePopulateBranchChildrenAfterDeletion(allocator
, storage
, node
, range
, newChildren
, false/*childrenAreDefinitelyFrozen*/, compact
);
615 node
->numBytes
-= range
.length
;
616 ASSERT(newChildIndex
>= 1); //we expect to have at least one child; else we would have deleted everything up above
618 /* Release all of our existing children. Either we are about to return a new child in place of us; or we are about to set our children to the new ones */
619 __CFStorageReleaseNode(storage
, node
->info
.notLeaf
.child
[0]);
620 __CFStorageReleaseNodeWithNullCheck(storage
, node
->info
.notLeaf
.child
[1]);
621 __CFStorageReleaseNodeWithNullCheck(storage
, node
->info
.notLeaf
.child
[2]);
622 node
->info
.notLeaf
.child
[0] = node
->info
.notLeaf
.child
[1] = node
->info
.notLeaf
.child
[2] = NULL
;
624 if (newChildIndex
== 1) {
625 /* We have only one child, so return it, transferring the refcount that __CFStoragePopulate gives it */
626 return newChildren
[0];
630 for (i
=0; i
< 3; i
++) {
631 __CFStorageSetChild(node
, i
, newChildren
[i
]); //set the new child, transferring the refcount to us (or set NULL)
633 CHECK_NODE_INTEGRITY(node
);
634 return __CFStorageRetainNodeThreadUnsafe(node
);
639 #pragma mark Frozen Insertion
641 /* Insertion into an frozen leaf. We return two nodes, either of which may be 'node', or possibly two new nodes. This always sets the cache. */
642 static CFStorageDoubleNodeReturn
__CFStorageInsertLeafFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
643 /* Inserting into a frozen leaf. If we can fit the new data along with our existing data into a single node, then do so (i.e. if we can return one node, do it). Otherwise, all of the data would have to fit into a second node (we are never called with more data than storage->maxLeafCapacity) so just make a new node with the data and return that. */
644 CFStorageNode
*leftResult
, *rightResult
;
645 CHECK_NODE_INTEGRITY(node
);
646 ASSERT(byteNum
<= node
->numBytes
);
647 CFIndex newTotalSize
= size
+ node
->numBytes
;
648 if (newTotalSize
<= storage
->maxLeafCapacity
) {
649 /* We can fit into a single node */
651 leftResult
= __CFStorageCreateNode(allocator
, storage
, true, newTotalSize
);
652 if (node
->info
.leaf
.memory
!= NULL
) { // Beware lazy memory allocation
653 __CFStorageAllocLeafNodeMemory(allocator
, storage
, leftResult
, newTotalSize
, false);
654 COPYMEM(node
->info
.leaf
.memory
, leftResult
->info
.leaf
.memory
, byteNum
); //copy first byteNum bytes from existing node
655 //middle we don't touch
656 COPYMEM(node
->info
.leaf
.memory
+ byteNum
, leftResult
->info
.leaf
.memory
+ byteNum
+ size
, node
->numBytes
- byteNum
); //copy last part from existing node
658 __CFStorageSetCache(storage
, leftResult
, absoluteByteNum
- byteNum
);
661 /* We cannot fit into a single node. See if we can preserve self (i.e. we're inserting at beginning or end). */
662 if (byteNum
== node
->numBytes
) {
663 /* Inserting at end, so left is our existing node and right is the new node. Do not retain left node, because it is the same as the given node */
664 leftResult
= (CFStorageNode
*)node
;
665 rightResult
= __CFStorageCreateNode(allocator
, storage
, true, size
);
666 __CFStorageSetCache(storage
, rightResult
, absoluteByteNum
);
668 else if (byteNum
== 0) {
669 /* Inserting at beginning, so right is our existing node and left is the new node. Do retain left node, because it is different than the given node. */
670 rightResult
= __CFStorageRetainNode((CFStorageNode
*)node
);
671 leftResult
= __CFStorageCreateNode(allocator
, storage
, true, size
);
672 __CFStorageSetCache(storage
, leftResult
, absoluteByteNum
);
675 /* Inserting in middle. We will need to create two nodes because we overflow one. We could be lazy and only allocate up to byteNum, but since it's likely that they're about to insert into us and we'd have to reallocate, just allocate everything requested up front. */
676 CFIndex leftAmount
= storage
->maxLeafCapacity
, rightAmount
= newTotalSize
- storage
->maxLeafCapacity
;
677 leftResult
= __CFStorageCreateNode(allocator
, storage
, true, leftAmount
);
678 rightResult
= __CFStorageCreateNode(allocator
, storage
, true, rightAmount
);
679 __CFStorageAllocLeafNodeMemory(allocator
, storage
, leftResult
, leftAmount
, false);
680 __CFStorageAllocLeafNodeMemory(allocator
, storage
, rightResult
, rightAmount
, false);
681 ASSERT(node
->info
.leaf
.capacityInBytes
>= node
->numBytes
);
683 /* The existing node has length node->numBytes, so it has the following range: {0, node->numBytes}
685 We are inserting (garbage) data of length 'size' into offset 'byteNum'. Therefore we end up with the following two logical ranges, expressed as {start, length}:
686 {0, byteNum}, {byteNum + size, node->numBytes - byteNum}
688 We need to divide these among our new nodes with the following logical ranges:
689 {0, leftAmount}, {leftAmount, rightAmount}
691 The first range must be fit entirely within the left node (byteNum <= leftAmount). The second range may or may be divided between the left and right nodes.
694 ASSERT(byteNum
<= leftAmount
);
695 COPYMEM(node
->info
.leaf
.memory
, leftResult
->info
.leaf
.memory
, byteNum
);
697 const CFRange leftNodeRange
= {0, leftAmount
}, rightNodeRange
= {leftAmount
, rightAmount
};
698 const CFRange preservedData
= {byteNum
+ size
, node
->numBytes
- byteNum
};
700 if ((overlap
= intersectionRange(leftNodeRange
, preservedData
)).length
> 0) COPYMEM(node
->info
.leaf
.memory
+ overlap
.location
- size
, leftResult
->info
.leaf
.memory
+ overlap
.location
, overlap
.length
);
701 if ((overlap
= intersectionRange(rightNodeRange
, preservedData
)).length
> 0) COPYMEM(node
->info
.leaf
.memory
+ overlap
.location
- size
, rightResult
->info
.leaf
.memory
+ overlap
.location
- leftAmount
, overlap
.length
);
702 __CFStorageSetCache(storage
, leftResult
, absoluteByteNum
- byteNum
);
705 return CFStorageDoubleNodeReturnMake(leftResult
, rightResult
);
708 static CFStorageDoubleNodeReturn
__CFStorageInsertBranchFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
709 /* Inserting into a frozen branch. We definitely will need to make a new copy of us, so make that up front. We may or may not need to make a new sibling. Note that in some cases, we may be able to get away with not creating a new copy of us, e.g. inserting at the very end of the tree. In that case, we could preserve us and make a sibling containing exactly one node. However, we do not really want to have branches with exactly one child; because then why not just return the child? And then the whole tree can become unbalanced. So then instead, always distribute the children equally among our nodes. */
710 CHECK_NODE_INTEGRITY(node
);
711 CFStorageNode
*copyOfMe
= __CFStorageCreateNode(allocator
, storage
, false, 0);
712 CFStorageNode
*siblingOfMe
= NULL
;
713 CFIndex relativeByteNum
;
715 CFStorageNode
*child
= __CFStorageFindChild(node
, byteNum
, true, &childNum
, &relativeByteNum
);
716 ASSERT(childNum
>= 0 && childNum
<= 2);
717 CFStorageDoubleNodeReturn childReturn
= __CFStorageInsertFrozen(allocator
, storage
, child
, relativeByteNum
, size
, absoluteByteNum
);
718 ASSERT(childReturn
.child
); //we always get at least one back
720 /* Make a local array of all new children (retained). We'll then move them to the new nodes. */
721 CFStorageNode
*newChildren
[4] = {NULL
};
722 __CFStorageGetChildren(node
, newChildren
, true/*retain*/, true/*freeze*/);
723 if (newChildren
[childNum
] != childReturn
.child
) {
724 __CFStorageReleaseNode(storage
, newChildren
[childNum
]);
725 newChildren
[childNum
] = childReturn
.child
; // Transfers the retain
727 if (childReturn
.sibling
!= NULL
) {
728 if (childNum
< 2) newChildren
[3] = newChildren
[2];
729 if (childNum
< 1) newChildren
[2] = newChildren
[1];
730 newChildren
[childNum
+ 1] = childReturn
.sibling
; // Transfers the retain
733 /* First two always go to our clone */
734 __CFStorageSetChild(copyOfMe
, 0, newChildren
[0]);
735 __CFStorageSetChild(copyOfMe
, 1, newChildren
[1]);
736 if (newChildren
[3] == NULL
) {
737 /* We have three or fewer children to distribute, i.e. we don't need a sibling. Put them all into copy of me. Our clone's byte count is larger than our own by 'size'. */
738 __CFStorageSetChild(copyOfMe
, 2, newChildren
[2]);
739 copyOfMe
->numBytes
= node
->numBytes
+ size
;
742 /* We have four children to distribute. The first two go to us, the last two go to our new sibling. */
743 siblingOfMe
= __CFStorageCreateNode(allocator
, storage
, false, 0);
744 __CFStorageSetChild(siblingOfMe
, 0, newChildren
[2]);
745 __CFStorageSetChild(siblingOfMe
, 1, newChildren
[3]);
746 copyOfMe
->numBytes
= newChildren
[0]->numBytes
+ newChildren
[1]->numBytes
;
747 siblingOfMe
->numBytes
= newChildren
[2]->numBytes
+ newChildren
[3]->numBytes
;
749 CHECK_NODE_INTEGRITY(node
);
750 CHECK_NODE_INTEGRITY(copyOfMe
);
751 if (siblingOfMe
) CHECK_NODE_INTEGRITY(siblingOfMe
);
752 return CFStorageDoubleNodeReturnMake(copyOfMe
, siblingOfMe
);
755 static CFStorageDoubleNodeReturn
__CFStorageInsertFrozen(CFAllocatorRef allocator
, CFStorageRef storage
, const CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
757 return __CFStorageInsertLeafFrozen(allocator
, storage
, node
, byteNum
, size
, absoluteByteNum
);
760 return __CFStorageInsertBranchFrozen(allocator
, storage
, node
, byteNum
, size
, absoluteByteNum
);
765 #pragma mark Unfrozen Insertion
767 /* Insertion into an unfrozen leaf. We return two nodes, one of which is 'node'. This always sets the cache. */
768 static CFStorageDoubleNodeReturn
__CFStorageInsertLeafUnfrozen(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
769 if (size
+ node
->numBytes
> storage
->maxLeafCapacity
) { // Need to create more child nodes
770 CFStorageNode
*newNode
;
771 if (byteNum
== node
->numBytes
) { // Inserting at end; easy...
772 newNode
= __CFStorageCreateNode(allocator
, storage
, true, size
);
773 __CFStorageSetCache(storage
, newNode
, absoluteByteNum
);
774 } else if (byteNum
== 0) { // Inserting at front; also easy, but we need to swap node and newNode
775 newNode
= __CFStorageCreateNode(allocator
, storage
, true, 0);
777 /* Transfer our memory to the new node */
778 newNode
->numBytes
= node
->numBytes
;
779 newNode
->info
.leaf
.capacityInBytes
= node
->info
.leaf
.capacityInBytes
;
780 __CFAssignWithWriteBarrier((void **)&newNode
->info
.leaf
.memory
, node
->info
.leaf
.memory
);
782 /* Stomp on our existing node */
783 node
->numBytes
= size
;
784 node
->info
.leaf
.capacityInBytes
= 0;
785 node
->info
.leaf
.memory
= NULL
;
787 /* Cache our existing node */
788 __CFStorageSetCache(storage
, node
, absoluteByteNum
);
789 } else if (byteNum
+ size
<= storage
->maxLeafCapacity
) { // Inserting at middle; inserted region will fit into existing child
790 // Create new node to hold the overflow
791 newNode
= __CFStorageCreateNode(allocator
, storage
, true, node
->numBytes
- byteNum
);
792 if (node
->info
.leaf
.memory
) { // We allocate memory lazily...
793 __CFStorageAllocLeafNodeMemory(allocator
, storage
, newNode
, node
->numBytes
- byteNum
, false);
794 COPYMEM(node
->info
.leaf
.memory
+ byteNum
, newNode
->info
.leaf
.memory
, node
->numBytes
- byteNum
);
795 __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, byteNum
+ size
, false);
797 node
->numBytes
= byteNum
+ size
;
798 __CFStorageSetCache(storage
, node
, absoluteByteNum
- byteNum
);
799 } else { // Inserting some of new into one node, rest into another; remember that the assumption is size <= storage->maxLeafCapacity
800 newNode
= __CFStorageCreateNode(allocator
, storage
, true, node
->numBytes
+ size
- storage
->maxLeafCapacity
); // New stuff
801 if (node
->info
.leaf
.memory
) { // We allocate memory lazily...
802 __CFStorageAllocLeafNodeMemory(allocator
, storage
, newNode
, node
->numBytes
+ size
- storage
->maxLeafCapacity
, false);
803 COPYMEM(node
->info
.leaf
.memory
+ byteNum
, newNode
->info
.leaf
.memory
+ byteNum
+ size
- storage
->maxLeafCapacity
, node
->numBytes
- byteNum
);
804 __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, storage
->maxLeafCapacity
, false);
806 __CFStorageSetCache(storage
, node
, absoluteByteNum
- byteNum
);
807 node
->numBytes
= storage
->maxLeafCapacity
;
809 return CFStorageDoubleNodeReturnMake(node
, newNode
); // We do not retain 'node' because it is the given node.
810 } else { // No need to create new nodes!
811 if (node
->info
.leaf
.memory
) {
812 __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, node
->numBytes
+ size
, false);
813 COPYMEM(node
->info
.leaf
.memory
+ byteNum
, node
->info
.leaf
.memory
+ byteNum
+ size
, node
->numBytes
- byteNum
);
815 node
->numBytes
+= size
;
816 __CFStorageSetCache(storage
, node
, absoluteByteNum
- byteNum
);
817 return CFStorageDoubleNodeReturnMake(node
, NULL
); //return the existing node, meaning the parent does not have to do anything. Do not retain it because it is the given node.
821 static CFStorageDoubleNodeReturn
__CFStorageInsertBranchUnfrozen(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
822 CFIndex relativeByteNum
;
823 CFIndex childNum
; // we will insert after childNum, i.e. if childNum is 0, any new node becomes index 1. This can have value 0, 1, or 2.
824 CFStorageNode
*childNode
= __CFStorageFindChild(node
, byteNum
, true, &childNum
, &relativeByteNum
);
825 CFStorageDoubleNodeReturn newNodes
= __CFStorageInsert(allocator
, storage
, childNode
, relativeByteNum
, size
, absoluteByteNum
);
826 CFStorageDoubleNodeReturn result
= {node
, NULL
}; // the default return value meaning we did all of the work ourselves and our parent does not need to do anything
827 ASSERT(childNode
!= NULL
);
828 ASSERT(newNodes
.child
!= NULL
);
830 if (newNodes
.child
!= childNode
) {
831 /* We got back a replacement for the current child, so replace it. */
832 __CFStorageReleaseNode(storage
, childNode
);
833 __CFStorageSetChild(node
, childNum
, newNodes
.child
);
836 if (newNodes
.sibling
== NULL
) {
837 /* The insertion happened successfully without requiring us to add any new nodes. */
838 node
->numBytes
+= size
;
840 /* We got back an additional node to insert. */
841 CFStorageNode
*newChild
= newNodes
.sibling
;
842 if (node
->info
.notLeaf
.child
[2] == NULL
) { // There's an empty slot for the new node, cool
843 if (childNum
== 0) __CFStorageSetChild(node
, 2, node
->info
.notLeaf
.child
[1]); // Make room
844 __CFStorageSetChild(node
, childNum
+1, newChild
);
845 node
->numBytes
+= size
;
847 CFStorageNode
*anotherNode
= __CFStorageCreateNode(allocator
, storage
, false, 0); // Create another node
848 if (childNum
== 0) { // Last two children go to new node
849 __CFStorageSetChild(anotherNode
, 0, node
->info
.notLeaf
.child
[1]);
850 __CFStorageSetChild(anotherNode
, 1, node
->info
.notLeaf
.child
[2]);
851 __CFStorageSetChild(node
, 1, newChild
);
852 node
->info
.notLeaf
.child
[2] = NULL
;
853 } else if (childNum
== 1) { // Last child goes to new node
854 __CFStorageSetChild(anotherNode
, 0, newChild
);
855 __CFStorageSetChild(anotherNode
, 1, node
->info
.notLeaf
.child
[2]);
856 node
->info
.notLeaf
.child
[2] = NULL
;
857 } else { // New node contains the new comers...
858 __CFStorageSetChild(anotherNode
, 0, node
->info
.notLeaf
.child
[2]);
859 __CFStorageSetChild(anotherNode
, 1, newChild
);
860 node
->info
.notLeaf
.child
[2] = NULL
;
862 node
->numBytes
= node
->info
.notLeaf
.child
[0]->numBytes
+ node
->info
.notLeaf
.child
[1]->numBytes
;
863 anotherNode
->numBytes
= anotherNode
->info
.notLeaf
.child
[0]->numBytes
+ anotherNode
->info
.notLeaf
.child
[1]->numBytes
;
864 /* node should not be retained because it is the passed in node. anotherNode was created so we transfer its retain count */
865 result
.sibling
= anotherNode
;
873 /* Returns NULL or additional node to come after this node
874 Assumption: size is never > storage->maxLeafCapacity
876 static CFStorageDoubleNodeReturn
__CFStorageInsertUnfrozen(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
877 ASSERT(! node
->isFrozen
);
879 return __CFStorageInsertLeafUnfrozen(allocator
, storage
, node
, byteNum
, size
, absoluteByteNum
);
881 return __CFStorageInsertBranchUnfrozen(allocator
, storage
, node
, byteNum
, size
, absoluteByteNum
);
885 #pragma mark Frozen or Unfrozen Dispatch Functions
887 static CFStorageDoubleNodeReturn
__CFStorageInsert(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
888 if (node
->isFrozen
&& ! __CFStorageThawNodeDuringMutation(storage
, node
)) {
889 return __CFStorageInsertFrozen(allocator
, storage
, node
, byteNum
, size
, absoluteByteNum
);
892 return __CFStorageInsertUnfrozen(allocator
, storage
, node
, byteNum
, size
, absoluteByteNum
);
896 static CFStorageNode
*__CFStorageDelete(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFRange range
, bool compact
) {
897 if (node
->isFrozen
&& ! __CFStorageThawNodeDuringMutation(storage
, node
)) {
898 return __CFStorageDeleteFrozen(allocator
, storage
, node
, range
);
901 return __CFStorageDeleteUnfrozen(allocator
, storage
, node
, range
, compact
, false/*isRootNode*/);
906 #pragma mark Utility functions
908 CF_INLINE CFIndex
__CFStorageGetCount(CFStorageRef storage
) {
909 return __CFStorageConvertByteToValue(storage
, storage
->rootNode
.numBytes
);
912 static Boolean
__CFStorageEqual(CFTypeRef cf1
, CFTypeRef cf2
) {
913 CFStorageRef storage1
= (CFStorageRef
)cf1
;
914 CFStorageRef storage2
= (CFStorageRef
)cf2
;
915 CFIndex loc
, count
, valueSize
;
916 CFRange range1
, range2
;
917 uint8_t *ptr1
, *ptr2
;
919 count
= __CFStorageGetCount(storage1
);
920 if (count
!= __CFStorageGetCount(storage2
)) return false;
922 valueSize
= __CFStorageGetValueSize(storage1
);
923 if (valueSize
!= __CFStorageGetValueSize(storage2
)) return false;
925 loc
= range1
.location
= range1
.length
= range2
.location
= range2
.length
= 0;
928 while (loc
< count
) {
930 if (loc
>= range1
.location
+ range1
.length
) ptr1
= (uint8_t *)CFStorageGetValueAtIndex(storage1
, loc
, &range1
);
931 if (loc
>= range2
.location
+ range2
.length
) ptr2
= (uint8_t *)CFStorageGetValueAtIndex(storage2
, loc
, &range2
);
932 cntThisTime
= range1
.location
+ range1
.length
;
933 if (range2
.location
+ range2
.length
< cntThisTime
) cntThisTime
= range2
.location
+ range2
.length
;
935 if (memcmp(ptr1
, ptr2
, valueSize
* cntThisTime
) != 0) return false;
936 ptr1
+= valueSize
* cntThisTime
;
937 ptr2
+= valueSize
* cntThisTime
;
943 static CFHashCode
__CFStorageHash(CFTypeRef cf
) {
944 CFStorageRef Storage
= (CFStorageRef
)cf
;
945 return __CFStorageGetCount(Storage
);
948 static void __CFStorageDescribeNode(CFStorageNode
*node
, CFMutableStringRef str
, CFIndex level
) {
950 for (cnt
= 0; cnt
< level
; cnt
++) CFStringAppendCString(str
, " ", CFStringGetSystemEncoding());
953 CFStringAppendFormat(str
, NULL
, CFSTR("Leaf %ld/%ld (%p) refcount: %u frozen: %s\n"), node
->numBytes
, node
->info
.leaf
.capacityInBytes
, node
, node
->refCount
, node
->isFrozen
? "yes" : "no");
955 CFStringAppendFormat(str
, NULL
, CFSTR("Node %ld (%p) refcount: %u frozen: %s\n"), node
->numBytes
, node
, node
->refCount
, node
->isFrozen
? "yes" : "no");
956 for (cnt
= 0; cnt
< 3; cnt
++) if (node
->info
.notLeaf
.child
[cnt
]) __CFStorageDescribeNode(node
->info
.notLeaf
.child
[cnt
], str
, level
+1);
960 static CFIndex
__CFStorageGetNodeCapacity(CFStorageNode
*node
) {
962 if (node
->isLeaf
) return node
->info
.leaf
.capacityInBytes
;
963 return __CFStorageGetNodeCapacity(node
->info
.notLeaf
.child
[0]) + __CFStorageGetNodeCapacity(node
->info
.notLeaf
.child
[1]) + __CFStorageGetNodeCapacity(node
->info
.notLeaf
.child
[2]);
966 CFIndex
__CFStorageGetCapacity(CFStorageRef storage
) {
967 return __CFStorageConvertByteToValue(storage
, __CFStorageGetNodeCapacity(&storage
->rootNode
));
970 CFIndex
__CFStorageGetValueSize(CFStorageRef storage
) {
971 return storage
->valueSize
;
974 static CFStringRef
__CFStorageCopyDescription(CFTypeRef cf
) {
975 CFStorageRef storage
= (CFStorageRef
)cf
;
976 CFMutableStringRef result
;
977 CFAllocatorRef allocator
= CFGetAllocator(storage
);
978 result
= CFStringCreateMutable(allocator
, 0);
979 CFStringAppendFormat(result
, NULL
, CFSTR("<CFStorage %p [%p]>[count = %u, capacity = %u]\n"), storage
, allocator
, __CFStorageGetCount(storage
), __CFStorageGetCapacity(storage
));
980 __CFStorageDescribeNode(&storage
->rootNode
, result
, 0);
984 /* Returns true if enumeration should stop, false if it should continue. */
985 static bool __CFStorageEnumerateNodesInByteRangeWithBlock(CFStorageRef storage
, CFStorageNode
*node
, CFIndex globalOffsetOfNode
, CFRange range
, CFIndex concurrencyToken
, CFStorageApplierBlock applier
) {
988 CFIndex start
= range
.location
;
989 CFIndex length
= __CFMin(range
.length
, node
->numBytes
- start
);
990 if (! node
->info
.leaf
.memory
) {
991 __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage
), storage
, node
, node
->numBytes
, false);
993 applier(node
->info
.leaf
.memory
+ start
, __CFStorageConvertBytesToValueRange(storage
, globalOffsetOfNode
+ start
, length
), &stop
);
996 CFStorageNode
*children
[3] = {node
->info
.notLeaf
.child
[0], node
->info
.notLeaf
.child
[1], node
->info
.notLeaf
.child
[2]};
997 const CFIndex lengths
[3] = {children
[0]->numBytes
, children
[1] ? children
[1]->numBytes
: 0, children
[2] ? children
[2]->numBytes
: 0};
998 const CFIndex offsets
[3] = {0, lengths
[0], lengths
[0] + lengths
[1]};
999 const CFRange overlaps
[3] = {intersectionRange(CFRangeMake(offsets
[0], lengths
[0]), range
), intersectionRange(CFRangeMake(offsets
[1], lengths
[1]), range
), intersectionRange(CFRangeMake(offsets
[2], lengths
[2]), range
)};
1000 CFIndex numOverlappingChildren
= (!! overlaps
[0].length
+ !! overlaps
[1].length
+ !! overlaps
[2].length
);
1001 if (numOverlappingChildren
> 1) concurrencyToken
--;
1003 if (concurrencyToken
>= 0 && numOverlappingChildren
> 1) {
1004 CFIndex numChildren
= 1 + !!children
[1] + !!children
[2];
1005 const CFRange
* overlapsPtr
= overlaps
; //blocks don't let us reference arrays :(
1006 const CFIndex
* offsetsPtr
= offsets
;
1007 CFStorageNode
** childrenPtr
= children
;
1008 #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_WINDOWS
1009 __block
bool blockStop
= false;
1010 dispatch_apply(numChildren
, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT
, 0), ^(size_t ind
) {
1011 if (! blockStop
&& overlapsPtr
[ind
].length
> 0) {
1012 if (__CFStorageEnumerateNodesInByteRangeWithBlock(storage
, childrenPtr
[ind
], globalOffsetOfNode
+ offsetsPtr
[ind
], CFRangeMake(overlapsPtr
[ind
].location
- offsetsPtr
[ind
], overlapsPtr
[ind
].length
), concurrencyToken
, applier
)) {
1019 for (CFIndex ind
= 0; ind
< numChildren
; ind
++) {
1020 if (overlapsPtr
[ind
].length
> 0) {
1021 if (__CFStorageEnumerateNodesInByteRangeWithBlock(storage
, childrenPtr
[ind
], globalOffsetOfNode
+ offsetsPtr
[ind
], CFRangeMake(overlapsPtr
[ind
].location
- offsetsPtr
[ind
], overlapsPtr
[ind
].length
), concurrencyToken
, applier
)) {
1029 if (overlaps
[0].length
> 0) {
1030 stop
= stop
|| __CFStorageEnumerateNodesInByteRangeWithBlock(storage
, children
[0], globalOffsetOfNode
+ offsets
[0], CFRangeMake(overlaps
[0].location
- offsets
[0], overlaps
[0].length
), concurrencyToken
, applier
);
1032 if (overlaps
[1].length
> 0) {
1033 stop
= stop
|| __CFStorageEnumerateNodesInByteRangeWithBlock(storage
, children
[1], globalOffsetOfNode
+ offsets
[1], CFRangeMake(overlaps
[1].location
- offsets
[1], overlaps
[1].length
), concurrencyToken
, applier
);
1035 if (overlaps
[2].length
> 0) {
1036 stop
= stop
|| __CFStorageEnumerateNodesInByteRangeWithBlock(storage
, children
[2], globalOffsetOfNode
+ offsets
[2], CFRangeMake(overlaps
[2].location
- offsets
[2], overlaps
[2].length
), concurrencyToken
, applier
);
1043 static CFStorageNode
*_CFStorageFindNodeContainingByteRange(ConstCFStorageRef storage
, const CFStorageNode
*node
, CFRange nodeRange
, CFIndex globalOffsetOfNode
, CFRange
*outGlobalByteRangeOfResult
) {
1044 if (! node
->isLeaf
) {
1045 /* See how many children are overlapped by this range. If it's only 1, call us recursively on that node; otherwise we're it! */
1046 CFStorageNode
*children
[3] = {node
->info
.notLeaf
.child
[0], node
->info
.notLeaf
.child
[1], node
->info
.notLeaf
.child
[2]};
1047 const CFIndex lengths
[3] = {children
[0]->numBytes
, children
[1] ? children
[1]->numBytes
: 0, children
[2] ? children
[2]->numBytes
: 0};
1048 const CFIndex offsets
[3] = {0, lengths
[0], lengths
[0] + lengths
[1]};
1049 const CFRange overlaps
[3] = {intersectionRange(CFRangeMake(offsets
[0], lengths
[0]), nodeRange
), intersectionRange(CFRangeMake(offsets
[1], lengths
[1]), nodeRange
), intersectionRange(CFRangeMake(offsets
[2], lengths
[2]), nodeRange
)};
1050 CFIndex numOverlappingChildren
= (!! overlaps
[0].length
+ !! overlaps
[1].length
+ !! overlaps
[2].length
);
1051 ASSERT(numOverlappingChildren
> 0);
1052 if (numOverlappingChildren
== 1) {
1053 CFIndex overlappingChild
= (overlaps
[0].length
? 0 : (overlaps
[1].length
? 1 : 2));
1054 return _CFStorageFindNodeContainingByteRange(storage
, children
[overlappingChild
], CFRangeMake(nodeRange
.location
- offsets
[overlappingChild
], nodeRange
.length
), globalOffsetOfNode
+ offsets
[overlappingChild
], outGlobalByteRangeOfResult
);
1058 /* Either we are a leaf, in which case we contain the range, or we are a branch with multiple overlapping children. Either way, we are the minimum node containing the range in question. */
1059 *outGlobalByteRangeOfResult
= CFRangeMake(globalOffsetOfNode
, node
->numBytes
);
1060 return (CFStorageNode
*)node
;
1065 /* Frees all memory associated with the root node, effectively emptying the CFStorage */
1066 static void __CFStorageClearRootNode(CFStorageRef storage
) {
1067 CFAllocatorRef allocator
= CFGetAllocator(storage
);
1068 /* Have to release our children if we are a branch, or free our memory if we are a leaf */
1069 if (storage
->rootNode
.isLeaf
) {
1070 _CFAllocatorDeallocateGC(allocator
, storage
->rootNode
.info
.leaf
.memory
);
1073 __CFStorageReleaseNodeWithNullCheck(storage
, storage
->rootNode
.info
.notLeaf
.child
[0]);
1074 __CFStorageReleaseNodeWithNullCheck(storage
, storage
->rootNode
.info
.notLeaf
.child
[1]);
1075 __CFStorageReleaseNodeWithNullCheck(storage
, storage
->rootNode
.info
.notLeaf
.child
[2]);
1077 storage
->rootNode
.isLeaf
= true;
1078 storage
->rootNode
.numBytes
= 0;
1079 storage
->rootNode
.info
.leaf
.capacityInBytes
= 0;
1080 storage
->rootNode
.info
.leaf
.memory
= NULL
;
1083 static void __CFStorageDeallocate(CFTypeRef cf
) {
1084 /* CFStorage is used in CFArray. Under GC, CFArray references us strongly, but not retained. Thus we may be finalized before the array. When the array itself is finalized, it will call any custom deallocate callback on all of its contents, which means it has to walk the array. Thus CFStorage must be careful to not perturb its structure in Deallocate under GC.
1086 CFStorage nodes have a reference count, and if a node has a reference count of one, and we are in a mutating function, we conclude that this CFStorage has exclusive ownership of the node, and we can treat it as mutable even if it's marked as frozen (see __CFStorageThawNodeDuringMutation). Therefore it would be nice if we could decrement our nodes' refcounts in Deallocate. However, if we did so, then another CFStorage might treat a node that we reference as mutable and modify it, which must not happen, because we must not perturb the structure of a CFStorage in Deallocate. Thus we just "leak" a reference count under GC. Of course, these reference counts don't actually keep the memory alive in GC, so it's not a real leak.
1088 CFStorageRef storage
= (CFStorageRef
)cf
;
1089 if (! CF_IS_COLLECTABLE_ALLOCATOR(CFGetAllocator(storage
))) {
1090 __CFStorageClearRootNode(storage
);
1094 static CFTypeID __kCFStorageTypeID
= _kCFRuntimeNotATypeID
;
1096 static const CFRuntimeClass __CFStorageClass
= {
1097 _kCFRuntimeScannedObject
,
1101 __CFStorageDeallocate
,
1105 __CFStorageCopyDescription
1108 __private_extern__
void __CFStorageInitialize(void) {
1109 __kCFStorageTypeID
= _CFRuntimeRegisterClass(&__CFStorageClass
);
/*** Public API ***/
1114 CFStorageRef
CFStorageCreate(CFAllocatorRef allocator
, CFIndex valueSize
) {
1115 CFStorageRef storage
;
1116 CFIndex size
= sizeof(struct __CFStorage
) - sizeof(CFRuntimeBase
);
1117 storage
= (CFStorageRef
)_CFRuntimeCreateInstance(allocator
, __kCFStorageTypeID
, size
, NULL
);
1118 if (NULL
== storage
) {
1121 storage
->valueSize
= valueSize
;
1122 /* if valueSize is a power of 2, then set the shifter to the log base 2 of valueSize. Otherwise set it to NO_SHIFTER */
1123 if (valueSize
> 0 && !(valueSize
& (valueSize
- 1))) {
1124 #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED
1125 storage
->byteToValueShifter
= __builtin_ctzl(valueSize
);
1127 CFIndex tempSize
= valueSize
;
1128 storage
->byteToValueShifter
= 0;
1129 while (tempSize
> 1) {
1130 storage
->byteToValueShifter
++;
1136 storage
->byteToValueShifter
= NO_SHIFTER
;
1139 CF_SPINLOCK_INIT_FOR_STRUCTS(storage
->cacheReaderMemoryAllocationLock
);
1140 storage
->maxLeafCapacity
= __CFStorageMaxLeafCapacity
;
1141 if (valueSize
&& ((storage
->maxLeafCapacity
% valueSize
) != 0)) {
1142 storage
->maxLeafCapacity
= (storage
->maxLeafCapacity
/ valueSize
) * valueSize
; // Make it fit perfectly (3406853)
1144 memset(&(storage
->rootNode
), 0, sizeof(CFStorageNode
));
1145 storage
->rootNode
.isLeaf
= true;
1146 storage
->rootNode
.refCount
= 0;
1147 if (valueSize
>= sizeof(void *)) {
1148 storage
->nodeHint
= __kCFAllocatorGCScannedMemory
;
1151 // Don't scan nodes if the value size is smaller than a pointer (8198596)
1152 storage
->nodeHint
= 0;
1154 if (__CFOASafe
) __CFSetLastAllocationEventName(storage
, "CFStorage");
1158 CFStorageRef
CFStorageCreateWithSubrange(CFStorageRef mutStorage
, CFRange range
) {
1159 const ConstCFStorageRef storage
= mutStorage
; //we expect this to never modify the storage, so use a const variable to help enforce that
1160 CFStorageRef result
= CFStorageCreate(CFGetAllocator(storage
), storage
->valueSize
);
1162 if (range
.length
> 0) {
1163 /* Start by finding the node that contains the entire range. Bump the reference count of its children and add them to the root of our new copy. */
1164 const CFRange byteRange
= __CFStorageConvertValuesToByteRange(storage
, range
.location
, range
.length
);
1165 CFRange byteRangeOfContainingNode
;
1166 CFStorageNode
*nodeContainingEntireRange
= _CFStorageFindNodeContainingByteRange(storage
, &storage
->rootNode
, byteRange
, 0, &byteRangeOfContainingNode
);
1167 ASSERT(nodeContainingEntireRange
!= NULL
);
1169 /* If the result is a leaf, insert the portion we care about */
1170 if (nodeContainingEntireRange
->isLeaf
) {
1171 CFStorageInsertValues(result
, CFRangeMake(0, range
.length
));
1172 if (nodeContainingEntireRange
->info
.leaf
.memory
) {
1173 CFIndex offsetIntoNode
= byteRange
.location
- byteRangeOfContainingNode
.location
;
1174 ASSERT(offsetIntoNode
>= 0);
1175 CFStorageReplaceValues(result
, CFRangeMake(0, range
.length
), nodeContainingEntireRange
->info
.leaf
.memory
+ offsetIntoNode
);
1179 /* The result is not a leaf. Insert all of its children into our root. */
1180 ASSERT(byteRangeOfContainingNode
.length
== nodeContainingEntireRange
->numBytes
);
1181 result
->rootNode
.isLeaf
= false;
1182 result
->rootNode
.numBytes
= byteRangeOfContainingNode
.length
;
1183 result
->rootNode
.info
.notLeaf
.child
[0] = result
->rootNode
.info
.notLeaf
.child
[1] = result
->rootNode
.info
.notLeaf
.child
[2] = NULL
;
1184 for (CFIndex i
=0; i
< 3; i
++) {
1185 CFStorageNode
*newNode
= nodeContainingEntireRange
->info
.notLeaf
.child
[i
];
1186 if (! newNode
) break;
1187 __CFStorageFreezeNode(newNode
);
1188 __CFStorageSetChild(&result
->rootNode
, i
, __CFStorageRetainNode(newNode
));
1191 /* Now delete from the beginning or end to trim this to the right size */
1192 CFRange rangeOfContainingNode
= __CFStorageConvertBytesToValueRange(result
, byteRangeOfContainingNode
.location
, byteRangeOfContainingNode
.length
);
1193 CFIndex prefixToTrim
= range
.location
- rangeOfContainingNode
.location
;
1194 CFIndex suffixToTrim
= (rangeOfContainingNode
.location
+ rangeOfContainingNode
.length
) - (range
.location
+ range
.length
);
1195 ASSERT(prefixToTrim
>= 0);
1196 ASSERT(suffixToTrim
>= 0);
1197 ASSERT(CFStorageGetCount(result
) == rangeOfContainingNode
.length
);
1198 if (suffixToTrim
> 0) CFStorageDeleteValues(result
, CFRangeMake(rangeOfContainingNode
.length
- suffixToTrim
, suffixToTrim
));
1199 if (prefixToTrim
> 0) CFStorageDeleteValues(result
, CFRangeMake(0, prefixToTrim
));
1203 ASSERT(CFStorageGetCount(result
) == range
.length
);
1207 CFTypeID
CFStorageGetTypeID(void) {
1208 return __kCFStorageTypeID
;
1211 CFIndex
CFStorageGetCount(CFStorageRef storage
) {
1212 return __CFStorageGetCount(storage
);
1215 /* Returns pointer to the specified value
1216 index and validConsecutiveValueRange are in terms of values
1218 void *CFStorageGetValueAtIndex(CFStorageRef storage
, CFIndex idx
, CFRange
*validConsecutiveValueRange
) {
1220 return __CFStorageGetValueAtIndex(storage
, idx
, validConsecutiveValueRange
? validConsecutiveValueRange
: &dummy
, true/*requireUnfreezing*/);
1223 const void *CFStorageGetConstValueAtIndex(CFStorageRef storage
, CFIndex idx
, CFRange
*validConsecutiveValueRange
) {
1225 return __CFStorageGetValueAtIndex(storage
, idx
, validConsecutiveValueRange
? validConsecutiveValueRange
: &dummy
, false/*requireUnfreezing*/);
1229 /* Makes space for range.length values at location range.location
1230 This function deepens the tree if necessary...
1232 void CFStorageInsertValues(CFStorageRef storage
, CFRange range
) {
1233 CFIndex numBytesToInsert
= __CFStorageConvertValueToByte(storage
, range
.length
);
1234 CFIndex byteNum
= __CFStorageConvertValueToByte(storage
, range
.location
);
1235 const CFIndex expectedByteCount
= storage
->rootNode
.numBytes
+ numBytesToInsert
;
1236 const CFAllocatorRef allocator
= CFGetAllocator(storage
);
1237 const CFIndex insertionChunkSize
= storage
->maxLeafCapacity
;
1238 while (numBytesToInsert
> 0) {
1240 const CFIndex insertThisTime
= __CFMin(numBytesToInsert
, insertionChunkSize
);
1241 CFStorageDoubleNodeReturn newNodes
= __CFStorageInsertUnfrozen(allocator
, storage
, &storage
->rootNode
, byteNum
, insertThisTime
, byteNum
); //we don't have to call the frozen variant because the root node is never frozen
1242 ASSERT(newNodes
.child
== &storage
->rootNode
);// unfrozen variant should always give us our node back. We may have another node to insert in newNodes.sibling
1243 if (newNodes
.sibling
!= NULL
) {
1244 CFStorageNode
*newNode
= newNodes
.sibling
;
1245 /* Need to create a new root node. Copy our existing root node's contents to a new heap node. */
1246 CFStorageNode
*heapRoot
= __CFStorageCreateNode(allocator
, storage
, storage
->rootNode
.isLeaf
, storage
->rootNode
.numBytes
); // Will copy the (static) rootNode over to this
1247 objc_memmove_collectable(&heapRoot
->info
, &storage
->rootNode
.info
, sizeof heapRoot
->info
);
1249 /* Our root is about to become a branch. If our root node is currently a leaf, we need to clear the cache, because if the cache points at the root then the cache is about to start pointing at a branch node (which is not allowed) */
1250 if (storage
->rootNode
.isLeaf
) {
1251 __CFStorageSetCache(storage
, NULL
, 0);
1252 storage
->rootNode
.isLeaf
= false;
1255 /* Set the new children in our root. Note that it's important that we overwrite the root node's info, because we wanted to transfer the refcounts of our children (or our allocated memory, if we are a leaf) to the new heap root */
1256 __CFStorageSetChild(&storage
->rootNode
, 0, heapRoot
);
1257 __CFStorageSetChild(&storage
->rootNode
, 1, newNode
);
1258 storage
->rootNode
.info
.notLeaf
.child
[2] = NULL
;
1259 storage
->rootNode
.numBytes
= heapRoot
->numBytes
+ newNode
->numBytes
;
1261 numBytesToInsert
-= insertThisTime
;
1262 byteNum
+= insertThisTime
;
1263 ASSERT(storage
->rootNode
.numBytes
+ numBytesToInsert
== expectedByteCount
);
1265 ASSERT(expectedByteCount
== storage
->rootNode
.numBytes
);
1269 /* Deletes the values in the specified range
1270 This function gets rid of levels if necessary...
1272 void CFStorageDeleteValues(CFStorageRef storage
, CFRange range
) {
1274 CFAllocatorRef allocator
= CFGetAllocator(storage
);
1275 CFRange byteRange
= __CFStorageConvertValuesToByteRange(storage
, range
.location
, range
.length
);
1276 const CFIndex expectedByteCount
= storage
->rootNode
.numBytes
- byteRange
.length
;
1278 /* We don't try to mantain the cache across deletion */
1279 __CFStorageSetCache(storage
, NULL
, 0);
1281 /* The root node can never be frozen, so it's always OK to modify it */
1282 ASSERT(! storage
->rootNode
.isFrozen
);
1283 CFStorageNode
*newRoot
= __CFStorageDeleteUnfrozen(allocator
, storage
, &storage
->rootNode
, byteRange
, true/*compact*/, true/*isRootNode*/);
1285 /* There are three return values we can get:
1286 NULL -> everything was deleted
1287 the root node -> something was deleted, but no nodes became empty, so we don't have to replace any children
1288 a different node -> represents the new root
1290 if (newRoot
== NULL
) {
1291 __CFStorageClearRootNode(storage
);
1293 else if (newRoot
== &storage
->rootNode
) {
1294 /* No need to replace any children, nothing to do for this case */
1297 /* Got a legitimately new root back. If it is unfrozen, we can just acquire its guts. If it is frozen, we have more work to do. Note that we do not have to worry about releasing any existing children of the root, beacuse __CFStorageDeleteUnfrozen already did that. Also note that if we got a legitimately new root back, we must be a branch node, because if we were a leaf node, we would have been unfrozen and gotten ourself back. */
1298 storage
->rootNode
.numBytes
= newRoot
->numBytes
;
1299 storage
->rootNode
.isLeaf
= newRoot
->isLeaf
;
1300 bzero(&storage
->rootNode
.info
, sizeof storage
->rootNode
.info
); //be a little paranoid here
1301 if (newRoot
->isLeaf
) {
1302 if (! newRoot
->isFrozen
) {
1303 /* If the leaf is not frozen, we can just steal its memory (if any)! If it is frozen, we must copy it. */
1304 __CFAssignWithWriteBarrier((void **)&storage
->rootNode
.info
.leaf
.memory
, newRoot
->info
.leaf
.memory
);
1305 /* Clear out the old node, because we stole its memory and we don't want it to deallocate it when teh node is destroyed below. */
1306 bzero(&newRoot
->info
, sizeof newRoot
->info
);
1309 /* The leaf is frozen, so we have to copy its memory. */
1310 if (newRoot
->info
.leaf
.memory
) {
1311 __CFStorageAllocLeafNodeMemory(allocator
, storage
, &storage
->rootNode
, storage
->rootNode
.numBytes
, false);
1312 COPYMEM(newRoot
->info
.leaf
.memory
, storage
->rootNode
.info
.leaf
.memory
, newRoot
->numBytes
);
1316 /* New root is a branch. */
1317 ASSERT(newRoot
->info
.notLeaf
.child
[0] && newRoot
->info
.notLeaf
.child
[1]); //never expect to get back a node with only one child
1318 __CFStorageSetChild(&storage
->rootNode
, 0, __CFStorageRetainNode(newRoot
->info
.notLeaf
.child
[0]));
1319 __CFStorageSetChild(&storage
->rootNode
, 1, __CFStorageRetainNode(newRoot
->info
.notLeaf
.child
[1]));
1320 if (newRoot
->info
.notLeaf
.child
[2]) __CFStorageSetChild(&storage
->rootNode
, 2, __CFStorageRetainNode(newRoot
->info
.notLeaf
.child
[2]));
1323 __CFStorageReleaseNodeWithNullCheck(storage
, newRoot
); //balance the retain from __CFStorageDeleteUnfrozen
1324 ASSERT(expectedByteCount
== storage
->rootNode
.numBytes
);
1328 void CFStorageGetValues(CFStorageRef storage
, CFRange range
, void *values
) {
1330 while (range
.length
> 0) {
1332 void *storagePtr
= __CFStorageGetValueAtIndex(storage
, range
.location
, &leafRange
, false/*requireUnfreezing*/);
1333 CFIndex cntThisTime
= __CFMin(range
.length
, leafRange
.length
- (range
.location
- leafRange
.location
));
1334 CFIndex byteCntThisTime
= __CFStorageConvertValueToByte(storage
, cntThisTime
);
1335 COPYMEM(storagePtr
, values
, byteCntThisTime
);
1336 values
= (uint8_t *)values
+ byteCntThisTime
;
1337 range
.location
+= cntThisTime
;
1338 range
.length
-= cntThisTime
;
1342 unsigned long _CFStorageFastEnumeration(CFStorageRef storage
, struct __objcFastEnumerationStateEquivalent
*state
, void *stackbuffer
, unsigned long count
) {
1343 // without trying to understand the data structure, each time through search for block containing index
1345 if (state
->state
== 0) { /* first time, get length */
1346 state
->extra
[0] = __CFStorageGetCount(storage
);
1348 if (state
->state
>= state
->extra
[0]) return 0;
1349 state
->itemsPtr
= (unsigned long *)CFStorageGetValueAtIndex(storage
, state
->state
, &leafRange
);
1350 state
->state
+= leafRange
.length
;
1351 return leafRange
.length
;
1354 void CFStorageApplyFunction(CFStorageRef storage
, CFRange range
, CFStorageApplierFunction applier
, void *context
) {
1356 CFIndex valueSize
= storage
->valueSize
;
1357 CFStorageApplyBlock(storage
, range
, 0, ^(const void *storagePtr
, CFRange subrange
, bool *stop
){
1358 while (subrange
.length
--) {
1359 applier(storagePtr
, context
);
1360 storagePtr
= valueSize
+ (const char *)storagePtr
;
1365 void CFStorageApplyBlock(CFStorageRef storage
, CFRange range
, CFStorageEnumerationOptionFlags options
, CFStorageApplierBlock applier
) {
1366 if (! range
.length
) return;
1367 CFRange byteRange
= __CFStorageConvertValuesToByteRange(storage
, range
.location
, range
.length
);
1368 /* As we descend the tree, if we find we need to go down two or more children, and the concurrency token is not zero, then we decrement the concurrency token and do it concurrently. Since we have 3 children, a concurrency token of 3 yields up to 3**3 == 27 threads, which is a lot! Concurrency benefits start to kick in around one million elements */
1369 CFIndex concurrencyToken
= 0;
1370 if ((options
& kCFStorageEnumerationConcurrent
) && (range
.length
>= 1024 * 1024)) {
1371 concurrencyToken
= 3;
1373 __CFStorageEnumerateNodesInByteRangeWithBlock(storage
, &storage
->rootNode
, 0/*globalOffsetOfNode*/, byteRange
, concurrencyToken
, applier
);
1376 void CFStorageReplaceValues(CFStorageRef storage
, CFRange range
, const void *values
) {
1378 while (range
.length
> 0) {
1380 void *storagePtr
= __CFStorageGetValueAtIndex(storage
, range
.location
, &leafRange
, true/*requireUnfreezing*/);
1381 ASSERT(range
.location
>= leafRange
.location
);
1382 ASSERT(range
.location
< leafRange
.location
+ leafRange
.length
);
1383 CFIndex cntThisTime
= __CFMin(range
.length
, leafRange
.length
- (range
.location
- leafRange
.location
));
1384 CFIndex byteCntThisTime
= __CFStorageConvertValueToByte(storage
, cntThisTime
);
1385 COPYMEM(values
, storagePtr
, byteCntThisTime
);
1386 values
= (const uint8_t *)values
+ byteCntThisTime
;
1387 range
.location
+= cntThisTime
;
1388 range
.length
-= cntThisTime
;
1392 static void __CFStorageApplyNodeBlockInterior(CFStorageRef storage
, CFStorageNode
*node
, void (^block
)(CFStorageRef storage
, CFStorageNode
*node
)) {
1393 block(storage
, node
);
1394 if (! node
->isLeaf
) {
1395 CFStorageNode
*childNode
;
1396 if ((childNode
= node
->info
.notLeaf
.child
[0])) __CFStorageApplyNodeBlockInterior(storage
, childNode
, block
);
1397 if ((childNode
= node
->info
.notLeaf
.child
[1])) __CFStorageApplyNodeBlockInterior(storage
, childNode
, block
);
1398 if ((childNode
= node
->info
.notLeaf
.child
[2])) __CFStorageApplyNodeBlockInterior(storage
, childNode
, block
);
1402 static void __CFStorageApplyNodeBlock(CFStorageRef storage
, void (^block
)(CFStorageRef storage
, CFStorageNode
*node
)) {
1403 __CFStorageApplyNodeBlockInterior(storage
, &storage
->rootNode
, block
);
/* Debug-only helper (marked unused): estimates heap bytes consumed by the tree,
   summing malloc_size of every heap node plus every leaf's value buffer.
   The inline root node is excluded since it lives inside the CFStorage struct. */
static CFIndex __CFStorageEstimateTotalAllocatedSize(CFStorageRef storage) __attribute__((unused));
static CFIndex __CFStorageEstimateTotalAllocatedSize(CFStorageRef storage) {
    __block CFIndex nodeResult = 0;
    __block CFIndex contentsResult = 0;
    __CFStorageApplyNodeBlock(storage, ^(CFStorageRef storage, CFStorageNode *node) {
        if (node != &storage->rootNode) {
            nodeResult += malloc_size(node);
            if (node->isLeaf && node->info.leaf.memory != NULL) {
                contentsResult += malloc_size(node->info.leaf.memory);
            }
        }
    });
    return nodeResult + contentsResult;
}
1421 void __CFStorageSetAlwaysFrozen(CFStorageRef storage
, bool alwaysFrozen
) {
1422 storage
->alwaysFrozen
= alwaysFrozen
;
1425 static CFIndex
__CFStorageCheckNodeCachedLengthIntegrity(ConstCFStorageRef storage
, const CFStorageNode
*node
) {
1427 ASSERT(node
->numBytes
> 0 || node
== &storage
->rootNode
);
1428 return node
->numBytes
;
1431 CFStorageNode
*childNode
;
1432 CFIndex expectedResult
= __CFStorageCheckNodeCachedLengthIntegrity(storage
, node
->info
.notLeaf
.child
[0]);
1433 if ((childNode
= node
->info
.notLeaf
.child
[1])) {
1434 expectedResult
+= __CFStorageCheckNodeCachedLengthIntegrity(storage
, childNode
);
1435 if ((childNode
= node
->info
.notLeaf
.child
[2])) {
1436 expectedResult
+= __CFStorageCheckNodeCachedLengthIntegrity(storage
, childNode
);
1439 ASSERT(expectedResult
== node
->numBytes
);
1440 return expectedResult
;
1444 static void __CFStorageCheckNodeIntegrity(ConstCFStorageRef storage
, const CFStorageNode
*node
) {
1445 ASSERT(node
->isFrozen
== 0 || node
->isFrozen
== 1);
1447 __CFStorageCheckNodeCachedLengthIntegrity(storage
, node
);
1448 /* If we are a branch, make sure that we have at least one child, and that there is not a non-NULL child after a NULL child */
1449 if (! node
->isLeaf
) {
1450 ASSERT(node
->info
.notLeaf
.child
[0] != NULL
);
1451 if (node
->info
.notLeaf
.child
[1] == NULL
) ASSERT(node
->info
.notLeaf
.child
[2] == NULL
);
1455 static void __CFStorageCheckIntegrity(CFStorageRef storage
) {
1456 __CFStorageApplyNodeBlock(storage
, ^(CFStorageRef storage
, CFStorageNode
*node
) {
1457 __CFStorageCheckNodeIntegrity(storage
, node
);
/* Used by CFArray.c */
1463 static void __CFStorageNodeSetUnscanned(CFStorageNode
*node
, auto_zone_t
*zone
) {
1465 auto_zone_set_unscanned(zone
, node
->info
.leaf
.memory
);
1467 CFStorageNode
**children
= node
->info
.notLeaf
.child
;
1468 if (children
[0]) __CFStorageNodeSetUnscanned(children
[0], zone
);
1469 if (children
[1]) __CFStorageNodeSetUnscanned(children
[1], zone
);
1470 if (children
[2]) __CFStorageNodeSetUnscanned(children
[2], zone
);
1474 __private_extern__
void _CFStorageSetWeak(CFStorageRef storage
) {
1475 storage
->nodeHint
= 0;
1476 __CFStorageNodeSetUnscanned(&storage
->rootNode
, (auto_zone_t
*)objc_collectableZone());