/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*  CFStorage.c
    Copyright (c) 1999-2009, Apple Inc. All rights reserved.
    Responsibility: Ali Ozer
*/

/*
2-3 tree storing arbitrary sized values.

??? Currently elementSize cannot be greater than storage->maxLeafCapacity, which is less than or equal to __CFStorageMaxLeafCapacity.

CFStorage is thread-safe for multiple readers, but not thread-safe for simultaneous reading and writing.
*/
#include "CFStorage.h"
#include "CFInternal.h"

#if !defined(PAGE_SIZE)
#define PAGE_SIZE 4096
#endif
// Above 15K, malloc switches (??? or used to in early Leopard) to using vm allocates for blocks; it seems best to avoid that.
// Also, tests with StorageTimer.c done in 4/07 indicate that 4096 * 3 is better than smaller or larger node sizes.
#define __CFStorageMaxLeafCapacity (4096 * 3)

// If the max length of a node is less than can fit in a half-word (half of a long), we can get away without ever caching the high half-word of the cachedRange
#if __LP64__
#if __CFStorageMaxLeafCapacity > 0xFFFFFFFFULL
#define POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD 1
#endif
#else
#if __CFStorageMaxLeafCapacity > 0xFFFFUL
#define POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD 1
#endif
#endif

#define COPYMEM(src,dst,n) objc_memmove_collectable((dst), (src), (n))
#define PAGE_LIMIT ((CFIndex)PAGE_SIZE / 2)
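/* Note (assumption, not from the original comments): COPYMEM routes value copies through
   objc_memmove_collectable, presumably so that stores into GC-scanned leaf memory go through
   the collector-aware memmove; with a non-collecting allocator it behaves like a plain memmove. */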
CF_INLINE int32_t roundToPage(int32_t num) {
    return (num + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}
/* Each node in the storage. isLeaf determines whether the node is a leaf node or a node inside the tree. If the latter, the number of children is determined by the number of non-NULL entries in child[]. (NULL entries are at the end.)
*/
typedef struct __CFStorageNode {
    CFIndex numBytes;    /* Number of actual bytes in this node and all its children */
    bool isLeaf;
    union {
        struct {
            CFIndex capacityInBytes;    // capacityInBytes is capacity of memory; this is either 0, or >= numBytes
            uint8_t *memory;
        } leaf;
        struct {
            struct __CFStorageNode *child[3];
        } notLeaf;
    } info;
} CFStorageNode;
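/* Illustrative example (not from the original source): numBytes is cumulative, so an internal
   node whose three leaf children hold 4096, 4096 and 2048 bytes reports numBytes == 10240,
   which is what lets the child-search below skip whole subtrees by byte count alone. */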
/* This is the struct used to store the cache in the CFStorage; it enables us to make the cache thread-safe for multiple readers (which update the cache). The values in this cache store half the desired value in the top half, and the genCount of the writer in the low half. This cache is consistent only if all of the genCounts are the same. Note that the cache does not provide thread-safety for readers in the presence of a writer.

   The cached range (location, length) is in terms of values; if the cached range is not (0,0), then the cached node needs to be non-NULL and pointing at a leaf node.
*/
typedef struct {
    unsigned long locationHi, locationLo;    // cachedRange.location
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
    unsigned long lengthHi;
#endif
    unsigned long lengthLo;    // cachedRange.length; note that we do not bother with lengthHi if __CFStorageMaxLeafCapacity fits in a half-word
    unsigned long cachedNodeHi, cachedNodeLo;    // cachedNode
} CFStorageAccessCacheParts;
/* The CFStorage object.
*/
struct __CFStorage {
    CFRuntimeBase base;
    CFIndex valueSize;
    CFSpinLock_t cacheReaderMemoryAllocationLock;
    int cacheGenerationCount;
    CFStorageAccessCacheParts cacheParts;
    CFIndex maxLeafCapacity;    // In terms of bytes
    CFStorageNode rootNode;
    CFOptionFlags nodeHint;     // __kCFAllocatorGCScannedMemory or 0.
};
/* Allocates the memory and initializes the capacity in a leaf. __CFStorageAllocLeafNodeMemory() is the entry point; __CFStorageAllocLeafNodeMemoryAux is called if actual reallocation is needed. __CFStorageAllocLeafNodeMemoryAux() locks not for mutations (mutations are not thread-safe in general), but for lazy allocation of storage during reading.
*/
static void __CFStorageAllocLeafNodeMemoryAux(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex cap) {
    __CFSpinLock(&(storage->cacheReaderMemoryAllocationLock));
    __CFAssignWithWriteBarrier((void **)&node->info.leaf.memory, _CFAllocatorReallocateGC(allocator, node->info.leaf.memory, cap, storage->nodeHint));    // This will free...
    if (__CFOASafe) __CFSetLastAllocationEventName(node->info.leaf.memory, "CFStorage (node bytes)");
    node->info.leaf.capacityInBytes = cap;
    __CFSpinUnlock(&(storage->cacheReaderMemoryAllocationLock));
}
CF_INLINE void __CFStorageAllocLeafNodeMemory(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex cap, bool compact) {
    if (cap > PAGE_LIMIT) {
        cap = roundToPage(cap);
        if (cap > storage->maxLeafCapacity) cap = storage->maxLeafCapacity;
    } else {
        cap = (((cap + 63) / 64) * 64);
    }
    if (compact ? (cap != node->info.leaf.capacityInBytes) : (cap > node->info.leaf.capacityInBytes)) __CFStorageAllocLeafNodeMemoryAux(allocator, storage, node, cap);
}
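/* Worked example of the rounding above (illustrative): with PAGE_SIZE 4096, PAGE_LIMIT is 2048,
   so a request for 100 bytes is rounded up to 128 (64-byte granularity), while a request for
   3000 bytes exceeds PAGE_LIMIT and is rounded up to a full page (4096), then clamped to
   storage->maxLeafCapacity if that is smaller. */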
#if __LP64__
#define genCountMask    0x00000000FFFFFFFFUL
#define dataMask        0xFFFFFFFF00000000UL
#define shiftLowWordBy  32
#else
#define genCountMask    0x0000FFFFUL
#define dataMask        0xFFFF0000UL
#define shiftLowWordBy  16
#endif
/* Sets the cache to point at the specified node. loc and len are in terms of values, not bytes. To clear the cache set these two to 0.
   At least one of node or memory should be non-NULL. memory is consulted first when using the cache.
*/
CF_INLINE void __CFStorageSetCache(CFStorageRef storage, CFStorageNode *node, CFIndex loc, CFIndex len) {
    unsigned int genCount = ((unsigned int)OSAtomicIncrement32(&storage->cacheGenerationCount)) & genCountMask;
    CFStorageAccessCacheParts cacheParts;
    cacheParts.locationHi = (loc & dataMask) | genCount;
    cacheParts.locationLo = (loc << shiftLowWordBy) | genCount;
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
    cacheParts.lengthHi = (len & dataMask) | genCount;
#endif
    cacheParts.lengthLo = (len << shiftLowWordBy) | genCount;
    cacheParts.cachedNodeHi = ((unsigned long)node & dataMask) | genCount;
    cacheParts.cachedNodeLo = ((unsigned long)node << shiftLowWordBy) | genCount;
    storage->cacheParts = cacheParts;
}
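/* Worked example of the split-word encoding (illustrative, LP64 values): for a cached location
   of 0x0000000512345678 written with genCount 7, locationHi holds the high half plus the
   genCount (0x0000000500000007) and locationLo holds the low half shifted up plus the genCount
   (0x1234567800000007). __CFStorageGetCache() below recombines the two dataMask halves only
   when every cache field carries the same genCount, so a torn read seen by a concurrent reader
   is rejected and the reader falls back to the full tree walk. */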
/* Thread-safely get the cached range and node info.
*/
CF_INLINE Boolean __CFStorageGetCache(CFStorageRef storage, CFRange *cachedRangePtr, CFStorageNode **cachedNodePtr) {
    CFStorageAccessCacheParts cacheParts = storage->cacheParts;

    unsigned int genCount = cacheParts.locationHi & genCountMask;

    // Check to make sure the genCounts of all the items are the same; if not, the cache was inconsistent
    if ((cacheParts.locationLo & genCountMask) == genCount &&
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
        (cacheParts.lengthHi & genCountMask) == genCount &&
#endif
        (cacheParts.lengthLo & genCountMask) == genCount &&
        (cacheParts.cachedNodeHi & genCountMask) == genCount &&
        (cacheParts.cachedNodeLo & genCountMask) == genCount) {

        *cachedNodePtr = (CFStorageNode *)((cacheParts.cachedNodeHi & dataMask) | ((cacheParts.cachedNodeLo & dataMask) >> shiftLowWordBy));
        cachedRangePtr->location = (cacheParts.locationHi & dataMask) | ((cacheParts.locationLo & dataMask) >> shiftLowWordBy);
        cachedRangePtr->length = (cacheParts.lengthLo & dataMask) >> shiftLowWordBy;
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
        cachedRangePtr->length |= (cacheParts.lengthHi & dataMask);
#endif
        return true;
    }
    return false;
}
/* Gets the location for the specified absolute loc from the cached info.
   Returns NULL if the location is not in the cache.
*/
CF_INLINE uint8_t *__CFStorageGetFromCache(CFStorageRef storage, CFIndex loc, CFRange *validConsecutiveValueRange) {
    CFRange cachedRange = {0, 0};
    CFStorageNode *cachedNode = 0;

    // If we can't get values from the cache, return NULL
    if (!__CFStorageGetCache(storage, &cachedRange, &cachedNode)) return NULL;

    // Check to see if the index is in the cache
    if (loc < cachedRange.location || loc >= cachedRange.location + cachedRange.length) return NULL;

    // If the cached node has no memory, return here; it will be allocated as a result of the non-cached lookup.
    if (!cachedNode->info.leaf.memory) return NULL;

    // The cache has consistent values, and in fact, the values we're looking for!
    uint8_t *result = cachedNode->info.leaf.memory + (loc - cachedRange.location) * storage->valueSize;
    *validConsecutiveValueRange = cachedRange;

    return result;
}
/* Returns the number of the child containing the desired value and the relative index of the value in that child.
   forInsertion = true means that we are looking for the child in which to insert; this changes the behavior when the index is at the end of a child
   relativeByteNum (not optional, for performance reasons) returns the relative byte number of the specified byte in the child.
   Don't call with leaf nodes!
*/
CF_INLINE void __CFStorageFindChild(CFStorageNode *node, CFIndex byteNum, bool forInsertion, CFIndex *childNum, CFIndex *relativeByteNum) {
    if (forInsertion) byteNum--;    /* If for insertion, we do <= checks, not <, so this accomplishes the same thing */
    if (byteNum < node->info.notLeaf.child[0]->numBytes) *childNum = 0;
    else {
        byteNum -= node->info.notLeaf.child[0]->numBytes;
        if (byteNum < node->info.notLeaf.child[1]->numBytes) *childNum = 1;
        else {
            byteNum -= node->info.notLeaf.child[1]->numBytes;
            *childNum = 2;
        }
    }
    if (forInsertion) byteNum++;
    *relativeByteNum = byteNum;
}
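/* Illustrative example: with children holding 100, 200 and 50 bytes, byteNum 250 (not for
   insertion) selects child 1 with relativeByteNum 150, and byteNum 300 selects child 2 with
   relativeByteNum 0. With forInsertion true, byteNum 300 instead selects child 1 with
   relativeByteNum 200, i.e. appending at the end of child 1 rather than the front of child 2. */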
/* Finds the location where the specified byte is stored. If validConsecutiveByteRange is not NULL, returns
   the range of bytes that are consecutive with this one.
   !!! Assumes the byteNum is within the range of this node.
*/
static void *__CFStorageFindByte(CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFStorageNode **resultNode, CFRange *validConsecutiveByteRange) {
    if (node->isLeaf) {
        if (validConsecutiveByteRange) *validConsecutiveByteRange = CFRangeMake(0, node->numBytes);
        __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage), storage, node, node->numBytes, false);
        if (resultNode) *resultNode = node;
        return node->info.leaf.memory + byteNum;
    } else {
        void *result;
        CFIndex childNum;
        CFIndex relativeByteNum;
        __CFStorageFindChild(node, byteNum, false, &childNum, &relativeByteNum);
        result = __CFStorageFindByte(storage, node->info.notLeaf.child[childNum], relativeByteNum, resultNode, validConsecutiveByteRange);
        if (validConsecutiveByteRange) {
            if (childNum > 0) validConsecutiveByteRange->location += node->info.notLeaf.child[0]->numBytes;
            if (childNum > 1) validConsecutiveByteRange->location += node->info.notLeaf.child[1]->numBytes;
        }
        return result;
    }
}
/* Guts of CFStorageGetValueAtIndex(); note that validConsecutiveValueRange is not optional.
   Consults and updates cache.
*/
CF_INLINE void *__CFStorageGetValueAtIndex(CFStorageRef storage, CFIndex idx, CFRange *validConsecutiveValueRange) {
    uint8_t *result;
    if (!(result = __CFStorageGetFromCache(storage, idx, validConsecutiveValueRange))) {
        CFRange rangeInBytes;
        CFStorageNode *resultNode;
        result = (uint8_t *)__CFStorageFindByte(storage, &storage->rootNode, idx * storage->valueSize, &resultNode, &rangeInBytes);
        CFRange rangeInValues = CFRangeMake(rangeInBytes.location / storage->valueSize, rangeInBytes.length / storage->valueSize);
        __CFStorageSetCache(storage, resultNode, rangeInValues.location, rangeInValues.length);
        *validConsecutiveValueRange = rangeInValues;
    }
    return result;
}
// returns refcount==1 node under GC
static CFStorageNode *__CFStorageCreateNode(CFAllocatorRef allocator, bool isLeaf, CFIndex numBytes) {
    CFStorageNode *newNode = (CFStorageNode *)CFAllocatorAllocate(allocator, sizeof(CFStorageNode), __kCFAllocatorGCScannedMemory);
    if (__CFOASafe) __CFSetLastAllocationEventName(newNode, "CFStorage (node)");
    newNode->isLeaf = isLeaf;
    newNode->numBytes = numBytes;
    if (isLeaf) {
        newNode->info.leaf.capacityInBytes = 0;
        newNode->info.leaf.memory = NULL;
    } else {
        newNode->info.notLeaf.child[0] = newNode->info.notLeaf.child[1] = newNode->info.notLeaf.child[2] = NULL;
    }
    return newNode;
}
static void __CFStorageNodeDealloc(CFAllocatorRef allocator, CFStorageNode *node, bool freeNodeItself) {
    if (node->isLeaf) {
        _CFAllocatorDeallocateGC(allocator, node->info.leaf.memory);
    } else {
        CFIndex cnt;
        for (cnt = 0; cnt < 3; cnt++) if (node->info.notLeaf.child[cnt]) __CFStorageNodeDealloc(allocator, node->info.notLeaf.child[cnt], true);
    }
    if (freeNodeItself) _CFAllocatorDeallocateGC(allocator, node);
}
static CFIndex __CFStorageGetNumChildren(CFStorageNode *node) {
    if (!node || node->isLeaf) return 0;
    if (node->info.notLeaf.child[2]) return 3;
    if (node->info.notLeaf.child[1]) return 2;
    if (node->info.notLeaf.child[0]) return 1;
    return 0;
}
/* The boolean compact indicates whether leaf nodes that get smaller should be realloced.
*/
static void __CFStorageDelete(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFRange range, bool compact) {
    if (node->isLeaf) {
        node->numBytes -= range.length;
        // If this node had memory allocated, readjust the bytes...
        if (node->info.leaf.memory) {
            COPYMEM(node->info.leaf.memory + range.location + range.length, node->info.leaf.memory + range.location, node->numBytes - range.location);
            if (compact) __CFStorageAllocLeafNodeMemory(allocator, storage, node, node->numBytes, true);
        }
    } else {
        bool childrenAreLeaves = node->info.notLeaf.child[0]->isLeaf;
        node->numBytes -= range.length;
        while (range.length > 0) {
            CFRange rangeToDelete;
            CFIndex relativeByteNum;
            CFIndex childNum;
            __CFStorageFindChild(node, range.location + range.length, true, &childNum, &relativeByteNum);
            if (range.length > relativeByteNum) {
                rangeToDelete.length = relativeByteNum;
                rangeToDelete.location = 0;
            } else {
                rangeToDelete.length = range.length;
                rangeToDelete.location = relativeByteNum - range.length;
            }
            __CFStorageDelete(allocator, storage, node->info.notLeaf.child[childNum], rangeToDelete, compact);
            if (node->info.notLeaf.child[childNum]->numBytes == 0) {    // Delete empty node and compact
                CFIndex cnt;
                _CFAllocatorDeallocateGC(allocator, node->info.notLeaf.child[childNum]);
                for (cnt = childNum; cnt < 2; cnt++) {
                    __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[cnt], node->info.notLeaf.child[cnt+1]);
                }
                node->info.notLeaf.child[2] = NULL;
            }
            range.length -= rangeToDelete.length;
        }
        // At this point the remaining children are packed
        if (childrenAreLeaves) {
            // Children are leaves; if their total bytes is smaller than a leaf's worth, collapse into one...
            if (node->numBytes > 0 && node->numBytes <= storage->maxLeafCapacity) {
                __CFStorageAllocLeafNodeMemory(allocator, storage, node->info.notLeaf.child[0], node->numBytes, false);
                if (node->info.notLeaf.child[1] && node->info.notLeaf.child[1]->numBytes) {
                    COPYMEM(node->info.notLeaf.child[1]->info.leaf.memory, node->info.notLeaf.child[0]->info.leaf.memory + node->info.notLeaf.child[0]->numBytes, node->info.notLeaf.child[1]->numBytes);
                    if (node->info.notLeaf.child[2] && node->info.notLeaf.child[2]->numBytes) {
                        COPYMEM(node->info.notLeaf.child[2]->info.leaf.memory, node->info.notLeaf.child[0]->info.leaf.memory + node->info.notLeaf.child[0]->numBytes + node->info.notLeaf.child[1]->numBytes, node->info.notLeaf.child[2]->numBytes);
                        __CFStorageNodeDealloc(allocator, node->info.notLeaf.child[2], true);
                        node->info.notLeaf.child[2] = NULL;
                    }
                    __CFStorageNodeDealloc(allocator, node->info.notLeaf.child[1], true);
                    node->info.notLeaf.child[1] = NULL;
                }
                node->info.notLeaf.child[0]->numBytes = node->numBytes;
            }
        } else {
            // Children are not leaves; combine their children to assure each node has 2 or 3 children...
            // (Could try to bypass all this by noting up above whether the number of grandchildren changed...)
            CFStorageNode *gChildren[9];
            CFIndex cCnt, gCnt, cnt;
            CFIndex totalG = 0;    // Total number of grandchildren
            for (cCnt = 0; cCnt < 3; cCnt++) {
                CFStorageNode *child = node->info.notLeaf.child[cCnt];
                if (child) {
                    for (gCnt = 0; gCnt < 3; gCnt++) if (child->info.notLeaf.child[gCnt]) {
                        gChildren[totalG++] = child->info.notLeaf.child[gCnt];
                        child->info.notLeaf.child[gCnt] = NULL;
                    }
                    child->numBytes = 0;
                }
            }
            gCnt = 0;    // Total number of grandchildren placed
            for (cCnt = 0; cCnt < 3; cCnt++) {
                // These tables indicate how many children each child should have, given the total number of grandchildren (last child gets remainder)
                static const unsigned char forChild0[10] = {0, 1, 2, 3, 2, 3, 3, 3, 3, 3};
                static const unsigned char forChild1[10] = {0, 0, 0, 0, 2, 2, 3, 2, 3, 3};
                // sCnt is the number of grandchildren to be placed into child cCnt
                // Depending on child number, pick the right number
                CFIndex sCnt = (cCnt == 0) ? forChild0[totalG] : ((cCnt == 1) ? forChild1[totalG] : totalG);
                // Assure we have that many grandchildren...
                if (sCnt > totalG - gCnt) sCnt = totalG - gCnt;
                if (sCnt) {
                    if (!node->info.notLeaf.child[cCnt]) {
                        CFStorageNode *newNode = __CFStorageCreateNode(allocator, false, 0);
                        __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[cCnt], newNode);
                        Boolean GC = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
                        if (GC) auto_zone_release(auto_zone(), newNode);
                    }
                    for (cnt = 0; cnt < sCnt; cnt++) {
                        node->info.notLeaf.child[cCnt]->numBytes += gChildren[gCnt]->numBytes;
                        __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[cCnt]->info.notLeaf.child[cnt], gChildren[gCnt++]);
                    }
                } else {
                    if (node->info.notLeaf.child[cCnt]) {
                        _CFAllocatorDeallocateGC(allocator, node->info.notLeaf.child[cCnt]);
                        node->info.notLeaf.child[cCnt] = NULL;
                    }
                }
            }
        }
    }
}
/* Returns NULL or additional node to come after this node
   Assumption: size is never > storage->maxLeafCapacity
   Under GC node has a retain count to keep it alive in unregistered pthreads
*/
static CFStorageNode *__CFStorageInsert(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
    if (node->isLeaf) {
        if (size + node->numBytes > storage->maxLeafCapacity) {    // Need to create more child nodes
            if (byteNum == node->numBytes) {    // Inserting at end; easy...
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, size);
                __CFStorageSetCache(storage, newNode, absoluteByteNum / storage->valueSize, size / storage->valueSize);
                return newNode;
            } else if (byteNum == 0) {    // Inserting at front; also easy, but we need to swap node and newNode
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, 0);
                objc_memmove_collectable(newNode, node, sizeof(CFStorageNode));
                node->numBytes = size;
                node->info.leaf.capacityInBytes = 0;
                node->info.leaf.memory = NULL;
                __CFStorageSetCache(storage, node, absoluteByteNum / storage->valueSize, size / storage->valueSize);
                return newNode;
            } else if (byteNum + size <= storage->maxLeafCapacity) {    // Inserting at middle; inserted region will fit into existing child
                // Create new node to hold the overflow
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, node->numBytes - byteNum);
                if (node->info.leaf.memory) {    // We allocate memory lazily...
                    __CFStorageAllocLeafNodeMemory(allocator, storage, newNode, node->numBytes - byteNum, false);
                    COPYMEM(node->info.leaf.memory + byteNum, newNode->info.leaf.memory, node->numBytes - byteNum);
                    __CFStorageAllocLeafNodeMemory(allocator, storage, node, byteNum + size, false);
                }
                node->numBytes = byteNum + size;
                __CFStorageSetCache(storage, node, (absoluteByteNum - byteNum) / storage->valueSize, node->numBytes / storage->valueSize);
                return newNode;
            } else {    // Inserting some of new into one node, rest into another; remember that the assumption is size <= storage->maxLeafCapacity
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, node->numBytes + size - storage->maxLeafCapacity);    // New stuff
                if (node->info.leaf.memory) {    // We allocate memory lazily...
                    __CFStorageAllocLeafNodeMemory(allocator, storage, newNode, node->numBytes + size - storage->maxLeafCapacity, false);
                    COPYMEM(node->info.leaf.memory + byteNum, newNode->info.leaf.memory + byteNum + size - storage->maxLeafCapacity, node->numBytes - byteNum);
                    __CFStorageAllocLeafNodeMemory(allocator, storage, node, storage->maxLeafCapacity, false);
                }
                node->numBytes = storage->maxLeafCapacity;
                __CFStorageSetCache(storage, node, (absoluteByteNum - byteNum) / storage->valueSize, node->numBytes / storage->valueSize);
                return newNode;
            }
        } else {    // No need to create new nodes!
            if (node->info.leaf.memory) {
                __CFStorageAllocLeafNodeMemory(allocator, storage, node, node->numBytes + size, false);
                COPYMEM(node->info.leaf.memory + byteNum, node->info.leaf.memory + byteNum + size, node->numBytes - byteNum);
            }
            node->numBytes += size;
            __CFStorageSetCache(storage, node, (absoluteByteNum - byteNum) / storage->valueSize, node->numBytes / storage->valueSize);
        }
    } else {
        CFIndex relativeByteNum;
        CFIndex childNum;
        CFStorageNode *newNode;
        __CFStorageFindChild(node, byteNum, true, &childNum, &relativeByteNum);
        newNode = __CFStorageInsert(allocator, storage, node->info.notLeaf.child[childNum], relativeByteNum, size, absoluteByteNum);
        if (newNode) {
            if (node->info.notLeaf.child[2] == NULL) {    // There's an empty slot for the new node, cool
                if (childNum == 0) __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[2], node->info.notLeaf.child[1]);    // Make room
                __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[childNum + 1], newNode);
                Boolean GC = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
                if (GC) auto_zone_release(auto_zone(), newNode);
                node->numBytes += size;
            } else {
                CFStorageNode *anotherNode = __CFStorageCreateNode(allocator, false, 0);    // Create another node
                if (childNum == 0) {    // Last two children go to new node
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[0], node->info.notLeaf.child[1]);
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[1], node->info.notLeaf.child[2]);
                    __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[1], newNode);
                    node->info.notLeaf.child[2] = NULL;
                } else if (childNum == 1) {    // Last child goes to new node
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[0], newNode);
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[1], node->info.notLeaf.child[2]);
                    node->info.notLeaf.child[2] = NULL;
                } else {    // New node contains the newcomers...
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[0], node->info.notLeaf.child[2]);
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[1], newNode);
                    node->info.notLeaf.child[2] = NULL;
                }
                if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
                    auto_zone_release(auto_zone(), newNode);
                }
                node->numBytes = node->info.notLeaf.child[0]->numBytes + node->info.notLeaf.child[1]->numBytes;
                anotherNode->numBytes = anotherNode->info.notLeaf.child[0]->numBytes + anotherNode->info.notLeaf.child[1]->numBytes;
                return anotherNode;
            }
        } else {
            node->numBytes += size;
        }
    }
    return NULL;
}
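/* Summary (descriptive, derived from the code): a leaf that overflows returns a newly created
   sibling; an internal node links that sibling into a free child slot if one exists, and
   otherwise splits itself and returns the extra node to its own caller. When the root itself
   splits, CFStorageInsertValues() below grows the tree by one level. */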
CF_INLINE CFIndex __CFStorageGetCount(CFStorageRef storage) {
    return storage->rootNode.numBytes / storage->valueSize;
}
static Boolean __CFStorageEqual(CFTypeRef cf1, CFTypeRef cf2) {
    CFStorageRef storage1 = (CFStorageRef)cf1;
    CFStorageRef storage2 = (CFStorageRef)cf2;
    CFIndex loc, count, valueSize;
    CFRange range1, range2;
    uint8_t *ptr1, *ptr2;

    count = __CFStorageGetCount(storage1);
    if (count != __CFStorageGetCount(storage2)) return false;

    valueSize = __CFStorageGetValueSize(storage1);
    if (valueSize != __CFStorageGetValueSize(storage2)) return false;

    loc = range1.location = range1.length = range2.location = range2.length = 0;
    ptr1 = ptr2 = NULL;

    while (loc < count) {
        CFIndex cntThisTime;
        if (loc >= range1.location + range1.length) ptr1 = (uint8_t *)CFStorageGetValueAtIndex(storage1, loc, &range1);
        if (loc >= range2.location + range2.length) ptr2 = (uint8_t *)CFStorageGetValueAtIndex(storage2, loc, &range2);
        cntThisTime = range1.location + range1.length;
        if (range2.location + range2.length < cntThisTime) cntThisTime = range2.location + range2.length;
        cntThisTime -= loc;
        if (memcmp(ptr1, ptr2, valueSize * cntThisTime) != 0) return false;
        ptr1 += valueSize * cntThisTime;
        ptr2 += valueSize * cntThisTime;
        loc += cntThisTime;
    }
    return true;
}
static CFHashCode __CFStorageHash(CFTypeRef cf) {
    CFStorageRef storage = (CFStorageRef)cf;
    return __CFStorageGetCount(storage);
}
static void __CFStorageDescribeNode(CFStorageNode *node, CFMutableStringRef str, CFIndex level) {
    CFIndex cnt;
    for (cnt = 0; cnt < level; cnt++) CFStringAppendCString(str, " ", CFStringGetSystemEncoding());

    if (node->isLeaf) {
        CFStringAppendFormat(str, NULL, CFSTR("Leaf %d/%d\n"), node->numBytes, node->info.leaf.capacityInBytes);
    } else {
        CFStringAppendFormat(str, NULL, CFSTR("Node %d\n"), node->numBytes);
        for (cnt = 0; cnt < 3; cnt++) if (node->info.notLeaf.child[cnt]) __CFStorageDescribeNode(node->info.notLeaf.child[cnt], str, level+1);
    }
}
static CFIndex __CFStorageGetNodeCapacity(CFStorageNode *node) {
    if (!node) return 0;
    if (node->isLeaf) return node->info.leaf.capacityInBytes;
    return __CFStorageGetNodeCapacity(node->info.notLeaf.child[0]) + __CFStorageGetNodeCapacity(node->info.notLeaf.child[1]) + __CFStorageGetNodeCapacity(node->info.notLeaf.child[2]);
}
CFIndex __CFStorageGetCapacity(CFStorageRef storage) {
    return __CFStorageGetNodeCapacity(&storage->rootNode) / storage->valueSize;
}
CFIndex __CFStorageGetValueSize(CFStorageRef storage) {
    return storage->valueSize;
}
static CFStringRef __CFStorageCopyDescription(CFTypeRef cf) {
    CFStorageRef storage = (CFStorageRef)cf;
    CFMutableStringRef result;
    CFAllocatorRef allocator = CFGetAllocator(storage);
    result = CFStringCreateMutable(allocator, 0);
    CFStringAppendFormat(result, NULL, CFSTR("<CFStorage %p [%p]>[count = %u, capacity = %u]\n"), storage, allocator, __CFStorageGetCount(storage), __CFStorageGetCapacity(storage));
    __CFStorageDescribeNode(&storage->rootNode, result, 0);
    return result;
}
static void __CFStorageDeallocate(CFTypeRef cf) {
    CFStorageRef storage = (CFStorageRef)cf;
    CFAllocatorRef allocator = CFGetAllocator(storage);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) return; // XXX_PCB GC will take care of us.
    __CFStorageNodeDealloc(allocator, &storage->rootNode, false);
}
static CFTypeID __kCFStorageTypeID = _kCFRuntimeNotATypeID;

static const CFRuntimeClass __CFStorageClass = {
    _kCFRuntimeScannedObject,
    "CFStorage",
    NULL,    // init
    NULL,    // copy
    __CFStorageDeallocate,
    __CFStorageEqual,
    __CFStorageHash,
    NULL,    // copyFormattingDesc
    __CFStorageCopyDescription
};

__private_extern__ void __CFStorageInitialize(void) {
    __kCFStorageTypeID = _CFRuntimeRegisterClass(&__CFStorageClass);
}
CFStorageRef CFStorageCreate(CFAllocatorRef allocator, CFIndex valueSize) {
    CFStorageRef storage;
    CFIndex size = sizeof(struct __CFStorage) - sizeof(CFRuntimeBase);
    storage = (CFStorageRef)_CFRuntimeCreateInstance(allocator, __kCFStorageTypeID, size, NULL);
    if (NULL == storage) {
        return NULL;
    }
    storage->valueSize = valueSize;
    CF_SPINLOCK_INIT_FOR_STRUCTS(storage->cacheReaderMemoryAllocationLock);
    storage->cacheGenerationCount = 0;
    storage->cacheParts.locationHi = 0;
    storage->cacheParts.locationLo = 0;
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
    storage->cacheParts.lengthHi = 0;
#endif
    storage->cacheParts.lengthLo = 0;
    storage->cacheParts.cachedNodeHi = 0;
    storage->cacheParts.cachedNodeLo = 0;
    storage->maxLeafCapacity = __CFStorageMaxLeafCapacity;
    if (valueSize && ((storage->maxLeafCapacity % valueSize) != 0)) {
        storage->maxLeafCapacity = (storage->maxLeafCapacity / valueSize) * valueSize;    // Make it fit perfectly (3406853)
    }
    memset(&(storage->rootNode), 0, sizeof(CFStorageNode));
    storage->rootNode.isLeaf = true;
    storage->nodeHint = __kCFAllocatorGCScannedMemory;
    if (__CFOASafe) __CFSetLastAllocationEventName(storage, "CFStorage");
    return storage;
}
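/* Usage sketch (illustrative, not part of the original file): a storage of CFIndex values.

       CFStorageRef store = CFStorageCreate(kCFAllocatorDefault, sizeof(CFIndex));
       CFIndex vals[4] = {10, 20, 30, 40};
       CFStorageInsertValues(store, CFRangeMake(0, 4));          // make room for 4 values
       CFStorageReplaceValues(store, CFRangeMake(0, 4), vals);   // fill them in
       CFIndex out[4];
       CFStorageGetValues(store, CFRangeMake(0, 4), out);        // copy them back out
       CFRelease(store);
*/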
CFTypeID CFStorageGetTypeID(void) {
    return __kCFStorageTypeID;
}
CFIndex CFStorageGetCount(CFStorageRef storage) {
    return __CFStorageGetCount(storage);
}
/* Returns pointer to the specified value
   index and validConsecutiveValueRange are in terms of values
*/
void *CFStorageGetValueAtIndex(CFStorageRef storage, CFIndex idx, CFRange *validConsecutiveValueRange) {
    CFRange range;
    return __CFStorageGetValueAtIndex(storage, idx, validConsecutiveValueRange ? validConsecutiveValueRange : &range);
}
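/* Usage sketch (illustrative; `store` is a hypothetical CFStorageRef): validConsecutiveValueRange
   lets callers walk the storage one leaf at a time instead of doing a tree lookup per value.

       CFRange leafRange;
       CFIndex i = 0, n = CFStorageGetCount(store);
       while (i < n) {
           const uint8_t *p = (const uint8_t *)CFStorageGetValueAtIndex(store, i, &leafRange);
           CFIndex contiguous = leafRange.location + leafRange.length - i;
           // ... process `contiguous` consecutive values starting at p ...
           i += contiguous;
       }
*/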
/* Makes space for range.length values at location range.location
   This function deepens the tree if necessary...
*/
void CFStorageInsertValues(CFStorageRef storage, CFRange range) {
    CFIndex numBytesToInsert = range.length * storage->valueSize;
    CFIndex byteNum = range.location * storage->valueSize;
    while (numBytesToInsert > 0) {
        CFStorageNode *newNode;
        CFAllocatorRef allocator = CFGetAllocator(storage);
        CFIndex insertThisTime = numBytesToInsert;
        if (insertThisTime > storage->maxLeafCapacity) {
            insertThisTime = (storage->maxLeafCapacity / storage->valueSize) * storage->valueSize;
        }
        newNode = __CFStorageInsert(allocator, storage, &storage->rootNode, byteNum, insertThisTime, byteNum);
        if (newNode) {
            CFStorageNode *tempRootNode = __CFStorageCreateNode(allocator, false, 0);    // Will copy the (static) rootNode over to this
            objc_memmove_collectable(tempRootNode, &storage->rootNode, sizeof(CFStorageNode));
            storage->rootNode.isLeaf = false;
            __CFAssignWithWriteBarrier((void **)&storage->rootNode.info.notLeaf.child[0], tempRootNode);
            __CFAssignWithWriteBarrier((void **)&storage->rootNode.info.notLeaf.child[1], newNode);
            if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
                auto_zone_release(auto_zone(), tempRootNode);
                auto_zone_release(auto_zone(), newNode);
            }
            storage->rootNode.info.notLeaf.child[2] = NULL;
            storage->rootNode.numBytes = tempRootNode->numBytes + newNode->numBytes;
#if 1
            // ???
            __CFStorageSetCache(storage, NULL, 0, 0);
#else
            if (storage->cache.cachedNode == &(storage->rootNode)) __CFAssignWithWriteBarrier((void **)&storage->cache.cachedNode, tempRootNode);    // The cache should follow the node
#endif
        }
        numBytesToInsert -= insertThisTime;
        byteNum += insertThisTime;
    }
}
/* Deletes the values in the specified range
   This function gets rid of levels if necessary...
*/
void CFStorageDeleteValues(CFStorageRef storage, CFRange range) {
    CFAllocatorRef allocator = CFGetAllocator(storage);
    range.location *= storage->valueSize;
    range.length *= storage->valueSize;
    __CFStorageDelete(allocator, storage, &storage->rootNode, range, true);
    while (__CFStorageGetNumChildren(&storage->rootNode) == 1) {
        CFStorageNode *child = storage->rootNode.info.notLeaf.child[0];    // The single child
        objc_memmove_collectable(&storage->rootNode, child, sizeof(CFStorageNode));
        _CFAllocatorDeallocateGC(allocator, child);
    }
    if (__CFStorageGetNumChildren(&storage->rootNode) == 0 && !storage->rootNode.isLeaf) {
        storage->rootNode.isLeaf = true;
        storage->rootNode.info.leaf.capacityInBytes = 0;
        storage->rootNode.info.leaf.memory = NULL;
    }
    // !!! Need to update the cache
    __CFStorageSetCache(storage, NULL, 0, 0);
}
void CFStorageGetValues(CFStorageRef storage, CFRange range, void *values) {
    while (range.length > 0) {
        CFRange leafRange;
        void *storagePtr = __CFStorageGetValueAtIndex(storage, range.location, &leafRange);
        CFIndex cntThisTime = range.length;
        if (cntThisTime > leafRange.length - (range.location - leafRange.location)) cntThisTime = leafRange.length - (range.location - leafRange.location);
        COPYMEM(storagePtr, values, cntThisTime * storage->valueSize);
        values = (uint8_t *)values + (cntThisTime * storage->valueSize);
        range.location += cntThisTime;
        range.length -= cntThisTime;
    }
}
unsigned long _CFStorageFastEnumeration(CFStorageRef storage, struct __objcFastEnumerationStateEquivalent *state, void *stackbuffer, unsigned long count) {
    // without trying to understand the data structure, each time through search for block containing index
    CFRange leafRange;
    if (state->state == 0) {    /* first time, get length */
        state->extra[0] = __CFStorageGetCount(storage);
    }
    if (state->state >= state->extra[0]) return 0;
    state->itemsPtr = (unsigned long *)CFStorageGetValueAtIndex(storage, state->state, &leafRange);
    state->state += leafRange.length;
    return leafRange.length;
}
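/* Descriptive note (derived from the code above): state->extra[0] caches the total value count
   on the first call, state->state is the index of the next value to hand out, and each call
   returns one leaf's worth of consecutive values through itemsPtr. */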
void CFStorageApplyFunction(CFStorageRef storage, CFRange range, CFStorageApplierFunction applier, void *context) {
    while (0 < range.length) {
        CFRange leafRange;
        const void *storagePtr;
        CFIndex idx, cnt;
        storagePtr = CFStorageGetValueAtIndex(storage, range.location, &leafRange);
        cnt = __CFMin(range.length, leafRange.location + leafRange.length - range.location);
        for (idx = 0; idx < cnt; idx++) {
            applier(storagePtr, context);
            storagePtr = (const char *)storagePtr + storage->valueSize;
        }
        range.length -= cnt;
        range.location += cnt;
    }
}
void CFStorageReplaceValues(CFStorageRef storage, CFRange range, const void *values) {
    while (range.length > 0) {
        CFRange leafRange;
        void *storagePtr = __CFStorageGetValueAtIndex(storage, range.location, &leafRange);
        CFIndex cntThisTime = range.length;
        if (cntThisTime > leafRange.length - (range.location - leafRange.location)) cntThisTime = leafRange.length - (range.location - leafRange.location);
        COPYMEM(values, storagePtr, cntThisTime * storage->valueSize);
        values = (const uint8_t *)values + (cntThisTime * storage->valueSize);
        range.location += cntThisTime;
        range.length -= cntThisTime;
    }
}
771 static void __CFStorageNodeSetUnscanned(CFStorageNode
*node
, auto_zone_t
*zone
) {
773 auto_zone_set_unscanned(zone
, node
->info
.leaf
.memory
);
775 CFStorageNode
**children
= node
->info
.notLeaf
.child
;
776 if (children
[0]) __CFStorageNodeSetUnscanned(children
[0], zone
);
777 if (children
[1]) __CFStorageNodeSetUnscanned(children
[1], zone
);
778 if (children
[2]) __CFStorageNodeSetUnscanned(children
[2], zone
);
782 __private_extern__
void _CFStorageSetWeak(CFStorageRef storage
) {
783 storage
->nodeHint
= 0;
784 __CFStorageNodeSetUnscanned(&storage
->rootNode
, (auto_zone_t
*)auto_zone());