2 * Copyright (c) 2009 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 Copyright (c) 1999-2009, Apple Inc. All rights reserved.
25 Responsibility: Ali Ozer
29 2-3 tree storing arbitrary sized values.
31 ??? Currently elementSize cannot be greater than storage->maxLeafCapacity, which is less than or equal to __CFStorageMaxLeafCapacity
33 CFStorage is thread-safe for multiple readers, but not thread safe for simultaneous reading and writing.
36 #include "CFStorage.h"
37 #include "CFInternal.h"
39 #if !defined(PAGE_SIZE)
40 #define PAGE_SIZE 4096
43 // Above 15K, malloc switches (??? or used to in early Leopard) to using vm allocates for blocks; it seems best to avoid that.
44 // Also, tests with StorageTimer.c done in 4/07 indicate that 4096 * 3 is better than smaller or larger node sizes.
45 #define __CFStorageMaxLeafCapacity (4096 * 3)
47 // If the max length of a node is less than can fit in a half-word (half of a long), we can get away without ever caching the high half-word the cachedRange
49 #if __CFStorageMaxLeafCapacity > 0xFFFFFFFFULL
50 #define POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD 1
53 #if __CFStorageMaxLeafCapacity > 0xFFFFUL
54 #define POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD 1
58 #define COPYMEM(src,dst,n) objc_memmove_collectable((dst), (src), (n))
59 #define PAGE_LIMIT ((CFIndex)PAGE_SIZE / 2)
61 CF_INLINE
int32_t roundToPage(int32_t num
) {
62 return (num
+ PAGE_SIZE
- 1) & ~(PAGE_SIZE
- 1);
67 /* Each node in the storage. isLeaf determines whether the node is a leaf node or a node inside the tree. If latter, number of children are determined by the number of non-NULL entries in child[]. (NULL entries are at the end.)
69 typedef struct __CFStorageNode
{
70 CFIndex numBytes
; /* Number of actual bytes in this node and all its children */
74 CFIndex capacityInBytes
; // capacityInBytes is capacity of memory; this is either 0, or >= numBytes
78 struct __CFStorageNode
*child
[3];
/* This is the struct used to store the cache in the CFStorage; it enables us to make the cache thread-safe for multiple readers (which update the cache). The values in this cache store half the desired value in the top half, and the genCount of the writer in the low half. This cache is consistent only if all of the genCounts are the same. Note that the cache does not provide thread-safety for readers in the presence of a writer.

The cached range (location, length) is in terms of values; if the cached range is not (0,0), then the cached node needs to be non-NULL and pointing at a leaf node.
*/
typedef struct {
    unsigned long locationHi, locationLo;	// cachedRange.location
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
    unsigned long lengthHi;
#endif
    unsigned long lengthLo;	// cachedRange.length; note that we actually do not bother with lengthHi if __CFStorageMaxLeafCapacity is less than half-word
    unsigned long cachedNodeHi, cachedNodeLo;	// cachedNode
} CFStorageAccessCacheParts;
96 /* The CFStorage object.
101 CFSpinLock_t cacheReaderMemoryAllocationLock
;
102 int cacheGenerationCount
;
103 CFStorageAccessCacheParts cacheParts
;
104 CFIndex maxLeafCapacity
; // In terms of bytes
105 CFStorageNode rootNode
;
106 CFOptionFlags nodeHint
; // __kCFAllocatorGCScannedMemory or 0.
112 /* Allocates the memory and initializes the capacity in a leaf. __CFStorageAllocLeafNodeMemory() is the entry point; __CFStorageAllocLeafNodeMemoryAux is called if actual reallocation is needed. __CFStorageAllocLeafNodeMemoryAux() locks not for mutations (mutations are not thread-safe in general), but for lazy allocation of storage during reading.
114 static void __CFStorageAllocLeafNodeMemoryAux(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex cap
) {
115 __CFSpinLock(&(storage
->cacheReaderMemoryAllocationLock
));
116 __CFAssignWithWriteBarrier((void **)&node
->info
.leaf
.memory
, _CFAllocatorReallocateGC(allocator
, node
->info
.leaf
.memory
, cap
, storage
->nodeHint
)); // This will free...
117 if (__CFOASafe
) __CFSetLastAllocationEventName(node
->info
.leaf
.memory
, "CFStorage (node bytes)");
118 node
->info
.leaf
.capacityInBytes
= cap
;
119 __CFSpinUnlock(&(storage
->cacheReaderMemoryAllocationLock
));
122 CF_INLINE
void __CFStorageAllocLeafNodeMemory(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex cap
, bool compact
) {
123 if (cap
> PAGE_LIMIT
) {
124 cap
= roundToPage(cap
);
125 if (cap
> storage
->maxLeafCapacity
) cap
= storage
->maxLeafCapacity
;
127 cap
= (((cap
+ 63) / 64) * 64);
129 if (compact
? (cap
!= node
->info
.leaf
.capacityInBytes
) : (cap
> node
->info
.leaf
.capacityInBytes
)) __CFStorageAllocLeafNodeMemoryAux(allocator
, storage
, node
, cap
);
// Masks for the split-word cache: each cached word carries data in the top half and a
// generation count in the bottom half.  Half-word width depends on pointer size.
// NOTE(review): the 64-bit and 16-bit-shift variants must be mutually exclusive; the
// __LP64__ selector is restored on that basis.
#if __LP64__
#define genCountMask	0x00000000FFFFFFFFUL
#define dataMask	0xFFFFFFFF00000000UL
#define shiftLowWordBy	32
#else
#define genCountMask	0x0000FFFFUL
#define dataMask	0xFFFF0000UL
#define shiftLowWordBy	16
#endif
145 /* Sets the cache to point at the specified node. loc and len are in terms of values, not bytes. To clear the cache set these two to 0.
146 At least one of node or memory should be non-NULL. memory is consulted first when using the cache.
148 CF_INLINE
void __CFStorageSetCache(CFStorageRef storage
, CFStorageNode
*node
, CFIndex loc
, CFIndex len
) {
149 unsigned int genCount
= ((unsigned int)OSAtomicIncrement32(&storage
->cacheGenerationCount
)) & genCountMask
;
150 CFStorageAccessCacheParts cacheParts
;
151 cacheParts
.locationHi
= (loc
& dataMask
) | genCount
;
152 cacheParts
.locationLo
= (loc
<< shiftLowWordBy
) | genCount
;
153 #if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
154 cacheParts
.lengthHi
= (len
& dataMask
) | genCount
;
156 cacheParts
.lengthLo
= (len
<< shiftLowWordBy
) | genCount
;
157 cacheParts
.cachedNodeHi
= ((unsigned long)node
& dataMask
) | genCount
;
158 cacheParts
.cachedNodeLo
= ((unsigned long)node
<< shiftLowWordBy
) | genCount
;
159 storage
->cacheParts
= cacheParts
;
162 /* Thread-safely get the cached range and node info.
164 CF_INLINE Boolean
__CFStorageGetCache(CFStorageRef storage
, CFRange
*cachedRangePtr
, CFStorageNode
**cachedNodePtr
) {
165 CFStorageAccessCacheParts cacheParts
= storage
->cacheParts
;
167 unsigned int genCount
= cacheParts
.locationHi
& genCountMask
;
169 // Check to make sure the genCounts of all the items are the same; if not, the cache was inconsistent
170 if ((cacheParts
.locationLo
& genCountMask
) == genCount
&&
171 #if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
172 (cacheParts
.lengthHi
& genCountMask
) == genCount
&&
174 (cacheParts
.lengthLo
& genCountMask
) == genCount
&&
175 (cacheParts
.cachedNodeHi
& genCountMask
) == genCount
&&
176 (cacheParts
.cachedNodeLo
& genCountMask
) == genCount
) {
178 *cachedNodePtr
= (CFStorageNode
*)((cacheParts
.cachedNodeHi
& dataMask
) | ((cacheParts
.cachedNodeLo
& dataMask
) >> shiftLowWordBy
));
179 cachedRangePtr
->location
= (cacheParts
.locationHi
& dataMask
) | ((cacheParts
.locationLo
& dataMask
) >> shiftLowWordBy
);
180 cachedRangePtr
->length
= (cacheParts
.lengthLo
& dataMask
) >> shiftLowWordBy
;
181 #if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
182 cachedRangePtr
->length
|= (cacheParts
.lengthHi
& dataMask
);
190 /* Gets the location for the specified absolute loc from the cached info.
191 Returns NULL if the location is not in the cache.
193 CF_INLINE
uint8_t *__CFStorageGetFromCache(CFStorageRef storage
, CFIndex loc
, CFRange
*validConsecutiveValueRange
) {
194 CFRange cachedRange
= {0, 0};
195 CFStorageNode
*cachedNode
= 0;
197 // If we can't get values from the cache, return NULL
198 if (!__CFStorageGetCache(storage
, &cachedRange
, &cachedNode
)) return NULL
;
200 // Check to see if the index is in the cache
201 if (loc
< cachedRange
.location
|| loc
>= cachedRange
.location
+ cachedRange
.length
) return NULL
;
203 // If the cached node has no memory, return here; it will be allocated as a result of the non-cached lookup.
204 if (!cachedNode
->info
.leaf
.memory
) return NULL
;
206 // The cache has consistent values, and in fact, the values we're looking for!
207 uint8_t *result
= cachedNode
->info
.leaf
.memory
+ (loc
- cachedRange
.location
) * storage
->valueSize
;
208 *validConsecutiveValueRange
= cachedRange
;
216 /* Returns the number of the child containing the desired value and the relative index of the value in that child.
217 forInsertion = true means that we are looking for the child in which to insert; this changes the behavior when the index is at the end of a child
218 relativeByteNum (not optional, for performance reasons) returns the relative byte number of the specified byte in the child.
219 Don't call with leaf nodes!
221 CF_INLINE
void __CFStorageFindChild(CFStorageNode
*node
, CFIndex byteNum
, bool forInsertion
, CFIndex
*childNum
, CFIndex
*relativeByteNum
) {
222 if (forInsertion
) byteNum
--; /* If for insertion, we do <= checks, not <, so this accomplishes the same thing */
223 if (byteNum
< node
->info
.notLeaf
.child
[0]->numBytes
) *childNum
= 0;
225 byteNum
-= node
->info
.notLeaf
.child
[0]->numBytes
;
226 if (byteNum
< node
->info
.notLeaf
.child
[1]->numBytes
) *childNum
= 1;
228 byteNum
-= node
->info
.notLeaf
.child
[1]->numBytes
;
232 if (forInsertion
) byteNum
++;
233 *relativeByteNum
= byteNum
;
236 /* Finds the location where the specified byte is stored. If validConsecutiveByteRange is not NULL, returns
237 the range of bytes that are consecutive with this one.
238 !!! Assumes the byteNum is within the range of this node.
240 static void *__CFStorageFindByte(CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFStorageNode
**resultNode
, CFRange
*validConsecutiveByteRange
) {
242 if (validConsecutiveByteRange
) *validConsecutiveByteRange
= CFRangeMake(0, node
->numBytes
);
243 __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage
), storage
, node
, node
->numBytes
, false);
244 if (resultNode
) *resultNode
= node
;
245 return node
->info
.leaf
.memory
+ byteNum
;
249 CFIndex relativeByteNum
;
250 __CFStorageFindChild(node
, byteNum
, false, &childNum
, &relativeByteNum
);
251 result
= __CFStorageFindByte(storage
, node
->info
.notLeaf
.child
[childNum
], relativeByteNum
, resultNode
, validConsecutiveByteRange
);
252 if (validConsecutiveByteRange
) {
253 if (childNum
> 0) validConsecutiveByteRange
->location
+= node
->info
.notLeaf
.child
[0]->numBytes
;
254 if (childNum
> 1) validConsecutiveByteRange
->location
+= node
->info
.notLeaf
.child
[1]->numBytes
;
260 /* Guts of CFStorageGetValueAtIndex(); note that validConsecutiveValueRange is not optional.
261 Consults and updates cache.
263 CF_INLINE
void *__CFStorageGetValueAtIndex(CFStorageRef storage
, CFIndex idx
, CFRange
*validConsecutiveValueRange
) {
265 if (!(result
= __CFStorageGetFromCache(storage
, idx
, validConsecutiveValueRange
))) {
266 CFRange rangeInBytes
;
267 CFStorageNode
*resultNode
;
268 result
= (uint8_t *)__CFStorageFindByte(storage
, &storage
->rootNode
, idx
* storage
->valueSize
, &resultNode
, &rangeInBytes
);
269 CFRange rangeInValues
= CFRangeMake(rangeInBytes
.location
/ storage
->valueSize
, rangeInBytes
.length
/ storage
->valueSize
);
270 __CFStorageSetCache(storage
, resultNode
, rangeInValues
.location
, rangeInValues
.length
);
271 *validConsecutiveValueRange
= rangeInValues
;
276 // returns refcount==1 node under GC
277 static CFStorageNode
*__CFStorageCreateNode(CFAllocatorRef allocator
, bool isLeaf
, CFIndex numBytes
) {
278 CFStorageNode
*newNode
= (CFStorageNode
*)CFAllocatorAllocate(allocator
, sizeof(CFStorageNode
), __kCFAllocatorGCScannedMemory
);
279 if (__CFOASafe
) __CFSetLastAllocationEventName(newNode
, "CFStorage (node)");
280 newNode
->isLeaf
= isLeaf
;
281 newNode
->numBytes
= numBytes
;
283 newNode
->info
.leaf
.capacityInBytes
= 0;
284 newNode
->info
.leaf
.memory
= NULL
;
286 newNode
->info
.notLeaf
.child
[0] = newNode
->info
.notLeaf
.child
[1] = newNode
->info
.notLeaf
.child
[2] = NULL
;
291 static void __CFStorageNodeDealloc(CFAllocatorRef allocator
, CFStorageNode
*node
, bool freeNodeItself
) {
293 _CFAllocatorDeallocateGC(allocator
, node
->info
.leaf
.memory
);
296 for (cnt
= 0; cnt
< 3; cnt
++) if (node
->info
.notLeaf
.child
[cnt
]) __CFStorageNodeDealloc(allocator
, node
->info
.notLeaf
.child
[cnt
], true);
298 if (freeNodeItself
) _CFAllocatorDeallocateGC(allocator
, node
);
301 static CFIndex
__CFStorageGetNumChildren(CFStorageNode
*node
) {
302 if (!node
|| node
->isLeaf
) return 0;
303 if (node
->info
.notLeaf
.child
[2]) return 3;
304 if (node
->info
.notLeaf
.child
[1]) return 2;
305 if (node
->info
.notLeaf
.child
[0]) return 1;
309 /* The boolean compact indicates whether leaf nodes that get smaller should be realloced.
311 static void __CFStorageDelete(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFRange range
, bool compact
) {
313 node
->numBytes
-= range
.length
;
314 // If this node had memory allocated, readjust the bytes...
315 if (node
->info
.leaf
.memory
) {
316 COPYMEM(node
->info
.leaf
.memory
+ range
.location
+ range
.length
, node
->info
.leaf
.memory
+ range
.location
, node
->numBytes
- range
.location
);
317 if (compact
) __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, node
->numBytes
, true);
320 bool childrenAreLeaves
= node
->info
.notLeaf
.child
[0]->isLeaf
;
321 node
->numBytes
-= range
.length
;
322 while (range
.length
> 0) {
323 CFRange rangeToDelete
;
324 CFIndex relativeByteNum
;
326 __CFStorageFindChild(node
, range
.location
+ range
.length
, true, &childNum
, &relativeByteNum
);
327 if (range
.length
> relativeByteNum
) {
328 rangeToDelete
.length
= relativeByteNum
;
329 rangeToDelete
.location
= 0;
331 rangeToDelete
.length
= range
.length
;
332 rangeToDelete
.location
= relativeByteNum
- range
.length
;
334 __CFStorageDelete(allocator
, storage
, node
->info
.notLeaf
.child
[childNum
], rangeToDelete
, compact
);
335 if (node
->info
.notLeaf
.child
[childNum
]->numBytes
== 0) { // Delete empty node and compact
337 _CFAllocatorDeallocateGC(allocator
, node
->info
.notLeaf
.child
[childNum
]);
338 for (cnt
= childNum
; cnt
< 2; cnt
++) {
339 __CFAssignWithWriteBarrier((void **)&node
->info
.notLeaf
.child
[cnt
], node
->info
.notLeaf
.child
[cnt
+1]);
341 node
->info
.notLeaf
.child
[2] = NULL
;
343 range
.length
-= rangeToDelete
.length
;
345 // At this point the remaining children are packed
346 if (childrenAreLeaves
) {
347 // Children are leaves; if their total bytes is smaller than a leaf's worth, collapse into one...
348 if (node
->numBytes
> 0 && node
->numBytes
<= storage
->maxLeafCapacity
) {
349 __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
->info
.notLeaf
.child
[0], node
->numBytes
, false);
350 if (node
->info
.notLeaf
.child
[1] && node
->info
.notLeaf
.child
[1]->numBytes
) {
351 COPYMEM(node
->info
.notLeaf
.child
[1]->info
.leaf
.memory
, node
->info
.notLeaf
.child
[0]->info
.leaf
.memory
+ node
->info
.notLeaf
.child
[0]->numBytes
, node
->info
.notLeaf
.child
[1]->numBytes
);
352 if (node
->info
.notLeaf
.child
[2] && node
->info
.notLeaf
.child
[2]->numBytes
) {
353 COPYMEM(node
->info
.notLeaf
.child
[2]->info
.leaf
.memory
, node
->info
.notLeaf
.child
[0]->info
.leaf
.memory
+ node
->info
.notLeaf
.child
[0]->numBytes
+ node
->info
.notLeaf
.child
[1]->numBytes
, node
->info
.notLeaf
.child
[2]->numBytes
);
354 __CFStorageNodeDealloc(allocator
, node
->info
.notLeaf
.child
[2], true);
355 node
->info
.notLeaf
.child
[2] = NULL
;
357 __CFStorageNodeDealloc(allocator
, node
->info
.notLeaf
.child
[1], true);
358 node
->info
.notLeaf
.child
[1] = NULL
;
360 node
->info
.notLeaf
.child
[0]->numBytes
= node
->numBytes
;
363 // Children are not leaves; combine their children to assure each node has 2 or 3 children...
364 // (Could try to bypass all this by noting up above whether the number of grandchildren changed...)
365 CFStorageNode
*gChildren
[9];
366 CFIndex cCnt
, gCnt
, cnt
;
367 CFIndex totalG
= 0; // Total number of grandchildren
368 for (cCnt
= 0; cCnt
< 3; cCnt
++) {
369 CFStorageNode
*child
= node
->info
.notLeaf
.child
[cCnt
];
371 for (gCnt
= 0; gCnt
< 3; gCnt
++) if (child
->info
.notLeaf
.child
[gCnt
]) {
372 gChildren
[totalG
++] = child
->info
.notLeaf
.child
[gCnt
];
373 child
->info
.notLeaf
.child
[gCnt
] = NULL
;
378 gCnt
= 0; // Total number of grandchildren placed
379 for (cCnt
= 0; cCnt
< 3; cCnt
++) {
380 // These tables indicate how many children each child should have, given the total number of grandchildren (last child gets remainder)
381 static const unsigned char forChild0
[10] = {0, 1, 2, 3, 2, 3, 3, 3, 3, 3};
382 static const unsigned char forChild1
[10] = {0, 0, 0, 0, 2, 2, 3, 2, 3, 3};
383 // sCnt is the number of grandchildren to be placed into child cCnt
384 // Depending on child number, pick the right number
385 CFIndex sCnt
= (cCnt
== 0) ? forChild0
[totalG
] : ((cCnt
== 1) ? forChild1
[totalG
] : totalG
);
386 // Assure we have that many grandchildren...
387 if (sCnt
> totalG
- gCnt
) sCnt
= totalG
- gCnt
;
389 if (!node
->info
.notLeaf
.child
[cCnt
]) {
390 CFStorageNode
*newNode
= __CFStorageCreateNode(allocator
, false, 0);
391 __CFAssignWithWriteBarrier((void **)&node
->info
.notLeaf
.child
[cCnt
], newNode
);
392 Boolean GC
= CF_IS_COLLECTABLE_ALLOCATOR(allocator
);
393 if (GC
) auto_zone_release(auto_zone(), newNode
);
395 for (cnt
= 0; cnt
< sCnt
; cnt
++) {
396 node
->info
.notLeaf
.child
[cCnt
]->numBytes
+= gChildren
[gCnt
]->numBytes
;
397 __CFAssignWithWriteBarrier((void **)&node
->info
.notLeaf
.child
[cCnt
]->info
.notLeaf
.child
[cnt
], gChildren
[gCnt
++]);
400 if (node
->info
.notLeaf
.child
[cCnt
]) {
401 _CFAllocatorDeallocateGC(allocator
, node
->info
.notLeaf
.child
[cCnt
]);
402 node
->info
.notLeaf
.child
[cCnt
] = NULL
;
411 /* Returns NULL or additional node to come after this node
412 Assumption: size is never > storage->maxLeafCapacity
413 Under GC node has a retain count to keep it alive in unregistered pthreads
415 static CFStorageNode
*__CFStorageInsert(CFAllocatorRef allocator
, CFStorageRef storage
, CFStorageNode
*node
, CFIndex byteNum
, CFIndex size
, CFIndex absoluteByteNum
) {
417 if (size
+ node
->numBytes
> storage
->maxLeafCapacity
) { // Need to create more child nodes
418 if (byteNum
== node
->numBytes
) { // Inserting at end; easy...
419 CFStorageNode
*newNode
= __CFStorageCreateNode(allocator
, true, size
);
420 __CFStorageSetCache(storage
, newNode
, absoluteByteNum
/ storage
->valueSize
, size
/ storage
->valueSize
);
422 } else if (byteNum
== 0) { // Inserting at front; also easy, but we need to swap node and newNode
423 CFStorageNode
*newNode
= __CFStorageCreateNode(allocator
, true, 0);
424 objc_memmove_collectable(newNode
, node
, sizeof(CFStorageNode
));
426 node
->numBytes
= size
;
427 node
->info
.leaf
.capacityInBytes
= 0;
428 node
->info
.leaf
.memory
= NULL
;
429 __CFStorageSetCache(storage
, node
, absoluteByteNum
/ storage
->valueSize
, size
/ storage
->valueSize
);
431 } else if (byteNum
+ size
<= storage
->maxLeafCapacity
) { // Inserting at middle; inserted region will fit into existing child
432 // Create new node to hold the overflow
433 CFStorageNode
*newNode
= __CFStorageCreateNode(allocator
, true, node
->numBytes
- byteNum
);
434 if (node
->info
.leaf
.memory
) { // We allocate memory lazily...
435 __CFStorageAllocLeafNodeMemory(allocator
, storage
, newNode
, node
->numBytes
- byteNum
, false);
436 COPYMEM(node
->info
.leaf
.memory
+ byteNum
, newNode
->info
.leaf
.memory
, node
->numBytes
- byteNum
);
437 __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, byteNum
+ size
, false);
439 node
->numBytes
= byteNum
+ size
;
440 __CFStorageSetCache(storage
, node
, (absoluteByteNum
- byteNum
) / storage
->valueSize
, node
->numBytes
/ storage
->valueSize
);
442 } else { // Inserting some of new into one node, rest into another; remember that the assumption is size <= storage->maxLeafCapacity
443 CFStorageNode
*newNode
= __CFStorageCreateNode(allocator
, true, node
->numBytes
+ size
- storage
->maxLeafCapacity
); // New stuff
444 if (node
->info
.leaf
.memory
) { // We allocate memory lazily...
445 __CFStorageAllocLeafNodeMemory(allocator
, storage
, newNode
, node
->numBytes
+ size
- storage
->maxLeafCapacity
, false);
446 COPYMEM(node
->info
.leaf
.memory
+ byteNum
, newNode
->info
.leaf
.memory
+ byteNum
+ size
- storage
->maxLeafCapacity
, node
->numBytes
- byteNum
);
447 __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, storage
->maxLeafCapacity
, false);
449 node
->numBytes
= storage
->maxLeafCapacity
;
450 __CFStorageSetCache(storage
, node
, (absoluteByteNum
- byteNum
) / storage
->valueSize
, node
->numBytes
/ storage
->valueSize
);
453 } else { // No need to create new nodes!
454 if (node
->info
.leaf
.memory
) {
455 __CFStorageAllocLeafNodeMemory(allocator
, storage
, node
, node
->numBytes
+ size
, false);
456 COPYMEM(node
->info
.leaf
.memory
+ byteNum
, node
->info
.leaf
.memory
+ byteNum
+ size
, node
->numBytes
- byteNum
);
458 node
->numBytes
+= size
;
459 __CFStorageSetCache(storage
, node
, (absoluteByteNum
- byteNum
) / storage
->valueSize
, node
->numBytes
/ storage
->valueSize
);
463 CFIndex relativeByteNum
;
465 CFStorageNode
*newNode
;
466 __CFStorageFindChild(node
, byteNum
, true, &childNum
, &relativeByteNum
);
467 newNode
= __CFStorageInsert(allocator
, storage
, node
->info
.notLeaf
.child
[childNum
], relativeByteNum
, size
, absoluteByteNum
);
469 if (node
->info
.notLeaf
.child
[2] == NULL
) { // There's an empty slot for the new node, cool
470 if (childNum
== 0) __CFAssignWithWriteBarrier((void **)&node
->info
.notLeaf
.child
[2], node
->info
.notLeaf
.child
[1]); // Make room
471 __CFAssignWithWriteBarrier((void **)&node
->info
.notLeaf
.child
[childNum
+ 1], newNode
);
472 Boolean GC
= CF_IS_COLLECTABLE_ALLOCATOR(allocator
);
473 if (GC
) auto_zone_release(auto_zone(), newNode
);
474 node
->numBytes
+= size
;
477 CFStorageNode
*anotherNode
= __CFStorageCreateNode(allocator
, false, 0); // Create another node
478 if (childNum
== 0) { // Last two children go to new node
479 __CFAssignWithWriteBarrier((void **)&anotherNode
->info
.notLeaf
.child
[0], node
->info
.notLeaf
.child
[1]);
480 __CFAssignWithWriteBarrier((void **)&anotherNode
->info
.notLeaf
.child
[1], node
->info
.notLeaf
.child
[2]);
481 __CFAssignWithWriteBarrier((void **)&node
->info
.notLeaf
.child
[1], newNode
);
482 node
->info
.notLeaf
.child
[2] = NULL
;
483 } else if (childNum
== 1) { // Last child goes to new node
484 __CFAssignWithWriteBarrier((void **)&anotherNode
->info
.notLeaf
.child
[0], newNode
);
485 __CFAssignWithWriteBarrier((void **)&anotherNode
->info
.notLeaf
.child
[1], node
->info
.notLeaf
.child
[2]);
486 node
->info
.notLeaf
.child
[2] = NULL
;
487 } else { // New node contains the new comers...
488 __CFAssignWithWriteBarrier((void **)&anotherNode
->info
.notLeaf
.child
[0], node
->info
.notLeaf
.child
[2]);
489 __CFAssignWithWriteBarrier((void **)&anotherNode
->info
.notLeaf
.child
[1], newNode
);
490 node
->info
.notLeaf
.child
[2] = NULL
;
492 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator
)) {
493 auto_zone_release(auto_zone(), newNode
);
495 node
->numBytes
= node
->info
.notLeaf
.child
[0]->numBytes
+ node
->info
.notLeaf
.child
[1]->numBytes
;
496 anotherNode
->numBytes
= anotherNode
->info
.notLeaf
.child
[0]->numBytes
+ anotherNode
->info
.notLeaf
.child
[1]->numBytes
;
500 node
->numBytes
+= size
;
506 CF_INLINE CFIndex
__CFStorageGetCount(CFStorageRef storage
) {
507 return storage
->rootNode
.numBytes
/ storage
->valueSize
;
510 static Boolean
__CFStorageEqual(CFTypeRef cf1
, CFTypeRef cf2
) {
511 CFStorageRef storage1
= (CFStorageRef
)cf1
;
512 CFStorageRef storage2
= (CFStorageRef
)cf2
;
513 CFIndex loc
, count
, valueSize
;
514 CFRange range1
, range2
;
515 uint8_t *ptr1
, *ptr2
;
517 count
= __CFStorageGetCount(storage1
);
518 if (count
!= __CFStorageGetCount(storage2
)) return false;
520 valueSize
= __CFStorageGetValueSize(storage1
);
521 if (valueSize
!= __CFStorageGetValueSize(storage2
)) return false;
523 loc
= range1
.location
= range1
.length
= range2
.location
= range2
.length
= 0;
526 while (loc
< count
) {
528 if (loc
>= range1
.location
+ range1
.length
) ptr1
= (uint8_t *)CFStorageGetValueAtIndex(storage1
, loc
, &range1
);
529 if (loc
>= range2
.location
+ range2
.length
) ptr2
= (uint8_t *)CFStorageGetValueAtIndex(storage2
, loc
, &range2
);
530 cntThisTime
= range1
.location
+ range1
.length
;
531 if (range2
.location
+ range2
.length
< cntThisTime
) cntThisTime
= range2
.location
+ range2
.length
;
533 if (memcmp(ptr1
, ptr2
, valueSize
* cntThisTime
) != 0) return false;
534 ptr1
+= valueSize
* cntThisTime
;
535 ptr2
+= valueSize
* cntThisTime
;
541 static CFHashCode
__CFStorageHash(CFTypeRef cf
) {
542 CFStorageRef Storage
= (CFStorageRef
)cf
;
543 return __CFStorageGetCount(Storage
);
546 static void __CFStorageDescribeNode(CFStorageNode
*node
, CFMutableStringRef str
, CFIndex level
) {
548 for (cnt
= 0; cnt
< level
; cnt
++) CFStringAppendCString(str
, " ", CFStringGetSystemEncoding());
551 CFStringAppendFormat(str
, NULL
, CFSTR("Leaf %d/%d\n"), node
->numBytes
, node
->info
.leaf
.capacityInBytes
);
553 CFStringAppendFormat(str
, NULL
, CFSTR("Node %d\n"), node
->numBytes
);
554 for (cnt
= 0; cnt
< 3; cnt
++) if (node
->info
.notLeaf
.child
[cnt
]) __CFStorageDescribeNode(node
->info
.notLeaf
.child
[cnt
], str
, level
+1);
558 static CFIndex
__CFStorageGetNodeCapacity(CFStorageNode
*node
) {
560 if (node
->isLeaf
) return node
->info
.leaf
.capacityInBytes
;
561 return __CFStorageGetNodeCapacity(node
->info
.notLeaf
.child
[0]) + __CFStorageGetNodeCapacity(node
->info
.notLeaf
.child
[1]) + __CFStorageGetNodeCapacity(node
->info
.notLeaf
.child
[2]);
564 CFIndex
__CFStorageGetCapacity(CFStorageRef storage
) {
565 return __CFStorageGetNodeCapacity(&storage
->rootNode
) / storage
->valueSize
;
568 CFIndex
__CFStorageGetValueSize(CFStorageRef storage
) {
569 return storage
->valueSize
;
572 static CFStringRef
__CFStorageCopyDescription(CFTypeRef cf
) {
573 CFStorageRef storage
= (CFStorageRef
)cf
;
574 CFMutableStringRef result
;
575 CFAllocatorRef allocator
= CFGetAllocator(storage
);
576 result
= CFStringCreateMutable(allocator
, 0);
577 CFStringAppendFormat(result
, NULL
, CFSTR("<CFStorage %p [%p]>[count = %u, capacity = %u]\n"), storage
, allocator
, __CFStorageGetCount(storage
), __CFStorageGetCapacity(storage
));
578 __CFStorageDescribeNode(&storage
->rootNode
, result
, 0);
582 static void __CFStorageDeallocate(CFTypeRef cf
) {
583 CFStorageRef storage
= (CFStorageRef
)cf
;
584 CFAllocatorRef allocator
= CFGetAllocator(storage
);
585 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator
)) return; // XXX_PCB GC will take care of us.
586 __CFStorageNodeDealloc(allocator
, &storage
->rootNode
, false);
589 static CFTypeID __kCFStorageTypeID
= _kCFRuntimeNotATypeID
;
591 static const CFRuntimeClass __CFStorageClass
= {
592 _kCFRuntimeScannedObject
,
596 __CFStorageDeallocate
,
600 __CFStorageCopyDescription
603 __private_extern__
void __CFStorageInitialize(void) {
604 __kCFStorageTypeID
= _CFRuntimeRegisterClass(&__CFStorageClass
);
609 CFStorageRef
CFStorageCreate(CFAllocatorRef allocator
, CFIndex valueSize
) {
610 CFStorageRef storage
;
611 CFIndex size
= sizeof(struct __CFStorage
) - sizeof(CFRuntimeBase
);
612 storage
= (CFStorageRef
)_CFRuntimeCreateInstance(allocator
, __kCFStorageTypeID
, size
, NULL
);
613 if (NULL
== storage
) {
616 storage
->valueSize
= valueSize
;
617 CF_SPINLOCK_INIT_FOR_STRUCTS(storage
->cacheReaderMemoryAllocationLock
);
618 storage
->cacheGenerationCount
= 0;
619 storage
->cacheParts
.locationHi
= 0;
620 storage
->cacheParts
.locationLo
= 0;
621 #if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
622 storage
->cacheParts
.lengthHi
= 0;
624 storage
->cacheParts
.lengthLo
= 0;
625 storage
->cacheParts
.cachedNodeHi
= 0;
626 storage
->cacheParts
.cachedNodeLo
= 0;
627 storage
->maxLeafCapacity
= __CFStorageMaxLeafCapacity
;
628 if (valueSize
&& ((storage
->maxLeafCapacity
% valueSize
) != 0)) {
629 storage
->maxLeafCapacity
= (storage
->maxLeafCapacity
/ valueSize
) * valueSize
; // Make it fit perfectly (3406853)
631 memset(&(storage
->rootNode
), 0, sizeof(CFStorageNode
));
632 storage
->rootNode
.isLeaf
= true;
633 storage
->nodeHint
= __kCFAllocatorGCScannedMemory
;
634 if (__CFOASafe
) __CFSetLastAllocationEventName(storage
, "CFStorage");
638 CFTypeID
CFStorageGetTypeID(void) {
639 return __kCFStorageTypeID
;
642 CFIndex
CFStorageGetCount(CFStorageRef storage
) {
643 return __CFStorageGetCount(storage
);
646 /* Returns pointer to the specified value
647 index and validConsecutiveValueRange are in terms of values
649 void *CFStorageGetValueAtIndex(CFStorageRef storage
, CFIndex idx
, CFRange
*validConsecutiveValueRange
) {
651 return __CFStorageGetValueAtIndex(storage
, idx
, validConsecutiveValueRange
? validConsecutiveValueRange
: &range
);
654 /* Makes space for range.length values at location range.location
655 This function deepens the tree if necessary...
657 void CFStorageInsertValues(CFStorageRef storage
, CFRange range
) {
658 CFIndex numBytesToInsert
= range
.length
* storage
->valueSize
;
659 CFIndex byteNum
= range
.location
* storage
->valueSize
;
660 while (numBytesToInsert
> 0) {
661 CFStorageNode
*newNode
;
662 CFAllocatorRef allocator
= CFGetAllocator(storage
);
663 CFIndex insertThisTime
= numBytesToInsert
;
664 if (insertThisTime
> storage
->maxLeafCapacity
) {
665 insertThisTime
= (storage
->maxLeafCapacity
/ storage
->valueSize
) * storage
->valueSize
;
667 newNode
= __CFStorageInsert(allocator
, storage
, &storage
->rootNode
, byteNum
, insertThisTime
, byteNum
);
669 CFStorageNode
*tempRootNode
= __CFStorageCreateNode(allocator
, false, 0); // Will copy the (static) rootNode over to this
670 objc_memmove_collectable(tempRootNode
, &storage
->rootNode
, sizeof(CFStorageNode
));
671 storage
->rootNode
.isLeaf
= false;
672 __CFAssignWithWriteBarrier((void **)&storage
->rootNode
.info
.notLeaf
.child
[0], tempRootNode
);
673 __CFAssignWithWriteBarrier((void **)&storage
->rootNode
.info
.notLeaf
.child
[1], newNode
);
674 if (CF_IS_COLLECTABLE_ALLOCATOR(allocator
)) {
675 auto_zone_release(auto_zone(), tempRootNode
);
676 auto_zone_release(auto_zone(), newNode
);
678 storage
->rootNode
.info
.notLeaf
.child
[2] = NULL
;
679 storage
->rootNode
.numBytes
= tempRootNode
->numBytes
+ newNode
->numBytes
;
682 __CFStorageSetCache(storage
, NULL
, 0, 0);
684 if (storage
->cache
.cachedNode
== &(storage
->rootNode
)) __CFAssignWithWriteBarrier((void **)&storage
->cache
.cachedNode
, tempRootNode
); // The cache should follow the node
687 numBytesToInsert
-= insertThisTime
;
688 byteNum
+= insertThisTime
;
692 /* Deletes the values in the specified range
693 This function gets rid of levels if necessary...
695 void CFStorageDeleteValues(CFStorageRef storage
, CFRange range
) {
696 CFAllocatorRef allocator
= CFGetAllocator(storage
);
697 range
.location
*= storage
->valueSize
;
698 range
.length
*= storage
->valueSize
;
699 __CFStorageDelete(allocator
, storage
, &storage
->rootNode
, range
, true);
700 while (__CFStorageGetNumChildren(&storage
->rootNode
) == 1) {
701 CFStorageNode
*child
= storage
->rootNode
.info
.notLeaf
.child
[0]; // The single child
702 objc_memmove_collectable(&storage
->rootNode
, child
, sizeof(CFStorageNode
));
703 _CFAllocatorDeallocateGC(allocator
, child
);
705 if (__CFStorageGetNumChildren(&storage
->rootNode
) == 0 && !storage
->rootNode
.isLeaf
) {
706 storage
->rootNode
.isLeaf
= true;
707 storage
->rootNode
.info
.leaf
.capacityInBytes
= 0;
708 storage
->rootNode
.info
.leaf
.memory
= NULL
;
710 // !!! Need to update the cache
711 __CFStorageSetCache(storage
, NULL
, 0, 0);
714 void CFStorageGetValues(CFStorageRef storage
, CFRange range
, void *values
) {
715 while (range
.length
> 0) {
717 void *storagePtr
= __CFStorageGetValueAtIndex(storage
, range
.location
, &leafRange
);
718 CFIndex cntThisTime
= range
.length
;
719 if (cntThisTime
> leafRange
.length
- (range
.location
- leafRange
.location
)) cntThisTime
= leafRange
.length
- (range
.location
- leafRange
.location
);
720 COPYMEM(storagePtr
, values
, cntThisTime
* storage
->valueSize
);
721 values
= (uint8_t *)values
+ (cntThisTime
* storage
->valueSize
);
722 range
.location
+= cntThisTime
;
723 range
.length
-= cntThisTime
;
727 unsigned long _CFStorageFastEnumeration(CFStorageRef storage
, struct __objcFastEnumerationStateEquivalent
*state
, void *stackbuffer
, unsigned long count
) {
728 // without trying to understand the data structure, each time through search for block containing index
730 if (state
->state
== 0) { /* first time, get length */
731 state
->extra
[0] = __CFStorageGetCount(storage
);
733 if (state
->state
>= state
->extra
[0]) return 0;
734 state
->itemsPtr
= (unsigned long *)CFStorageGetValueAtIndex(storage
, state
->state
, &leafRange
);
735 state
->state
+= leafRange
.length
;
736 return leafRange
.length
;
739 void CFStorageApplyFunction(CFStorageRef storage
, CFRange range
, CFStorageApplierFunction applier
, void *context
) {
740 while (0 < range
.length
) {
742 const void *storagePtr
;
744 storagePtr
= CFStorageGetValueAtIndex(storage
, range
.location
, &leafRange
);
745 cnt
= __CFMin(range
.length
, leafRange
.location
+ leafRange
.length
- range
.location
);
746 for (idx
= 0; idx
< cnt
; idx
++) {
747 applier(storagePtr
, context
);
748 storagePtr
= (const char *)storagePtr
+ storage
->valueSize
;
751 range
.location
+= cnt
;
755 void CFStorageReplaceValues(CFStorageRef storage
, CFRange range
, const void *values
) {
756 while (range
.length
> 0) {
758 void *storagePtr
= __CFStorageGetValueAtIndex(storage
, range
.location
, &leafRange
);
759 CFIndex cntThisTime
= range
.length
;
760 if (cntThisTime
> leafRange
.length
- (range
.location
- leafRange
.location
)) cntThisTime
= leafRange
.length
- (range
.location
- leafRange
.location
);
761 COPYMEM(values
, storagePtr
, cntThisTime
* storage
->valueSize
);
762 values
= (const uint8_t *)values
+ (cntThisTime
* storage
->valueSize
);
763 range
.location
+= cntThisTime
;
764 range
.length
-= cntThisTime
;
768 /* Used by CFArray.c */
770 static void __CFStorageNodeSetUnscanned(CFStorageNode
*node
, auto_zone_t
*zone
) {
772 auto_zone_set_unscanned(zone
, node
->info
.leaf
.memory
);
774 CFStorageNode
**children
= node
->info
.notLeaf
.child
;
775 if (children
[0]) __CFStorageNodeSetUnscanned(children
[0], zone
);
776 if (children
[1]) __CFStorageNodeSetUnscanned(children
[1], zone
);
777 if (children
[2]) __CFStorageNodeSetUnscanned(children
[2], zone
);
781 __private_extern__
void _CFStorageSetWeak(CFStorageRef storage
) {
782 storage
->nodeHint
= 0;
783 __CFStorageNodeSetUnscanned(&storage
->rootNode
, (auto_zone_t
*)auto_zone());