]> git.saurik.com Git - apple/cf.git/blob - CFStorage.c
CF-550.tar.gz
[apple/cf.git] / CFStorage.c
1 /*
2 * Copyright (c) 2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /* CFStorage.c
24 Copyright (c) 1999-2009, Apple Inc. All rights reserved.
25 Responsibility: Ali Ozer
26 */
27
28 /*
29 2-3 tree storing arbitrary sized values.
30
31 ??? Currently elementSize cannot be greater than storage->maxLeafCapacity, which is less than or equal to __CFStorageMaxLeafCapacity
32
33 CFStorage is thread-safe for multiple readers, but not thread safe for simultaneous reading and writing.
34 */
35
36 #include "CFStorage.h"
37 #include "CFInternal.h"
38
39 #if !defined(PAGE_SIZE)
40 #define PAGE_SIZE 4096
41 #endif
42
43 // Above 15K, malloc switches (??? or used to in early Leopard) to using vm allocates for blocks; it seems best to avoid that.
44 // Also, tests with StorageTimer.c done in 4/07 indicate that 4096 * 3 is better than smaller or larger node sizes.
45 #define __CFStorageMaxLeafCapacity (4096 * 3)
46
47 // If the max length of a node is less than can fit in a half-word (half of a long), we can get away without ever caching the high half-word the cachedRange
48 #if __LP64__
49 #if __CFStorageMaxLeafCapacity > 0xFFFFFFFFULL
50 #define POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD 1
51 #endif
52 #else
53 #if __CFStorageMaxLeafCapacity > 0xFFFFUL
54 #define POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD 1
55 #endif
56 #endif
57
58 #define COPYMEM(src,dst,n) objc_memmove_collectable((dst), (src), (n))
59 #define PAGE_LIMIT ((CFIndex)PAGE_SIZE / 2)
60
61 CF_INLINE int32_t roundToPage(int32_t num) {
62 return (num + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
63 }
64
65
66
/* Each node in the storage. isLeaf determines whether the node is a leaf node or a node inside the tree. If latter, number of children are determined by the number of non-NULL entries in child[]. (NULL entries are at the end.)
*/
typedef struct __CFStorageNode {
    CFIndex numBytes;	/* Number of actual bytes in this node and all its children */
    bool isLeaf;	/* Discriminates the info union below */
    union {
        struct {
            CFIndex capacityInBytes;	// capacityInBytes is capacity of memory; this is either 0, or >= numBytes
            uint8_t *memory;		// Value bytes; allocated lazily, so may be NULL even when numBytes > 0
        } leaf;
        struct {
            struct __CFStorageNode *child[3];	// 2-3 tree children; non-NULL entries are packed at the front
        } notLeaf;
    } info;
} CFStorageNode;
82
/* This is the struct used to store the cache in the CFStorage; it enables us to make the cache thread-safe for multiple readers (which update the cache). The values in this cache store half the desired value in the top half, and the genCount of the writer in the low half. This cache is consistent only if all of the genCounts are the same. Note that the cache does not provide thread-safety for readers in the presence of a writer.

The cached range (location, length) is in terms of values; if the cached range is not (0,0), then the cached node needs to be non-NULL and pointing at a leaf node.
*/
typedef struct {
    unsigned long locationHi, locationLo;	// cachedRange.location, split into high/low half-words
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
    unsigned long lengthHi;			// high half-word of cachedRange.length
#endif
    unsigned long lengthLo;	// cachedRange.length; note that we actually do not bother with lengthHi if __CFStorageMaxLeafCapacity is less than half-word
    unsigned long cachedNodeHi, cachedNodeLo;	// cachedNode (a CFStorageNode *), split the same way
} CFStorageAccessCacheParts;
95
/* The CFStorage object.
*/
struct __CFStorage {
    CFRuntimeBase base;
    CFIndex valueSize;				// Size of one stored value, in bytes
    CFSpinLock_t cacheReaderMemoryAllocationLock;	// Serializes the lazy leaf-buffer allocation that readers may trigger
    int cacheGenerationCount;			// Bumped on every cache write; readers validate all cache words against it
    CFStorageAccessCacheParts cacheParts;	// Last-accessed-leaf cache (see CFStorageAccessCacheParts above)
    CFIndex maxLeafCapacity;			// In terms of bytes
    CFStorageNode rootNode;			// Root of the 2-3 tree, embedded inline (not separately allocated)
    CFOptionFlags nodeHint;			// __kCFAllocatorGCScannedMemory or 0.
};
108
109
110
111
/* Allocates the memory and initializes the capacity in a leaf. __CFStorageAllocLeafNodeMemory() is the entry point; __CFStorageAllocLeafNodeMemoryAux is called if actual reallocation is needed. __CFStorageAllocLeafNodeMemoryAux() locks not for mutations (mutations are not thread-safe in general), but for lazy allocation of storage during reading.
*/
static void __CFStorageAllocLeafNodeMemoryAux(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex cap) {
    __CFSpinLock(&(storage->cacheReaderMemoryAllocationLock));
    // Reallocate the leaf's byte buffer through a write barrier so the GC sees the new pointer.
    __CFAssignWithWriteBarrier((void **)&node->info.leaf.memory, _CFAllocatorReallocateGC(allocator, node->info.leaf.memory, cap, storage->nodeHint));	// This will free...
    if (__CFOASafe) __CFSetLastAllocationEventName(node->info.leaf.memory, "CFStorage (node bytes)");
    node->info.leaf.capacityInBytes = cap;
    __CFSpinUnlock(&(storage->cacheReaderMemoryAllocationLock));
}
121
122 CF_INLINE void __CFStorageAllocLeafNodeMemory(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex cap, bool compact) {
123 if (cap > PAGE_LIMIT) {
124 cap = roundToPage(cap);
125 if (cap > storage->maxLeafCapacity) cap = storage->maxLeafCapacity;
126 } else {
127 cap = (((cap + 63) / 64) * 64);
128 }
129 if (compact ? (cap != node->info.leaf.capacityInBytes) : (cap > node->info.leaf.capacityInBytes)) __CFStorageAllocLeafNodeMemoryAux(allocator, storage, node, cap);
130 }
131
132
133
134
135 #if __LP64__
136 #define genCountMask 0x00000000FFFFFFFFUL
137 #define dataMask 0xFFFFFFFF00000000UL
138 #define shiftLowWordBy 32
139 #else
140 #define genCountMask 0x0000FFFFUL
141 #define dataMask 0xFFFF0000UL
142 #define shiftLowWordBy 16
143 #endif
144
/* Sets the cache to point at the specified node. loc and len are in terms of values, not bytes. To clear the cache set these two to 0.
   At least one of node or memory should be non-NULL. memory is consulted first when using the cache.
   Each word written below carries half of its real value in the top half-word and
   the writer's generation count in the bottom half-word; __CFStorageGetCache()
   accepts the cache only when every word carries the same generation count.
*/
CF_INLINE void __CFStorageSetCache(CFStorageRef storage, CFStorageNode *node, CFIndex loc, CFIndex len) {
    // Atomic bump lets concurrent readers detect a torn (partially written) cache.
    unsigned int genCount = ((unsigned int)OSAtomicIncrement32(&storage->cacheGenerationCount)) & genCountMask;
    CFStorageAccessCacheParts cacheParts;
    cacheParts.locationHi = (loc & dataMask) | genCount;
    cacheParts.locationLo = (loc << shiftLowWordBy) | genCount;
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
    cacheParts.lengthHi = (len & dataMask) | genCount;
#endif
    cacheParts.lengthLo = (len << shiftLowWordBy) | genCount;
    cacheParts.cachedNodeHi = ((unsigned long)node & dataMask) | genCount;
    cacheParts.cachedNodeLo = ((unsigned long)node << shiftLowWordBy) | genCount;
    storage->cacheParts = cacheParts;
}
161
/* Thread-safely get the cached range and node info.
   Returns true only when every snapshot word carries the same generation count,
   i.e. no writer raced us while we copied cacheParts; the half-words are then
   reassembled into the full range and node pointer.
*/
CF_INLINE Boolean __CFStorageGetCache(CFStorageRef storage, CFRange *cachedRangePtr, CFStorageNode **cachedNodePtr) {
    CFStorageAccessCacheParts cacheParts = storage->cacheParts;	// Snapshot; may be torn — detected below

    unsigned int genCount = cacheParts.locationHi & genCountMask;

    // Check to make sure the genCounts of all the items are the same; if not, the cache was inconsistent
    if ((cacheParts.locationLo & genCountMask) == genCount &&
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
        (cacheParts.lengthHi & genCountMask) == genCount &&
#endif
        (cacheParts.lengthLo & genCountMask) == genCount &&
        (cacheParts.cachedNodeHi & genCountMask) == genCount &&
        (cacheParts.cachedNodeLo & genCountMask) == genCount) {

        // Recombine: high word keeps its top half in place, low word's top half shifts down.
        *cachedNodePtr = (CFStorageNode *)((cacheParts.cachedNodeHi & dataMask) | ((cacheParts.cachedNodeLo & dataMask) >> shiftLowWordBy));
        cachedRangePtr->location = (cacheParts.locationHi & dataMask) | ((cacheParts.locationLo & dataMask) >> shiftLowWordBy);
        cachedRangePtr->length = (cacheParts.lengthLo & dataMask) >> shiftLowWordBy;
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
        cachedRangePtr->length |= (cacheParts.lengthHi & dataMask);
#endif

        return true;
    }
    return false;
}
189
/* Gets the location for the specified absolute loc from the cached info.
   Returns NULL if the location is not in the cache.
*/
CF_INLINE uint8_t *__CFStorageGetFromCache(CFStorageRef storage, CFIndex loc, CFRange *validConsecutiveValueRange) {
    CFRange cachedRange = {0, 0};
    CFStorageNode *cachedNode = 0;

    // If we can't get values from the cache, return NULL
    if (!__CFStorageGetCache(storage, &cachedRange, &cachedNode)) return NULL;

    // Check to see if the index is in the cache
    // (A cleared cache has range (0,0), so this also bails before cachedNode is dereferenced.)
    if (loc < cachedRange.location || loc >= cachedRange.location + cachedRange.length) return NULL;

    // If the cached node has no memory, return here; it will be allocated as a result of the non-cached lookup.
    if (!cachedNode->info.leaf.memory) return NULL;

    // The cache has consistent values, and in fact, the values we're looking for!
    uint8_t *result = cachedNode->info.leaf.memory + (loc - cachedRange.location) * storage->valueSize;
    *validConsecutiveValueRange = cachedRange;

    return result;
}
212
213
214
215
216 /* Returns the number of the child containing the desired value and the relative index of the value in that child.
217 forInsertion = true means that we are looking for the child in which to insert; this changes the behavior when the index is at the end of a child
218 relativeByteNum (not optional, for performance reasons) returns the relative byte number of the specified byte in the child.
219 Don't call with leaf nodes!
220 */
221 CF_INLINE void __CFStorageFindChild(CFStorageNode *node, CFIndex byteNum, bool forInsertion, CFIndex *childNum, CFIndex *relativeByteNum) {
222 if (forInsertion) byteNum--; /* If for insertion, we do <= checks, not <, so this accomplishes the same thing */
223 if (byteNum < node->info.notLeaf.child[0]->numBytes) *childNum = 0;
224 else {
225 byteNum -= node->info.notLeaf.child[0]->numBytes;
226 if (byteNum < node->info.notLeaf.child[1]->numBytes) *childNum = 1;
227 else {
228 byteNum -= node->info.notLeaf.child[1]->numBytes;
229 *childNum = 2;
230 }
231 }
232 if (forInsertion) byteNum++;
233 *relativeByteNum = byteNum;
234 }
235
236 /* Finds the location where the specified byte is stored. If validConsecutiveByteRange is not NULL, returns
237 the range of bytes that are consecutive with this one.
238 !!! Assumes the byteNum is within the range of this node.
239 */
240 static void *__CFStorageFindByte(CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFStorageNode **resultNode, CFRange *validConsecutiveByteRange) {
241 if (node->isLeaf) {
242 if (validConsecutiveByteRange) *validConsecutiveByteRange = CFRangeMake(0, node->numBytes);
243 __CFStorageAllocLeafNodeMemory(CFGetAllocator(storage), storage, node, node->numBytes, false);
244 if (resultNode) *resultNode = node;
245 return node->info.leaf.memory + byteNum;
246 } else {
247 void *result;
248 CFIndex childNum;
249 CFIndex relativeByteNum;
250 __CFStorageFindChild(node, byteNum, false, &childNum, &relativeByteNum);
251 result = __CFStorageFindByte(storage, node->info.notLeaf.child[childNum], relativeByteNum, resultNode, validConsecutiveByteRange);
252 if (validConsecutiveByteRange) {
253 if (childNum > 0) validConsecutiveByteRange->location += node->info.notLeaf.child[0]->numBytes;
254 if (childNum > 1) validConsecutiveByteRange->location += node->info.notLeaf.child[1]->numBytes;
255 }
256 return result;
257 }
258 }
259
260 /* Guts of CFStorageGetValueAtIndex(); note that validConsecutiveValueRange is not optional.
261 Consults and updates cache.
262 */
263 CF_INLINE void *__CFStorageGetValueAtIndex(CFStorageRef storage, CFIndex idx, CFRange *validConsecutiveValueRange) {
264 uint8_t *result;
265 if (!(result = __CFStorageGetFromCache(storage, idx, validConsecutiveValueRange))) {
266 CFRange rangeInBytes;
267 CFStorageNode *resultNode;
268 result = (uint8_t *)__CFStorageFindByte(storage, &storage->rootNode, idx * storage->valueSize, &resultNode, &rangeInBytes);
269 CFRange rangeInValues = CFRangeMake(rangeInBytes.location / storage->valueSize, rangeInBytes.length / storage->valueSize);
270 __CFStorageSetCache(storage, resultNode, rangeInValues.location, rangeInValues.length);
271 *validConsecutiveValueRange = rangeInValues;
272 }
273 return result;
274 }
275
276 // returns refcount==1 node under GC
277 static CFStorageNode *__CFStorageCreateNode(CFAllocatorRef allocator, bool isLeaf, CFIndex numBytes) {
278 CFStorageNode *newNode = (CFStorageNode *)CFAllocatorAllocate(allocator, sizeof(CFStorageNode), __kCFAllocatorGCScannedMemory);
279 if (__CFOASafe) __CFSetLastAllocationEventName(newNode, "CFStorage (node)");
280 newNode->isLeaf = isLeaf;
281 newNode->numBytes = numBytes;
282 if (isLeaf) {
283 newNode->info.leaf.capacityInBytes = 0;
284 newNode->info.leaf.memory = NULL;
285 } else {
286 newNode->info.notLeaf.child[0] = newNode->info.notLeaf.child[1] = newNode->info.notLeaf.child[2] = NULL;
287 }
288 return newNode;
289 }
290
291 static void __CFStorageNodeDealloc(CFAllocatorRef allocator, CFStorageNode *node, bool freeNodeItself) {
292 if (node->isLeaf) {
293 _CFAllocatorDeallocateGC(allocator, node->info.leaf.memory);
294 } else {
295 int cnt;
296 for (cnt = 0; cnt < 3; cnt++) if (node->info.notLeaf.child[cnt]) __CFStorageNodeDealloc(allocator, node->info.notLeaf.child[cnt], true);
297 }
298 if (freeNodeItself) _CFAllocatorDeallocateGC(allocator, node);
299 }
300
301 static CFIndex __CFStorageGetNumChildren(CFStorageNode *node) {
302 if (!node || node->isLeaf) return 0;
303 if (node->info.notLeaf.child[2]) return 3;
304 if (node->info.notLeaf.child[1]) return 2;
305 if (node->info.notLeaf.child[0]) return 1;
306 return 0;
307 }
308
/* Removes range.length bytes starting at byte offset range.location from the
   subtree rooted at node, then restores the 2-3 tree shape by collapsing or
   redistributing children. The boolean compact indicates whether leaf nodes
   that get smaller should be realloced.
*/
static void __CFStorageDelete(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFRange range, bool compact) {
    if (node->isLeaf) {
        node->numBytes -= range.length;		// numBytes now reflects the post-delete size
        // If this node had memory allocated, readjust the bytes...
        if (node->info.leaf.memory) {
            // Slide the tail of the buffer down over the deleted range.
            COPYMEM(node->info.leaf.memory + range.location + range.length, node->info.leaf.memory + range.location, node->numBytes - range.location);
            if (compact) __CFStorageAllocLeafNodeMemory(allocator, storage, node, node->numBytes, true);
        }
    } else {
        bool childrenAreLeaves = node->info.notLeaf.child[0]->isLeaf;
        node->numBytes -= range.length;
        // Delete back-to-front: each pass removes the portion of the range that
        // falls inside the child containing the range's current END.
        while (range.length > 0) {
            CFRange rangeToDelete;
            CFIndex relativeByteNum;
            CFIndex childNum;
            __CFStorageFindChild(node, range.location + range.length, true, &childNum, &relativeByteNum);
            if (range.length > relativeByteNum) {
                // Range extends past this child's start: delete its leading relativeByteNum bytes.
                rangeToDelete.length = relativeByteNum;
                rangeToDelete.location = 0;
            } else {
                // Entire remaining range lies within this child.
                rangeToDelete.length = range.length;
                rangeToDelete.location = relativeByteNum - range.length;
            }
            __CFStorageDelete(allocator, storage, node->info.notLeaf.child[childNum], rangeToDelete, compact);
            if (node->info.notLeaf.child[childNum]->numBytes == 0) {		// Delete empty node and compact
                int cnt;
                _CFAllocatorDeallocateGC(allocator, node->info.notLeaf.child[childNum]);
                // Shift the remaining children left so non-NULL entries stay packed.
                for (cnt = childNum; cnt < 2; cnt++) {
                    __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[cnt], node->info.notLeaf.child[cnt+1]);
                }
                node->info.notLeaf.child[2] = NULL;
            }
            range.length -= rangeToDelete.length;
        }
        // At this point the remaining children are packed
        if (childrenAreLeaves) {
            // Children are leaves; if their total bytes is smaller than a leaf's worth, collapse into one...
            if (node->numBytes > 0 && node->numBytes <= storage->maxLeafCapacity) {
                __CFStorageAllocLeafNodeMemory(allocator, storage, node->info.notLeaf.child[0], node->numBytes, false);
                if (node->info.notLeaf.child[1] && node->info.notLeaf.child[1]->numBytes) {
                    // Append child 1's (and possibly child 2's) bytes onto child 0, then free them.
                    COPYMEM(node->info.notLeaf.child[1]->info.leaf.memory, node->info.notLeaf.child[0]->info.leaf.memory + node->info.notLeaf.child[0]->numBytes, node->info.notLeaf.child[1]->numBytes);
                    if (node->info.notLeaf.child[2] && node->info.notLeaf.child[2]->numBytes) {
                        COPYMEM(node->info.notLeaf.child[2]->info.leaf.memory, node->info.notLeaf.child[0]->info.leaf.memory + node->info.notLeaf.child[0]->numBytes + node->info.notLeaf.child[1]->numBytes, node->info.notLeaf.child[2]->numBytes);
                        __CFStorageNodeDealloc(allocator, node->info.notLeaf.child[2], true);
                        node->info.notLeaf.child[2] = NULL;
                    }
                    __CFStorageNodeDealloc(allocator, node->info.notLeaf.child[1], true);
                    node->info.notLeaf.child[1] = NULL;
                }
                node->info.notLeaf.child[0]->numBytes = node->numBytes;
            }
        } else {
            // Children are not leaves; combine their children to assure each node has 2 or 3 children...
            // (Could try to bypass all this by noting up above whether the number of grandchildren changed...)
            CFStorageNode *gChildren[9];
            CFIndex cCnt, gCnt, cnt;
            CFIndex totalG = 0;	// Total number of grandchildren
            // Detach all grandchildren into a flat list, zeroing the children's byte counts.
            for (cCnt = 0; cCnt < 3; cCnt++) {
                CFStorageNode *child = node->info.notLeaf.child[cCnt];
                if (child) {
                    for (gCnt = 0; gCnt < 3; gCnt++) if (child->info.notLeaf.child[gCnt]) {
                        gChildren[totalG++] = child->info.notLeaf.child[gCnt];
                        child->info.notLeaf.child[gCnt] = NULL;
                    }
                    child->numBytes = 0;
                }
            }
            gCnt = 0;	// Total number of grandchildren placed
            for (cCnt = 0; cCnt < 3; cCnt++) {
                // These tables indicate how many children each child should have, given the total number of grandchildren (last child gets remainder)
                static const unsigned char forChild0[10] = {0, 1, 2, 3, 2, 3, 3, 3, 3, 3};
                static const unsigned char forChild1[10] = {0, 0, 0, 0, 2, 2, 3, 2, 3, 3};
                // sCnt is the number of grandchildren to be placed into child cCnt
                // Depending on child number, pick the right number
                CFIndex sCnt = (cCnt == 0) ? forChild0[totalG] : ((cCnt == 1) ? forChild1[totalG] : totalG);
                // Assure we have that many grandchildren...
                if (sCnt > totalG - gCnt) sCnt = totalG - gCnt;
                if (sCnt) {
                    if (!node->info.notLeaf.child[cCnt]) {
                        // Need a (re)created child to hold this batch of grandchildren.
                        CFStorageNode *newNode = __CFStorageCreateNode(allocator, false, 0);
                        __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[cCnt], newNode);
                        Boolean GC = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
                        if (GC) auto_zone_release(auto_zone(), newNode);
                    }
                    for (cnt = 0; cnt < sCnt; cnt++) {
                        node->info.notLeaf.child[cCnt]->numBytes += gChildren[gCnt]->numBytes;
                        __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[cCnt]->info.notLeaf.child[cnt], gChildren[gCnt++]);
                    }
                } else {
                    // No grandchildren left for this slot: drop the now-empty child.
                    if (node->info.notLeaf.child[cCnt]) {
                        _CFAllocatorDeallocateGC(allocator, node->info.notLeaf.child[cCnt]);
                        node->info.notLeaf.child[cCnt] = NULL;
                    }
                }
            }
        }
    }
}
409
410
/* Returns NULL or additional node to come after this node
   Assumption: size is never > storage->maxLeafCapacity
   Under GC node has a retain count to keep it alive in unregistered pthreads

   Inserts size bytes at (child-relative) byte offset byteNum; absoluteByteNum is
   the same position in whole-tree coordinates and is used only to prime the cache.
   When a leaf or interior node overflows, the overflow is returned as a new
   sibling for the caller to link in (possibly splitting the parent in turn).
*/
static CFStorageNode *__CFStorageInsert(CFAllocatorRef allocator, CFStorageRef storage, CFStorageNode *node, CFIndex byteNum, CFIndex size, CFIndex absoluteByteNum) {
    if (node->isLeaf) {
        if (size + node->numBytes > storage->maxLeafCapacity) {	// Need to create more child nodes
            if (byteNum == node->numBytes) {	// Inserting at end; easy...
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, size);
                __CFStorageSetCache(storage, newNode, absoluteByteNum / storage->valueSize, size / storage->valueSize);
                return newNode;
            } else if (byteNum == 0) {	// Inserting at front; also easy, but we need to swap node and newNode
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, 0);
                // Move the existing leaf's contents into newNode, then reinitialize node as the new (empty) front leaf.
                objc_memmove_collectable(newNode, node, sizeof(CFStorageNode));
                node->isLeaf = true;
                node->numBytes = size;
                node->info.leaf.capacityInBytes = 0;
                node->info.leaf.memory = NULL;
                __CFStorageSetCache(storage, node, absoluteByteNum / storage->valueSize, size / storage->valueSize);
                return newNode;
            } else if (byteNum + size <= storage->maxLeafCapacity) {	// Inserting at middle; inserted region will fit into existing child
                // Create new node to hold the overflow
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, node->numBytes - byteNum);
                if (node->info.leaf.memory) {	// We allocate memory lazily...
                    __CFStorageAllocLeafNodeMemory(allocator, storage, newNode, node->numBytes - byteNum, false);
                    COPYMEM(node->info.leaf.memory + byteNum, newNode->info.leaf.memory, node->numBytes - byteNum);
                    __CFStorageAllocLeafNodeMemory(allocator, storage, node, byteNum + size, false);
                }
                node->numBytes = byteNum + size;
                __CFStorageSetCache(storage, node, (absoluteByteNum - byteNum) / storage->valueSize, node->numBytes / storage->valueSize);
                return newNode;
            } else {	// Inserting some of new into one node, rest into another; remember that the assumption is size <= storage->maxLeafCapacity
                CFStorageNode *newNode = __CFStorageCreateNode(allocator, true, node->numBytes + size - storage->maxLeafCapacity);	// New stuff
                if (node->info.leaf.memory) {	// We allocate memory lazily...
                    __CFStorageAllocLeafNodeMemory(allocator, storage, newNode, node->numBytes + size - storage->maxLeafCapacity, false);
                    // Tail of the old contents lands at the end of newNode, after the spillover of the inserted region.
                    COPYMEM(node->info.leaf.memory + byteNum, newNode->info.leaf.memory + byteNum + size - storage->maxLeafCapacity, node->numBytes - byteNum);
                    __CFStorageAllocLeafNodeMemory(allocator, storage, node, storage->maxLeafCapacity, false);
                }
                node->numBytes = storage->maxLeafCapacity;
                __CFStorageSetCache(storage, node, (absoluteByteNum - byteNum) / storage->valueSize, node->numBytes / storage->valueSize);
                return newNode;
            }
        } else {	// No need to create new nodes!
            if (node->info.leaf.memory) {
                __CFStorageAllocLeafNodeMemory(allocator, storage, node, node->numBytes + size, false);
                // Open a size-byte gap at byteNum (the gap's contents are left uninitialized for the caller to fill).
                COPYMEM(node->info.leaf.memory + byteNum, node->info.leaf.memory + byteNum + size, node->numBytes - byteNum);
            }
            node->numBytes += size;
            __CFStorageSetCache(storage, node, (absoluteByteNum - byteNum) / storage->valueSize, node->numBytes / storage->valueSize);
            return NULL;
        }
    } else {
        CFIndex relativeByteNum;
        CFIndex childNum;
        CFStorageNode *newNode;
        __CFStorageFindChild(node, byteNum, true, &childNum, &relativeByteNum);
        newNode = __CFStorageInsert(allocator, storage, node->info.notLeaf.child[childNum], relativeByteNum, size, absoluteByteNum);
        if (newNode) {
            // Child split; newNode must be linked in immediately after child childNum.
            if (node->info.notLeaf.child[2] == NULL) {	// There's an empty slot for the new node, cool
                if (childNum == 0) __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[2], node->info.notLeaf.child[1]);	// Make room
                __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[childNum + 1], newNode);
                Boolean GC = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
                if (GC) auto_zone_release(auto_zone(), newNode);
                node->numBytes += size;
                return NULL;
            } else {
                // Node is full (3 children + 1 incoming): split into two 2-child nodes.
                CFStorageNode *anotherNode = __CFStorageCreateNode(allocator, false, 0);	// Create another node
                if (childNum == 0) {	// Last two children go to new node
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[0], node->info.notLeaf.child[1]);
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[1], node->info.notLeaf.child[2]);
                    __CFAssignWithWriteBarrier((void **)&node->info.notLeaf.child[1], newNode);
                    node->info.notLeaf.child[2] = NULL;
                } else if (childNum == 1) {	// Last child goes to new node
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[0], newNode);
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[1], node->info.notLeaf.child[2]);
                    node->info.notLeaf.child[2] = NULL;
                } else {	// New node contains the new comers...
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[0], node->info.notLeaf.child[2]);
                    __CFAssignWithWriteBarrier((void **)&anotherNode->info.notLeaf.child[1], newNode);
                    node->info.notLeaf.child[2] = NULL;
                }
                if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
                    auto_zone_release(auto_zone(), newNode);
                }
                // Recompute byte totals from the surviving children on each side of the split.
                node->numBytes = node->info.notLeaf.child[0]->numBytes + node->info.notLeaf.child[1]->numBytes;
                anotherNode->numBytes = anotherNode->info.notLeaf.child[0]->numBytes + anotherNode->info.notLeaf.child[1]->numBytes;
                return anotherNode;
            }
        } else {
            node->numBytes += size;
        }
    }
    return NULL;
}
505
/* Number of values currently stored: the root's byte total divided by the value size. */
CF_INLINE CFIndex __CFStorageGetCount(CFStorageRef storage) {
    return storage->rootNode.numBytes / storage->valueSize;
}
509
510 static Boolean __CFStorageEqual(CFTypeRef cf1, CFTypeRef cf2) {
511 CFStorageRef storage1 = (CFStorageRef)cf1;
512 CFStorageRef storage2 = (CFStorageRef)cf2;
513 CFIndex loc, count, valueSize;
514 CFRange range1, range2;
515 uint8_t *ptr1, *ptr2;
516
517 count = __CFStorageGetCount(storage1);
518 if (count != __CFStorageGetCount(storage2)) return false;
519
520 valueSize = __CFStorageGetValueSize(storage1);
521 if (valueSize != __CFStorageGetValueSize(storage2)) return false;
522
523 loc = range1.location = range1.length = range2.location = range2.length = 0;
524 ptr1 = ptr2 = NULL;
525
526 while (loc < count) {
527 CFIndex cntThisTime;
528 if (loc >= range1.location + range1.length) ptr1 = (uint8_t *)CFStorageGetValueAtIndex(storage1, loc, &range1);
529 if (loc >= range2.location + range2.length) ptr2 = (uint8_t *)CFStorageGetValueAtIndex(storage2, loc, &range2);
530 cntThisTime = range1.location + range1.length;
531 if (range2.location + range2.length < cntThisTime) cntThisTime = range2.location + range2.length;
532 cntThisTime -= loc;
533 if (memcmp(ptr1, ptr2, valueSize * cntThisTime) != 0) return false;
534 ptr1 += valueSize * cntThisTime;
535 ptr2 += valueSize * cntThisTime;
536 loc += cntThisTime;
537 }
538 return true;
539 }
540
541 static CFHashCode __CFStorageHash(CFTypeRef cf) {
542 CFStorageRef Storage = (CFStorageRef)cf;
543 return __CFStorageGetCount(Storage);
544 }
545
546 static void __CFStorageDescribeNode(CFStorageNode *node, CFMutableStringRef str, CFIndex level) {
547 int cnt;
548 for (cnt = 0; cnt < level; cnt++) CFStringAppendCString(str, " ", CFStringGetSystemEncoding());
549
550 if (node->isLeaf) {
551 CFStringAppendFormat(str, NULL, CFSTR("Leaf %d/%d\n"), node->numBytes, node->info.leaf.capacityInBytes);
552 } else {
553 CFStringAppendFormat(str, NULL, CFSTR("Node %d\n"), node->numBytes);
554 for (cnt = 0; cnt < 3; cnt++) if (node->info.notLeaf.child[cnt]) __CFStorageDescribeNode(node->info.notLeaf.child[cnt], str, level+1);
555 }
556 }
557
558 static CFIndex __CFStorageGetNodeCapacity(CFStorageNode *node) {
559 if (!node) return 0;
560 if (node->isLeaf) return node->info.leaf.capacityInBytes;
561 return __CFStorageGetNodeCapacity(node->info.notLeaf.child[0]) + __CFStorageGetNodeCapacity(node->info.notLeaf.child[1]) + __CFStorageGetNodeCapacity(node->info.notLeaf.child[2]);
562 }
563
/* Total capacity of the tree, expressed in values rather than bytes. */
CFIndex __CFStorageGetCapacity(CFStorageRef storage) {
    return __CFStorageGetNodeCapacity(&storage->rootNode) / storage->valueSize;
}
567
/* Size in bytes of a single stored value, as given at creation time. */
CFIndex __CFStorageGetValueSize(CFStorageRef storage) {
    return storage->valueSize;
}
571
572 static CFStringRef __CFStorageCopyDescription(CFTypeRef cf) {
573 CFStorageRef storage = (CFStorageRef)cf;
574 CFMutableStringRef result;
575 CFAllocatorRef allocator = CFGetAllocator(storage);
576 result = CFStringCreateMutable(allocator, 0);
577 CFStringAppendFormat(result, NULL, CFSTR("<CFStorage %p [%p]>[count = %u, capacity = %u]\n"), storage, allocator, __CFStorageGetCount(storage), __CFStorageGetCapacity(storage));
578 __CFStorageDescribeNode(&storage->rootNode, result, 0);
579 return result;
580 }
581
/* Finalize callback for the CF runtime. Under GC the collector reclaims the
   node graph, so there is nothing to do; otherwise free the tree hanging off
   the root (the root node itself lives inline in the CFStorage allocation,
   hence freeNodeItself == false). */
static void __CFStorageDeallocate(CFTypeRef cf) {
    CFStorageRef storage = (CFStorageRef)cf;
    CFAllocatorRef allocator = CFGetAllocator(storage);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) return; // XXX_PCB GC will take care of us.
    __CFStorageNodeDealloc(allocator, &storage->rootNode, false);
}
588
/* CF runtime registration state; the type ID is assigned once in __CFStorageInitialize(). */
static CFTypeID __kCFStorageTypeID = _kCFRuntimeNotATypeID;

static const CFRuntimeClass __CFStorageClass = {
    _kCFRuntimeScannedObject,	// instances contain GC-scanned pointers
    "CFStorage",
    NULL,	// init
    NULL,	// copy
    __CFStorageDeallocate,
    __CFStorageEqual,
    __CFStorageHash,
    NULL,	// formatting description (unused here)
    __CFStorageCopyDescription
};
602
/* Registers the CFStorage class with the CF runtime; called once at framework init. */
__private_extern__ void __CFStorageInitialize(void) {
    __kCFStorageTypeID = _CFRuntimeRegisterClass(&__CFStorageClass);
}
606
607 /*** Public API ***/
608
/* Creates an empty CFStorage holding values of valueSize bytes each.
   The root node starts out as an empty leaf. Returns NULL on allocation failure. */
CFStorageRef CFStorageCreate(CFAllocatorRef allocator, CFIndex valueSize) {
    CFStorageRef storage;
    CFIndex size = sizeof(struct __CFStorage) - sizeof(CFRuntimeBase);	// _CFRuntimeCreateInstance provides the CFRuntimeBase itself
    storage = (CFStorageRef)_CFRuntimeCreateInstance(allocator, __kCFStorageTypeID, size, NULL);
    if (NULL == storage) {
        return NULL;
    }
    storage->valueSize = valueSize;
    CF_SPINLOCK_INIT_FOR_STRUCTS(storage->cacheReaderMemoryAllocationLock);
    storage->cacheGenerationCount = 0;
    // Cache starts out cleared: range (0,0), no cached node.
    storage->cacheParts.locationHi = 0;
    storage->cacheParts.locationLo = 0;
#if POSSIBLE_TO_HAVE_LENGTH_MORE_THAN_HALFWORD
    storage->cacheParts.lengthHi = 0;
#endif
    storage->cacheParts.lengthLo = 0;
    storage->cacheParts.cachedNodeHi = 0;
    storage->cacheParts.cachedNodeLo = 0;
    storage->maxLeafCapacity = __CFStorageMaxLeafCapacity;
    // Trim the leaf capacity to a whole number of values so values never straddle leaves.
    if (valueSize && ((storage->maxLeafCapacity % valueSize) != 0)) {
        storage->maxLeafCapacity = (storage->maxLeafCapacity / valueSize) * valueSize;	// Make it fit perfectly (3406853)
    }
    memset(&(storage->rootNode), 0, sizeof(CFStorageNode));
    storage->rootNode.isLeaf = true;	// An empty storage is a single empty leaf
    storage->nodeHint = __kCFAllocatorGCScannedMemory;
    if (__CFOASafe) __CFSetLastAllocationEventName(storage, "CFStorage");
    return storage;
}
637
/* Returns the CF type ID registered in __CFStorageInitialize(). */
CFTypeID CFStorageGetTypeID(void) {
    return __kCFStorageTypeID;
}
641
/* Public accessor: number of values currently stored. */
CFIndex CFStorageGetCount(CFStorageRef storage) {
    return __CFStorageGetCount(storage);
}
645
/* Returns pointer to the specified value
   index and validConsecutiveValueRange are in terms of values
   (validConsecutiveValueRange may be NULL; a scratch range is used internally,
   since __CFStorageGetValueAtIndex() requires a non-NULL out-parameter.)
*/
void *CFStorageGetValueAtIndex(CFStorageRef storage, CFIndex idx, CFRange *validConsecutiveValueRange) {
    CFRange range;
    return __CFStorageGetValueAtIndex(storage, idx, validConsecutiveValueRange ? validConsecutiveValueRange : &range);
}
653
/* Makes space for range.length values at location range.location
   This function deepens the tree if necessary...
   Insertion proceeds in chunks of at most maxLeafCapacity bytes (rounded down
   to a whole number of values), since __CFStorageInsert() assumes size never
   exceeds maxLeafCapacity. The new space is uninitialized.
*/
void CFStorageInsertValues(CFStorageRef storage, CFRange range) {
    CFIndex numBytesToInsert = range.length * storage->valueSize;
    CFIndex byteNum = range.location * storage->valueSize;
    while (numBytesToInsert > 0) {
        CFStorageNode *newNode;
        CFAllocatorRef allocator = CFGetAllocator(storage);
        CFIndex insertThisTime = numBytesToInsert;
        if (insertThisTime > storage->maxLeafCapacity) {
            // Clamp to the largest whole-value chunk that fits in one leaf.
            insertThisTime = (storage->maxLeafCapacity / storage->valueSize) * storage->valueSize;
        }
        newNode = __CFStorageInsert(allocator, storage, &storage->rootNode, byteNum, insertThisTime, byteNum);
        if (newNode) {
            // Root split: deepen the tree by one level. The old root's contents move
            // into tempRootNode; the inline root becomes an interior node over both.
            CFStorageNode *tempRootNode = __CFStorageCreateNode(allocator, false, 0);	// Will copy the (static) rootNode over to this
            objc_memmove_collectable(tempRootNode, &storage->rootNode, sizeof(CFStorageNode));
            storage->rootNode.isLeaf = false;
            __CFAssignWithWriteBarrier((void **)&storage->rootNode.info.notLeaf.child[0], tempRootNode);
            __CFAssignWithWriteBarrier((void **)&storage->rootNode.info.notLeaf.child[1], newNode);
            if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
                auto_zone_release(auto_zone(), tempRootNode);
                auto_zone_release(auto_zone(), newNode);
            }
            storage->rootNode.info.notLeaf.child[2] = NULL;
            storage->rootNode.numBytes = tempRootNode->numBytes + newNode->numBytes;
#if 1
            // ??? The cache may reference a node whose contents just moved into tempRootNode;
            // clearing it is the conservative fix (cf. the disabled alternative below).
            __CFStorageSetCache(storage, NULL, 0, 0);
#else
            if (storage->cache.cachedNode == &(storage->rootNode)) __CFAssignWithWriteBarrier((void **)&storage->cache.cachedNode, tempRootNode);	// The cache should follow the node
#endif
        }
        numBytesToInsert -= insertThisTime;
        byteNum += insertThisTime;
    }
}
691
/* Deletes the values in the specified range
   This function gets rid of levels if necessary...
*/
void CFStorageDeleteValues(CFStorageRef storage, CFRange range) {
    CFAllocatorRef allocator = CFGetAllocator(storage);
    // Convert the range from value counts to byte counts.
    range.location *= storage->valueSize;
    range.length *= storage->valueSize;
    __CFStorageDelete(allocator, storage, &storage->rootNode, range, true);
    // Collapse any chain of single-child interior nodes into the (static)
    // root, shrinking the tree's height one level per iteration.
    while (__CFStorageGetNumChildren(&storage->rootNode) == 1) {
        CFStorageNode *child = storage->rootNode.info.notLeaf.child[0]; // The single child
        objc_memmove_collectable(&storage->rootNode, child, sizeof(CFStorageNode));
        _CFAllocatorDeallocateGC(allocator, child);
    }
    // An interior root left with zero children means the storage is now
    // empty; reset the root to an empty leaf.
    if (__CFStorageGetNumChildren(&storage->rootNode) == 0 && !storage->rootNode.isLeaf) {
        storage->rootNode.isLeaf = true;
        storage->rootNode.info.leaf.capacityInBytes = 0;
        storage->rootNode.info.leaf.memory = NULL;
    }
    // !!! Need to update the cache
    __CFStorageSetCache(storage, NULL, 0, 0);
}
713
714 void CFStorageGetValues(CFStorageRef storage, CFRange range, void *values) {
715 while (range.length > 0) {
716 CFRange leafRange;
717 void *storagePtr = __CFStorageGetValueAtIndex(storage, range.location, &leafRange);
718 CFIndex cntThisTime = range.length;
719 if (cntThisTime > leafRange.length - (range.location - leafRange.location)) cntThisTime = leafRange.length - (range.location - leafRange.location);
720 COPYMEM(storagePtr, values, cntThisTime * storage->valueSize);
721 values = (uint8_t *)values + (cntThisTime * storage->valueSize);
722 range.location += cntThisTime;
723 range.length -= cntThisTime;
724 }
725 }
726
727 unsigned long _CFStorageFastEnumeration(CFStorageRef storage, struct __objcFastEnumerationStateEquivalent *state, void *stackbuffer, unsigned long count) {
728 // without trying to understand the data structure, each time through search for block containing index
729 CFRange leafRange;
730 if (state->state == 0) { /* first time, get length */
731 state->extra[0] = __CFStorageGetCount(storage);
732 }
733 if (state->state >= state->extra[0]) return 0;
734 state->itemsPtr = (unsigned long *)CFStorageGetValueAtIndex(storage, state->state, &leafRange);
735 state->state += leafRange.length;
736 return leafRange.length;
737 }
738
739 void CFStorageApplyFunction(CFStorageRef storage, CFRange range, CFStorageApplierFunction applier, void *context) {
740 while (0 < range.length) {
741 CFRange leafRange;
742 const void *storagePtr;
743 CFIndex idx, cnt;
744 storagePtr = CFStorageGetValueAtIndex(storage, range.location, &leafRange);
745 cnt = __CFMin(range.length, leafRange.location + leafRange.length - range.location);
746 for (idx = 0; idx < cnt; idx++) {
747 applier(storagePtr, context);
748 storagePtr = (const char *)storagePtr + storage->valueSize;
749 }
750 range.length -= cnt;
751 range.location += cnt;
752 }
753 }
754
755 void CFStorageReplaceValues(CFStorageRef storage, CFRange range, const void *values) {
756 while (range.length > 0) {
757 CFRange leafRange;
758 void *storagePtr = __CFStorageGetValueAtIndex(storage, range.location, &leafRange);
759 CFIndex cntThisTime = range.length;
760 if (cntThisTime > leafRange.length - (range.location - leafRange.location)) cntThisTime = leafRange.length - (range.location - leafRange.location);
761 COPYMEM(values, storagePtr, cntThisTime * storage->valueSize);
762 values = (const uint8_t *)values + (cntThisTime * storage->valueSize);
763 range.location += cntThisTime;
764 range.length -= cntThisTime;
765 }
766 }
767
768 /* Used by CFArray.c */
769
770 static void __CFStorageNodeSetUnscanned(CFStorageNode *node, auto_zone_t *zone) {
771 if (node->isLeaf) {
772 auto_zone_set_unscanned(zone, node->info.leaf.memory);
773 } else {
774 CFStorageNode **children = node->info.notLeaf.child;
775 if (children[0]) __CFStorageNodeSetUnscanned(children[0], zone);
776 if (children[1]) __CFStorageNodeSetUnscanned(children[1], zone);
777 if (children[2]) __CFStorageNodeSetUnscanned(children[2], zone);
778 }
779 }
780
781 __private_extern__ void _CFStorageSetWeak(CFStorageRef storage) {
782 storage->nodeHint = 0;
783 __CFStorageNodeSetUnscanned(&storage->rootNode, (auto_zone_t *)auto_zone());
784 }
785
786 #undef COPYMEM
787 #undef PAGE_LIMIT
788