/*
 * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include <string.h>

#include "fsck_hfs.h"
#include "cache.h"

#define true    1
#define false   0

#define CACHE_DEBUG 0

/*
 * CacheAllocBlock
 *
 * Allocate an unused cache block.
 */
void *CacheAllocBlock (Cache_t *cache);

/*
 * CacheFreeBlock
 *
 * Release an active cache block.
 */
static int
CacheFreeBlock( Cache_t *cache, Tag_t *tag );

/*
 * CacheLookup
 *
 * Obtain a cache block. If one already exists, it is returned. Otherwise a
 * new one is created and inserted into the cache.
 */
int CacheLookup (Cache_t *cache, uint64_t off, Tag_t **tag);

/*
 * CacheRawRead
 *
 * Perform a direct read on the file.
 */
int CacheRawRead (Cache_t *cache, uint64_t off, uint32_t len, void *buf);

/*
 * CacheRawWrite
 *
 * Perform a direct write on the file.
 */
int CacheRawWrite (Cache_t *cache, uint64_t off, uint32_t len, void *buf);

/*
 * CacheFlushRange
 *
 * Flush, and optionally remove, all cache blocks that intersect
 * a given range.
 */
static int
CacheFlushRange( Cache_t *cache, uint64_t start, uint64_t len, int remove);

/*
 * LRUInit
 *
 * Initializes the LRU data structures.
 */
static int LRUInit (LRU_t *lru);

/*
 * LRUDestroy
 *
 * Shutdown the LRU.
 *
 * NOTE: This is typically a no-op, since the cache manager will be clearing
 *       all cache tags.
 */
static int LRUDestroy (LRU_t *lru);

/*
 * LRUHit
 *
 * Registers data activity on the given node. If the node is already in the
 * LRU, it is moved to the front. Otherwise, it is inserted at the front.
 *
 * NOTE: If the node is not in the LRU, we assume that its pointers are NULL.
 */
static int LRUHit (LRU_t *lru, LRUNode_t *node, int age);

/*
 * LRUEvict
 *
 * Chooses a buffer to release.
 *
 * TODO: Under extreme conditions, it should be possible to release the buffer
 *       of an actively referenced cache buffer, leaving the tag behind as a
 *       placeholder. This would be required for implementing 2Q-LRU
 *       replacement.
 */
static int LRUEvict (LRU_t *lru, LRUNode_t *node);

/*
 * CalculateCacheSizes
 *
 * Determine the cache size values that should be used to initialize the cache.
 * If the requested value does not validate according to the conditions described
 * below, it is adjusted.
 *
 * If no input values are provided, use default values for cache size
 * and cache block size.
 *
 * Cache size should be -
 *      a. greater than or equal to minimum cache size
 *      b. less than or equal to maximum cache size.  The maximum cache size
 *         is limited by the maximum value that can be allocated using malloc
 *         or mmap (maximum value for size_t)
 *      c. a multiple of cache block size
 *
 * Returns: void
 *    *calcBlockSize:   the size of the blocks in the cache
 *    *calcTotalBlocks: the number of blocks in the cache
 */
void CalculateCacheSizes(uint64_t cacheSize, uint32_t *calcBlockSize, uint32_t *calcTotalBlocks, char cache_debug)
{
    uint32_t blockSize = DefaultCacheBlockSize;
    const size_t max_size_t = ~0;   /* Maximum value represented by size_t */

    /* Simple case - no user cache size, use default values */
    if (!cacheSize) {
        *calcBlockSize = DefaultCacheBlockSize;
        *calcTotalBlocks = DefaultCacheBlocks;
        goto out;
    }

    /* User provided cache size - check against minimum and maximum values */
    if (cacheSize < MinCacheSize) {
        cacheSize = MinCacheSize;
    }
    if (cacheSize > max_size_t ||
        cacheSize > MaxCacheSize) {
        if (cache_debug) {
            printf ("\tCache size should be greater than %uM and less than %luM\n", MinCacheSize/(1024*1024), max_size_t/(1024*1024));
        }
        cacheSize = MaxCacheSize;
    }

    /* Cache size should be a multiple of cache block size */
    if (cacheSize % blockSize) {
        if (cache_debug) {
            printf ("\tCache size should be a multiple of cache block size (currently %uK)\n", blockSize/1024);
        }
        cacheSize = (cacheSize / blockSize) * blockSize;
    }

    *calcBlockSize = blockSize;
    *calcTotalBlocks = cacheSize / blockSize;

out:
    return;
}
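
/*
 * Illustrative sketch (not compiled): how a caller might derive cache
 * geometry before CacheInit. The 33 MB request is an arbitrary example;
 * the real bounds come from MinCacheSize/MaxCacheSize in cache.h.
 */
#if 0
static void ExampleCalculateSizes(void)
{
    uint32_t blockSize, totalBlocks;

    /* 33 MB is clamped and rounded down to a block-aligned size */
    CalculateCacheSizes(33 * 1024 * 1024, &blockSize, &totalBlocks, 1);

    printf("using %u blocks of %u bytes\n", totalBlocks, blockSize);
}
#endif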

/*
 * CacheInit
 *
 * Initializes the cache for use. If preTouch is non-zero, the cache memory will
 * be iterated through, with one byte per page touched. (This is to ensure that
 * the memory is actually created, and is used to avoid deadlocking due to swapping
 * during a live verify of the boot volume.)
 */
int CacheInit (Cache_t *cache, int fdRead, int fdWrite, uint32_t devBlockSize,
               uint32_t cacheBlockSize, uint32_t cacheTotalBlocks, uint32_t hashSize, int preTouch)
{
    void **     temp;
    uint32_t    i;
    Buf_t *     buf;

    memset (cache, 0x00, sizeof (Cache_t));

    cache->FD_R = fdRead;
    cache->FD_W = fdWrite;
    cache->DevBlockSize = devBlockSize;
    /* CacheFlush requires cleared cache->Hash */
    cache->Hash = (Tag_t **) calloc( 1, (sizeof (Tag_t *) * hashSize) );
    if (cache->Hash == NULL)
        return (ENOMEM);
    cache->HashSize = hashSize;
    cache->BlockSize = cacheBlockSize;

    /* Allocate the cache memory */
    /* Break out of the loop on success, or when the proposed cache is < MinCacheSize */
    while (1) {
        cache->FreeHead = mmap (NULL,
                                cacheTotalBlocks * cacheBlockSize,
                                PROT_READ | PROT_WRITE,
                                MAP_ANON | MAP_PRIVATE,
                                -1,
                                0);
        if (cache->FreeHead == (void *)-1) {
            if ((cacheTotalBlocks * cacheBlockSize) <= MinCacheSize) {
                if (debug)
                    printf("\tTried to allocate %dK, minimum is %dK\n",
                        (cacheTotalBlocks * cacheBlockSize) / 1024,
                        MinCacheSize / 1024);
                break;
            }
            if (debug)
                printf("\tFailed to allocate %uK for cache; trying %uK\n",
                    (cacheTotalBlocks * cacheBlockSize) / 1024,
                    (cacheTotalBlocks * cacheBlockSize / 2) / 1024);
            CalculateCacheSizes((cacheTotalBlocks * cacheBlockSize) / 2, &cacheBlockSize, &cacheTotalBlocks, debug);
            continue;
        } else {
            if (debug) {
                printf ("\tUsing cacheBlockSize=%uK cacheTotalBlocks=%u cacheSize=%uK.\n", cacheBlockSize/1024, cacheTotalBlocks, (cacheBlockSize/1024) * cacheTotalBlocks);
            }
            break;
        }
    }
    if (cache->FreeHead == (void*)-1) {
#if CACHE_DEBUG
        printf("%s(%d): FreeHead = -1\n", __FUNCTION__, __LINE__);
#endif
        return (ENOMEM);
    }


    /* If necessary, touch a byte in each page */
    if (preTouch) {
        size_t pageSize = getpagesize();
        unsigned char *ptr = (unsigned char *)cache->FreeHead;
        unsigned char *end = ptr + (cacheTotalBlocks * cacheBlockSize);
        while (ptr < end) {
            *ptr = 0;
            ptr += pageSize;
        }
    }

    /* Initialize the cache memory free list */
    temp = cache->FreeHead;
    for (i = 0; i < cacheTotalBlocks - 1; i++) {
        *temp = ((char *)temp + cacheBlockSize);
        temp  = (void **)((char *)temp + cacheBlockSize);
    }
    *temp = NULL;
    cache->FreeSize = cacheTotalBlocks;

    buf = (Buf_t *)malloc(sizeof(Buf_t) * MAXBUFS);
    if (buf == NULL) {
#if CACHE_DEBUG
        printf("%s(%d): malloc(%zu) failed\n", __FUNCTION__, __LINE__, sizeof(Buf_t) * MAXBUFS);
#endif
        return (ENOMEM);
    }

    memset (&buf[0], 0x00, sizeof (Buf_t) * MAXBUFS);
    for (i = 1 ; i < MAXBUFS ; i++) {
        (&buf[i-1])->Next = &buf[i];
    }
    cache->FreeBufs = &buf[0];

#if CACHE_DEBUG
    printf( "%s - cacheTotalBlocks %d cacheBlockSize %d hashSize %d \n",
            __FUNCTION__, cacheTotalBlocks, cacheBlockSize, hashSize );
    printf( "%s - cache memory %d \n", __FUNCTION__, (cacheTotalBlocks * cacheBlockSize) );
#endif

    return (LRUInit (&cache->LRU));
}
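
/*
 * Illustrative sketch (not compiled): bringing a cache up and tearing it
 * down. The shared descriptor, the 512-byte device block size, and the
 * hash size of totalBlocks/3 are assumptions for the example; fsck_hfs
 * derives the real values from the device being checked.
 */
#if 0
static int ExampleInit(int fd)
{
    Cache_t  cache;
    uint32_t blockSize, totalBlocks;
    int      error;

    CalculateCacheSizes(0, &blockSize, &totalBlocks, 0);    /* defaults */

    /* Same descriptor for reads and writes; pre-touch every page */
    error = CacheInit(&cache, fd, fd, 512, blockSize, totalBlocks,
                      totalBlocks / 3, 1);
    if (error != EOK)
        return error;

    /* ... CacheRead / CacheWrite / CacheRelease traffic ... */

    return CacheDestroy(&cache);    /* flushes lazy-write blocks */
}
#endif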


/*
 * CacheDestroy
 *
 * Shutdown the cache.
 */
int CacheDestroy (Cache_t *cache)
{
    CacheFlush( cache );

#if CACHE_DEBUG
    /* Print cache report */
    printf ("Cache Report:\n");
    printf ("\tRead Requests:  %d\n", cache->ReqRead);
    printf ("\tWrite Requests: %d\n", cache->ReqWrite);
    printf ("\tDisk Reads:     %d\n", cache->DiskRead);
    printf ("\tDisk Writes:    %d\n", cache->DiskWrite);
    printf ("\tSpans:          %d\n", cache->Span);
#endif
    /* Shutdown the LRU */
    LRUDestroy (&cache->LRU);

    /* I'm lazy, I'll come back to it :P */
    return (EOK);
}

/*
 * CacheRead
 *
 * Reads a range of bytes from the cache, returning a pointer to a buffer
 * containing the requested bytes.
 *
 * NOTE: The returned buffer may directly refer to a cache block, or an
 *       anonymous buffer. Do not make any assumptions about the nature of
 *       the returned buffer, except that it is contiguous.
 */
int CacheRead (Cache_t *cache, uint64_t off, uint32_t len, Buf_t **bufp)
{
    Tag_t *     tag;
    Buf_t *     searchBuf;
    Buf_t *     buf;
    uint32_t    coff = (off % cache->BlockSize);
    uint64_t    cblk = (off - coff);
    int         error;

    /* Check for conflicts with other bufs */
    searchBuf = cache->ActiveBufs;
    while (searchBuf != NULL) {
        if ((searchBuf->Offset >= off) && (searchBuf->Offset < off + len)) {
#if CACHE_DEBUG
            printf ("ERROR: CacheRead: Deadlock\n");
#endif
            return (EDEADLK);
        }

        searchBuf = searchBuf->Next;
    }

    /* get a free buffer */
    if ((buf = cache->FreeBufs) == NULL) {
#if CACHE_DEBUG
        printf ("ERROR: CacheRead: no more bufs!\n");
#endif
        return (ENOBUFS);
    }
    cache->FreeBufs = buf->Next;
    *bufp = buf;

    /* Clear the buf structure */
    buf->Next   = NULL;
    buf->Prev   = NULL;
    buf->Flags  = 0;
    buf->Offset = off;
    buf->Length = len;
    buf->Buffer = NULL;

    /* If this is unaligned or spans multiple cache blocks */
    if ((cblk / cache->BlockSize) != ((off + len - 1) / cache->BlockSize)) {
        buf->Flags |= BUF_SPAN;
    }
    /* Fetch the first cache block */
    error = CacheLookup (cache, cblk, &tag);
    if (error != EOK) {
#if CACHE_DEBUG
        printf ("ERROR: CacheRead: CacheLookup error %d\n", error);
#endif
        return (error);
    }

    /* If we live nicely inside a cache block */
    if (!(buf->Flags & BUF_SPAN)) {
        /* Offset the buffer into the cache block */
        buf->Buffer = tag->Buffer + coff;

        /* Bump the cache block's reference count */
        tag->Refs++;

        /* Kick the node into the right queue */
        LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);

    /* Otherwise, things get ugly */
    } else {
        uint32_t    boff;   /* Offset into the buffer */
        uint32_t    blen;   /* Space to fill in the buffer */
        uint32_t    temp;

        /* Allocate a temp buffer */
        buf->Buffer = (void *)malloc (len);
        if (buf->Buffer == NULL) {
#if CACHE_DEBUG
            printf ("ERROR: CacheRead: No Memory\n");
#endif
            return (ENOMEM);
        }

        /* Blit the first chunk into the buffer */
        boff = cache->BlockSize - coff;
        blen = len - boff;
#if CACHE_DEBUG
        printf("INFO:  memcpy(%p, %p + %u, %u)\n", buf->Buffer, tag->Buffer, coff, boff);
#endif
        memcpy (buf->Buffer, tag->Buffer + coff, boff);

        /* Bump the cache block's reference count */
        tag->Refs++;

        /* Kick the node into the right queue */
        LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);

        /* Next cache block */
        cblk += cache->BlockSize;

        /* Read data a cache block at a time */
        while (blen) {
            /* Fetch the next cache block */
            error = CacheLookup (cache, cblk, &tag);
            if (error != EOK) {
                /* Free the allocated buffer */
                free (buf->Buffer);
                buf->Buffer = NULL;

                /* Walk back and release all the held tags */
                while (cblk > (off - coff)) {
                    cblk -= cache->BlockSize;
                    if (CacheLookup (cache, cblk, &tag) != EOK) {
                        fprintf (stderr, "CacheRead: Unrecoverable error\n");
                        exit (-1);
                    }
                    tag->Refs--;

                    /* Kick the node into the right queue */
                    LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);
                }

                return (error);
            }

            /* Blit the cache block into the buffer */
            temp = ((blen > cache->BlockSize) ? cache->BlockSize : blen);
#if CACHE_DEBUG
            printf ("INFO:  memcpy(%p + %u, %p, %u)\n", buf->Buffer, boff, tag->Buffer, temp);
#endif
            memcpy (buf->Buffer + boff,
                    tag->Buffer,
                    temp);

            /* Update counters */
            boff += temp;
            blen -= temp;
            tag->Refs++;

            /* Advance to the next cache block */
            cblk += cache->BlockSize;

            /* Kick the node into the right queue */
            LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);
        }

        /* Count the spanned access */
        cache->Span++;
    }

    /* Attach to head of active buffers list */
    if (cache->ActiveBufs != NULL) {
        buf->Next = cache->ActiveBufs;
        buf->Prev = NULL;

        cache->ActiveBufs->Prev = buf;

    } else {
        cache->ActiveBufs = buf;
    }

    /* Update counters */
    cache->ReqRead++;
    return (EOK);
}
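
/*
 * Illustrative sketch (not compiled): the basic read/release cycle. The
 * 1 KB length and the offset are assumptions; real callers read volume
 * structures such as B-tree nodes. Every successful CacheRead must be
 * paired with CacheWrite or CacheRelease, or the underlying cache blocks
 * stay referenced and can never be evicted.
 */
#if 0
static int ExampleReadOnly(Cache_t *cache, uint64_t offset)
{
    Buf_t * buf;
    int     error;

    error = CacheRead(cache, offset, 1024, &buf);
    if (error != EOK)
        return error;

    /* ... inspect buf->Buffer (a cache block or an anonymous copy) ... */

    /* Clean access: hand the buffer back without dirtying anything */
    return CacheRelease(cache, buf, 0);
}
#endif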

/*
 * XXX
 * All of the uses of kLockWrite need to be audited for
 * when the journal replay is writing.
 */
/*
 * CacheWrite
 *
 * Writes a buffer through the cache.
 */
int CacheWrite ( Cache_t *cache, Buf_t *buf, int age, uint32_t writeOptions )
{
    Tag_t *     tag;
    uint32_t    coff = (buf->Offset % cache->BlockSize);
    uint64_t    cblk = (buf->Offset - coff);
    int         error;

    /* Fetch the first cache block */
    error = CacheLookup (cache, cblk, &tag);
    if (error != EOK) return (error);

    /* If the buffer was a direct reference */
    if (!(buf->Flags & BUF_SPAN)) {
        /* Commit the dirty block */
        if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 )
        {
            /* Copy flags to tag */
            tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite));
        }
        else
        {
            error = CacheRawWrite (cache,
                                   tag->Offset,
                                   cache->BlockSize,
                                   tag->Buffer);
            if (error != EOK) return (error);
        }

        /* Release the reference */
        if ((writeOptions & kLockWrite) == 0)
            tag->Refs--;

        /* Kick the node into the right queue */
        LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

    /* Otherwise, we do the ugly thing again */
    } else {
        uint32_t    boff;   /* Offset into the buffer */
        uint32_t    blen;   /* Space to fill in the buffer */
        uint32_t    temp;

        /* Blit the first chunk back into the cache */
        boff = cache->BlockSize - coff;
        blen = buf->Length - boff;
        memcpy (tag->Buffer + coff, buf->Buffer, boff);

        /* Commit the dirty block */
        if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 )
        {
            /* flag this for lazy write */
            tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite));
        }
        else
        {
            error = CacheRawWrite (cache,
                                   tag->Offset,
                                   cache->BlockSize,
                                   tag->Buffer);
            if (error != EOK) return (error);
        }

        /* Release the cache block reference */
        if ((writeOptions & kLockWrite) == 0)
            tag->Refs--;

        /* Kick the node into the right queue */
        LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

        /* Next cache block */
        cblk += cache->BlockSize;

        /* Write data a cache block at a time */
        while (blen) {
            /* Fetch the next cache block */
            error = CacheLookup (cache, cblk, &tag);
            /* We must go through with the write regardless */

            /* Blit the next buffer chunk back into the cache */
            temp = ((blen > cache->BlockSize) ? cache->BlockSize : blen);
            memcpy (tag->Buffer,
                    buf->Buffer + boff,
                    temp);

            /* Commit the dirty block */
            if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 )
            {
                /* flag this for lazy write */
                tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite));
            }
            else
            {
                error = CacheRawWrite (cache,
                                       tag->Offset,
                                       cache->BlockSize,
                                       tag->Buffer);
                if (error != EOK) return (error);
            }

            /* Update counters */
            boff += temp;
            blen -= temp;
            if ((writeOptions & kLockWrite) == 0)
                tag->Refs--;

            /* Kick the node into the right queue */
            LRUHit (&cache->LRU, (LRUNode_t *)tag, age);
            /* And go to the next cache block */
            cblk += cache->BlockSize;
        }

        /* Release the anonymous buffer */
        free (buf->Buffer);
    }

    /* Detach the buffer */
    if (buf->Next != NULL)
        buf->Next->Prev = buf->Prev;
    if (buf->Prev != NULL)
        buf->Prev->Next = buf->Next;
    if (cache->ActiveBufs == buf)
        cache->ActiveBufs = buf->Next;

    /* Clear the buffer and put it back on free list */
    memset (buf, 0x00, sizeof (Buf_t));
    buf->Next = cache->FreeBufs;
    cache->FreeBufs = buf;

    /* Update counters */
    cache->ReqWrite++;

    return (EOK);
}
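
/*
 * Illustrative sketch (not compiled): a read-modify-write through the
 * cache. kLazyWrite defers the disk write until eviction or CacheFlush;
 * passing 0 for writeOptions would write through immediately. The
 * one-byte patch is a stand-in for a real repair.
 */
#if 0
static int ExampleModify(Cache_t *cache, uint64_t offset)
{
    Buf_t * buf;
    int     error;

    error = CacheRead(cache, offset, 512, &buf);
    if (error != EOK)
        return error;

    ((char *)buf->Buffer)[0] = 0;   /* hypothetical one-byte repair */

    /* Releases the buffer and marks the block dirty for lazy write */
    return CacheWrite(cache, buf, 0, kLazyWrite);
}
#endif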

/*
 * CacheRelease
 *
 * Releases a clean buffer.
 *
 * NOTE: We don't verify whether it's dirty or not.
 */
int CacheRelease (Cache_t *cache, Buf_t *buf, int age)
{
    Tag_t *     tag;
    uint32_t    coff = (buf->Offset % cache->BlockSize);
    uint64_t    cblk = (buf->Offset - coff);
    int         error;

    /* Fetch the first cache block */
    error = CacheLookup (cache, cblk, &tag);
    if (error != EOK) {
#if CACHE_DEBUG
        printf ("ERROR: CacheRelease: CacheLookup error\n");
#endif
        return (error);
    }

    /* If the buffer was a direct reference */
    if (!(buf->Flags & BUF_SPAN)) {
        /* Release the reference */
        if ((tag->Flags & kLockWrite) == 0) {
            tag->Refs--;
        }

        /* Kick the node into the right queue */
        LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

    /* Otherwise, we do the ugly thing again */
    } else {
        uint32_t    blen;   /* Space to fill in the buffer */

        /* Compute the length remaining past the first cache block */
        blen = buf->Length - cache->BlockSize + coff;

        /* Release the cache block reference */
        if ((tag->Flags & kLockWrite) == 0) {
            tag->Refs--;
        }

        /* Kick the node into the right queue */
        LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

        /* Next cache block */
        cblk += cache->BlockSize;

        /* Release cache blocks one at a time */
        while (blen) {
            /* Fetch the next cache block */
            error = CacheLookup (cache, cblk, &tag);
            /* We must go through with the release regardless */

            /* Update counters */
            blen -= ((blen > cache->BlockSize) ? cache->BlockSize : blen);
            if ((tag->Flags & kLockWrite) == 0)
                tag->Refs--;

            /* Kick the node into the right queue */
            LRUHit (&cache->LRU, (LRUNode_t *)tag, age);
            /* Advance to the next block */
            cblk += cache->BlockSize;
        }

        /* Release the anonymous buffer */
        free (buf->Buffer);
    }

    /* Detach the buffer */
    if (buf->Next != NULL)
        buf->Next->Prev = buf->Prev;
    if (buf->Prev != NULL)
        buf->Prev->Next = buf->Next;
    if (cache->ActiveBufs == buf)
        cache->ActiveBufs = buf->Next;

    /* Clear the buffer and put it back on free list */
    memset (buf, 0x00, sizeof (Buf_t));
    buf->Next = cache->FreeBufs;
    cache->FreeBufs = buf;

    return (EOK);
}

/*
 * CacheRemove
 *
 * Disposes of a particular buffer.
 */
int CacheRemove (Cache_t *cache, Tag_t *tag)
{
    int error;

    /* Make sure it's not busy */
    if (tag->Refs) return (EBUSY);

    /* Detach the tag */
    if (tag->Next != NULL)
        tag->Next->Prev = tag->Prev;
    if (tag->Prev != NULL)
        tag->Prev->Next = tag->Next;
    else
        cache->Hash[tag->Offset % cache->HashSize] = tag->Next;

    /* Make sure the head node doesn't have a back pointer */
    if ((cache->Hash[tag->Offset % cache->HashSize] != NULL) &&
        (cache->Hash[tag->Offset % cache->HashSize]->Prev != NULL)) {
#if CACHE_DEBUG
        printf ("ERROR: CacheRemove: Corrupt hash chain\n");
#endif
    }

    /* Release its buffer (if it has one) */
    if (tag->Buffer != NULL)
    {
        error = CacheFreeBlock (cache, tag);
        if ( EOK != error )
            return( error );
    }

    /* Zero the tag (for easy debugging) */
    memset (tag, 0x00, sizeof (Tag_t));

    /* Free the tag */
    free (tag);

    return (EOK);
}

/*
 * CacheEvict
 *
 * Only dispose of the buffer, leave the tag intact.
 */
int CacheEvict (Cache_t *cache, Tag_t *tag)
{
    int error;

    /* Make sure it's not busy */
    if (tag->Refs) return (EBUSY);

    /* Release the buffer */
    if (tag->Buffer != NULL)
    {
        error = CacheFreeBlock (cache, tag);
        if ( EOK != error )
            return( error );
    }
    tag->Buffer = NULL;

    return (EOK);
}

/*
 * CacheAllocBlock
 *
 * Allocate an unused cache block.
 */
void *CacheAllocBlock (Cache_t *cache)
{
    void *  temp;

    if (cache->FreeHead == NULL)
        return (NULL);
    if (cache->FreeSize == 0)
        return (NULL);

    temp = cache->FreeHead;
    cache->FreeHead = *((void **)cache->FreeHead);
    cache->FreeSize--;

    return (temp);
}

/*
 * CacheFreeBlock
 *
 * Release an active cache block.
 */
static int
CacheFreeBlock( Cache_t *cache, Tag_t *tag )
{
    int error;

    if ( (tag->Flags & kLazyWrite) != 0 )
    {
        /* this cache block has been marked for lazy write - do it now */
        error = CacheRawWrite( cache,
                               tag->Offset,
                               cache->BlockSize,
                               tag->Buffer );
        if ( EOK != error )
        {
#if CACHE_DEBUG
            printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error );
#endif
            return ( error );
        }
        tag->Flags &= ~kLazyWrite;
    }

    if ((tag->Flags & kLockWrite) == 0)
    {
        *((void **)tag->Buffer) = cache->FreeHead;
        cache->FreeHead = (void **)tag->Buffer;
        cache->FreeSize++;
    }
    return( EOK );
}
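
/*
 * Illustrative sketch (not compiled): the block free list is intrusive --
 * the first sizeof(void *) bytes of each free block hold the pointer to
 * the next free block, so no side allocation is needed. These helpers
 * restate, in isolation, the pop and push that CacheAllocBlock and
 * CacheFreeBlock perform above.
 */
#if 0
static void *ExamplePop(Cache_t *cache)
{
    void *block = cache->FreeHead;

    if (block != NULL) {
        cache->FreeHead = *((void **)block);    /* follow the embedded link */
        cache->FreeSize--;
    }
    return block;
}

static void ExamplePush(Cache_t *cache, void *block)
{
    *((void **)block) = cache->FreeHead;        /* link in at the front */
    cache->FreeHead = (void **)block;
    cache->FreeSize++;
}
#endif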


/*
 * CacheFlush
 *
 * Write out any blocks that are marked for lazy write.
 */
int
CacheFlush( Cache_t *cache )
{
    int     error;
    int     i;
    Tag_t * myTagPtr;

    for ( i = 0; i < cache->HashSize; i++ )
    {
        myTagPtr = cache->Hash[ i ];

        while ( NULL != myTagPtr )
        {
            if ( (myTagPtr->Flags & kLazyWrite) != 0 )
            {
                /* this cache block has been marked for lazy write - do it now */
                error = CacheRawWrite( cache,
                                       myTagPtr->Offset,
                                       cache->BlockSize,
                                       myTagPtr->Buffer );
                if ( EOK != error )
                {
#if CACHE_DEBUG
                    printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error );
#endif
                    return( error );
                }
                myTagPtr->Flags &= ~kLazyWrite;
            }
            myTagPtr = myTagPtr->Next;
        } /* while */
    } /* for */

    return( EOK );

} /* CacheFlush */


/*
 * RangeIntersect
 *
 * Return true if the two given ranges intersect.
 */
static int
RangeIntersect(uint64_t start1, uint64_t len1, uint64_t start2, uint64_t len2)
{
    uint64_t end1 = start1 + len1 - 1;
    uint64_t end2 = start2 + len2 - 1;

    if (end1 < start2 || start1 > end2)
        return 0;
    else
        return 1;
}
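
/*
 * Worked example (not compiled): ranges are treated as closed byte
 * intervals. A 4096-byte block at offset 8192 covers bytes [8192, 12287],
 * so it intersects a range starting at byte 12287 but not one starting
 * at byte 12288.
 */
#if 0
static void ExampleIntersect(void)
{
    int hit  = RangeIntersect(8192, 4096, 12287, 2);    /* -> 1 */
    int miss = RangeIntersect(8192, 4096, 12288, 2);    /* -> 0 */

    printf("hit=%d miss=%d\n", hit, miss);
}
#endif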


/*
 * CacheFlushRange
 *
 * Flush, and optionally remove, all cache blocks that intersect
 * a given range.
 */
static int
CacheFlushRange( Cache_t *cache, uint64_t start, uint64_t len, int remove)
{
    int     error;
    int     i;
    Tag_t * currentTag, *nextTag;

    for ( i = 0; i < cache->HashSize; i++ )
    {
        currentTag = cache->Hash[ i ];

        while ( NULL != currentTag )
        {
            /* Keep track of the next block, in case we remove the current block */
            nextTag = currentTag->Next;

            if ( (currentTag->Flags & kLazyWrite) &&
                 RangeIntersect(currentTag->Offset, cache->BlockSize, start, len))
            {
                error = CacheRawWrite( cache,
                                       currentTag->Offset,
                                       cache->BlockSize,
                                       currentTag->Buffer );
                if ( EOK != error )
                {
#if CACHE_DEBUG
                    printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error );
#endif
                    return error;
                }
                currentTag->Flags &= ~kLazyWrite;

                if ( remove && ((currentTag->Flags & kLockWrite) == 0))
                    CacheRemove( cache, currentTag );
            }

            currentTag = nextTag;
        } /* while */
    } /* for */

    return EOK;
} /* CacheFlushRange */

/* Function: CacheCopyDiskBlocks
 *
 * Description: Perform a direct disk block copy from from_offset to to_offset
 * of the given length.
 *
 * The function flushes the cache blocks intersecting with disk blocks
 * belonging to from_offset. Invalidating the disk blocks belonging to
 * to_offset from the cache would have been sufficient, but their block
 * start and end might not lie on a cache block boundary. Therefore we
 * also flush the disk blocks belonging to to_offset to the disk.
 *
 * The function performs raw reads and writes on the disk of cache block
 * size, with the exception of the last operation.
 *
 * Note that the data written to disk does not exist in the cache after
 * this function. This function however ensures that if the device
 * offset being read/written on disk existed in the cache, it is invalidated
 * and written to disk before performing any read/write operation.
 *
 * Input:
 *  1. cache - pointer to cache.
 *  2. from_offset - disk offset to copy from.
 *  3. to_offset - disk offset to copy to.
 *  4. len - length in bytes to be copied. Note that this length should be
 *     a multiple of the disk block size, else read/write will return an error.
 *
 * Output:
 *  zero (EOK) on success.
 *  On failure, non-zero value.
 *  Known error values:
 *      ENOMEM - insufficient memory to allocate intermediate copy buffer.
 *      EINVAL - the length of data to read/write is not a multiple of
 *               the device block size, or
 *               the device offset is not a multiple of the device block size.
 *      ENXIO  - invalid disk offset.
 */
int CacheCopyDiskBlocks (Cache_t *cache, uint64_t from_offset, uint64_t to_offset, uint32_t len)
{
    int i;
    int error;
    char *tmpBuffer = NULL;
    uint32_t ioReqCount;
    uint32_t numberOfBuffersToWrite;

    /* Return an error if the length of data to be copied, or either
     * disk offset, is not a multiple of the device block size
     */
    if ((len % cache->DevBlockSize) ||
        (from_offset % cache->DevBlockSize) ||
        (to_offset % cache->DevBlockSize)) {
        error = EINVAL;
        goto out;
    }

    /* Flush contents of from_offset on the disk */
    error = CacheFlushRange(cache, from_offset, len, 1);
    if (error != EOK) goto out;

    /* Flush contents of to_offset on the disk */
    error = CacheFlushRange(cache, to_offset, len, 1);
    if (error != EOK) goto out;

    /* Allocate temporary buffer for reading/writing, currently
     * set to block size of cache.
     */
    tmpBuffer = malloc(cache->BlockSize);
    if (!tmpBuffer) {
#if CACHE_DEBUG
        printf("%s(%d): malloc(%zd) failed\n", __FUNCTION__, __LINE__, (size_t)cache->BlockSize);
#endif
        error = ENOMEM;
        goto out;
    }

    ioReqCount = cache->BlockSize;
    numberOfBuffersToWrite = (len + ioReqCount - 1) / ioReqCount;

    for (i=0; i<numberOfBuffersToWrite; i++) {
        if (i == (numberOfBuffersToWrite - 1)) {
            /* last buffer */
            ioReqCount = len - (i * cache->BlockSize);
        }

        /* Read data */
        error = CacheRawRead (cache, from_offset, ioReqCount, tmpBuffer);
        if (error != EOK) goto out;

        /* Write data */
        error = CacheRawWrite (cache, to_offset, ioReqCount, tmpBuffer);
        if (error != EOK) goto out;

#if 0
        printf ("%s: Copying %d bytes from %qd to %qd\n", __FUNCTION__, ioReqCount, from_offset, to_offset);
#endif

        /* Increment offsets with data read/written */
        from_offset += ioReqCount;
        to_offset += ioReqCount;
    }

out:
    if (tmpBuffer) {
        free (tmpBuffer);
    }
    return error;
}
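
/*
 * Illustrative sketch (not compiled): relocating a single device block.
 * The source and destination offsets are arbitrary; both they and the
 * length must be multiples of cache->DevBlockSize or EINVAL is returned.
 */
#if 0
static int ExampleCopy(Cache_t *cache)
{
    uint64_t from = 10 * (uint64_t)cache->DevBlockSize;
    uint64_t to   = 90 * (uint64_t)cache->DevBlockSize;

    /* Flushes both ranges out of the cache, then copies block by block */
    return CacheCopyDiskBlocks(cache, from, to, cache->DevBlockSize);
}
#endif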

/* Function: CacheWriteBufferToDisk
 *
 * Description: Write data to disk starting at the given offset for up to
 * write_len bytes. The data from the given buffer, up to buf_len bytes,
 * is written to the disk starting at the given offset. If the amount of
 * data to write on disk is greater than the length of the buffer, the
 * remaining range is written as zeros.
 *
 * If no buffer is provided, or if the length of the buffer is zero, the
 * function writes zeros on disk from offset up to write_len bytes.
 *
 * The function requires that the length of the buffer be less than or
 * equal to the amount of data to be written on disk. It also requires
 * that the length of data to be written on disk be a multiple of the
 * device block size.
 *
 * Note that the data written to disk does not exist in the cache after
 * this function. This function however ensures that if the device
 * offset being written on disk existed in the cache, it is invalidated
 * and written to disk before performing any read/write operation.
 *
 * Input:
 *  1. cache - pointer to cache
 *  2. offset - offset on disk to write data of buffer
 *  3. buffer - pointer to data to be written on disk
 *  4. len - length of buffer to be written on disk.
 *
 * Output:
 *  zero (EOK) on success.
 *  On failure, non-zero value.
 *  Known error values:
 *      ENOMEM - insufficient memory to allocate intermediate copy buffer.
 *      EINVAL - the length of data to read/write is not a multiple of
 *               the device block size, or
 *               the device offset is not a multiple of the device block size, or
 *               the length of data to be written on disk is less than
 *               the length of the buffer.
 *      ENXIO  - invalid disk offset.
 */
int CacheWriteBufferToDisk (Cache_t *cache, uint64_t offset, uint32_t write_len, u_char *buffer, uint32_t buf_len)
{
    int error;
    u_char *write_buffer = NULL;
    uint32_t io_count;
    uint32_t buf_offset;
    uint32_t bytes_remain;
    uint8_t zero_fill = false;

    /* Check if buffer is provided */
    if (buffer == NULL) {
        buf_len = 0;
    }

    /* Return an error if the length of data to be written on disk is
     * less than the length of the buffer to be written, or
     * is not a multiple of the device block size, or the offset to write
     * is not a multiple of the device block size
     */
    if ((write_len % cache->DevBlockSize) ||
        (offset % cache->DevBlockSize) ||
        (write_len < buf_len)) {
        error = EINVAL;
        goto out;
    }

    /* Flush cache contents of offset range to be written on the disk */
    error = CacheFlushRange(cache, offset, write_len, 1);
    if (error != EOK) {
        goto out;
    }

    /* Calculate the correct size of buffer to be written each time */
    io_count = (write_len < cache->BlockSize) ? write_len : cache->BlockSize;

    /* Allocate temporary buffer to write data to disk */
    write_buffer = malloc (io_count);
    if (!write_buffer) {
#if CACHE_DEBUG
        printf("%s(%d): malloc(%zd) failed\n", __FUNCTION__, __LINE__, (size_t)io_count);
#endif
        error = ENOMEM;
        goto out;
    }

    /* Read offset in data buffer to be written to disk */
    buf_offset = 0;

    while (write_len) {
        /* The last buffer might be less than io_count bytes */
        if (write_len < io_count) {
            io_count = write_len;
        }

        /* Check whether data buffer was written completely to disk */
        if (buf_offset < buf_len) {
            /* Calculate the bytes from data buffer still to be written */
            bytes_remain = buf_len - buf_offset;

            if (bytes_remain >= io_count) {
                /* Bytes remaining is greater than bytes written in one
                 * IO request. Limit bytes read from data buffer in this
                 * pass to the bytes written in one IO request
                 */
                bytes_remain = io_count;

                /* Copy data from data buffer to write buffer */
                memcpy (write_buffer, buffer, bytes_remain);
            } else {
                /* Bytes remaining is less than bytes written in one
                 * IO request. Zero fill the remaining write buffer.
                 */

                /* Copy data from data buffer to write buffer */
                memcpy (write_buffer, buffer, bytes_remain);

                /* Zero fill the remaining buffer, if any */
                memset (write_buffer + bytes_remain, 0, io_count - bytes_remain);
            }

            buf_offset += bytes_remain;
            buffer += bytes_remain;
        } else {
            /* Do not zero fill the buffer if we have already done it */
            if (zero_fill == false) {
                /* Zero fill entire write buffer */
                memset (write_buffer, 0, io_count);
                zero_fill = true;
            }
        }

        /* Write data */
        error = CacheRawWrite (cache, offset, io_count, write_buffer);
        if (error != EOK) goto out;

        offset += io_count;
        write_len -= io_count;
    }

out:
    /* If we allocated a temporary buffer, deallocate it */
    if (write_buffer != NULL) {
        free (write_buffer);
    }
    return error;
}
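
/*
 * Illustrative sketch (not compiled): the two common uses. Writing a
 * 64-byte payload with a write_len of one device block zero-fills the
 * rest of that block; passing no buffer zeroes the whole range. The
 * payload size is an assumption for the example.
 */
#if 0
static int ExampleZeroTail(Cache_t *cache, uint64_t offset, u_char *hdr)
{
    /* 64 bytes of data, remainder of the device block zeroed */
    return CacheWriteBufferToDisk(cache, offset, cache->DevBlockSize, hdr, 64);
}

static int ExampleZeroRange(Cache_t *cache, uint64_t offset)
{
    /* No buffer at all: four device blocks of zeros */
    return CacheWriteBufferToDisk(cache, offset, 4 * cache->DevBlockSize, NULL, 0);
}
#endif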

/*
 * CacheLookup
 *
 * Obtain a cache block. If one already exists, it is returned. Otherwise a
 * new one is created and inserted into the cache.
 */
int CacheLookup (Cache_t *cache, uint64_t off, Tag_t **tag)
{
    Tag_t *     temp;
    uint32_t    hash = off % cache->HashSize;
    int         error;

    *tag = NULL;

    /* Search the hash table */
    error = 0;
    temp = cache->Hash[hash];
    while (temp != NULL) {
        if (temp->Offset == off) break;
        temp = temp->Next;
    }

    /* If it's a hit */
    if (temp != NULL) {
        /* Perform MTF if necessary */
        if (cache->Hash[hash] != temp) {
            /* Disconnect the tag */
            if (temp->Next != NULL)
                temp->Next->Prev = temp->Prev;
            temp->Prev->Next = temp->Next;
        }

    /* Otherwise, it's a miss */
    } else {
        /* Allocate a new tag; we really only need to zero the LRU portion */
        temp = (Tag_t *)calloc (sizeof (Tag_t), 1);
        if (temp == NULL) return (ENOMEM);
        temp->Offset = off;

        /* Kick the tag onto the LRU */
        //LRUHit (&cache->LRU, (LRUNode_t *)temp, 0);
    }

    /* Insert at the head (if it's not already there) */
    if (cache->Hash[hash] != temp) {
        temp->Prev = NULL;
        temp->Next = cache->Hash[hash];
        if (temp->Next != NULL)
            temp->Next->Prev = temp;
        cache->Hash[hash] = temp;
    }

    /* Make sure there's a buffer */
    if (temp->Buffer == NULL) {
        /* Find a free buffer */
        temp->Buffer = CacheAllocBlock (cache);
        if (temp->Buffer == NULL) {
            /* Try to evict a buffer */
            error = LRUEvict (&cache->LRU, (LRUNode_t *)temp);
            if (error != EOK) return (error);

            /* Try again */
            temp->Buffer = CacheAllocBlock (cache);
            if (temp->Buffer == NULL) {
#if CACHE_DEBUG
                printf("%s(%d): CacheAllocBlock failed (FreeHead = %p, FreeSize = %u)\n", __FUNCTION__, __LINE__, cache->FreeHead, cache->FreeSize);
#endif
                return (ENOMEM);
            }
        }

        /* Load the block from disk */
        error = CacheRawRead (cache, off, cache->BlockSize, temp->Buffer);
        if (error != EOK) return (error);
    }

#if 0
    if (temp && temp->Flags & kLockWrite) {
        fprintf(stderr, "CacheLookup(%p, %llu, %p): Found cache-locked block\n", cache, off, tag);
    }
#endif

    *tag = temp;
    return (EOK);
}
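
/*
 * Illustrative sketch (not compiled): how the hash probe works. Tags are
 * chained by raw byte offset modulo HashSize; CacheLookup additionally
 * moves a found tag to the chain head (move-to-front) and faults in a
 * buffer on a miss, which this bare probe does not.
 */
#if 0
static Tag_t *ExampleProbe(Cache_t *cache, uint64_t off)
{
    Tag_t *tag = cache->Hash[off % cache->HashSize];

    while (tag != NULL && tag->Offset != off)
        tag = tag->Next;
    return tag;     /* NULL on miss */
}
#endif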

/*
 * CacheRawRead
 *
 * Perform a direct read on the file.
 */
int CacheRawRead (Cache_t *cache, uint64_t off, uint32_t len, void *buf)
{
    uint64_t    result;
    ssize_t     nread;

    /* Both offset and length must be multiples of the device block size */
    if (off % cache->DevBlockSize) return (EINVAL);
    if (len % cache->DevBlockSize) return (EINVAL);

    /* Seek to the position */
    errno = 0;
    result = lseek (cache->FD_R, off, SEEK_SET);
    if (result == (off_t)-1 && errno != 0)
        return errno;
    if (result != off) return (ENXIO);
    /* Read into the buffer */
#if CACHE_DEBUG
    printf("%s:  offset %llu, len %u\n", __FUNCTION__, off, len);
#endif
    nread = read (cache->FD_R, buf, len);
    if (nread == -1) return (errno);
    if (nread == 0) return (ENXIO);

    /* Update counters */
    cache->DiskRead++;

    return (EOK);
}

/*
 * CacheRawWrite
 *
 * Perform a direct write on the file.
 */
int CacheRawWrite (Cache_t *cache, uint64_t off, uint32_t len, void *buf)
{
    uint64_t    result;
    ssize_t     nwritten;

    /* Both offset and length must be multiples of the device block size */
    if (off % cache->DevBlockSize) return (EINVAL);
    if (len % cache->DevBlockSize) return (EINVAL);

    /* Seek to the position */
    errno = 0;
    result = lseek (cache->FD_W, off, SEEK_SET);
    if (result == (off_t)-1 && errno != 0) return (errno);
    if (result != off) return (ENXIO);

    /* Write from the buffer */
    nwritten = write (cache->FD_W, buf, len);
    if (nwritten == -1) return (errno);
    if (nwritten == 0) return (ENXIO);

    /* Update counters */
    cache->DiskWrite++;

    return (EOK);
}


/*
 * LRUInit
 *
 * Initializes the LRU data structures.
 */
static int LRUInit (LRU_t *lru)
{
    /* Make the dummy nodes point to themselves */
    lru->Head.Next = &lru->Head;
    lru->Head.Prev = &lru->Head;

    lru->Busy.Next = &lru->Busy;
    lru->Busy.Prev = &lru->Busy;

    return (EOK);
}

/*
 * LRUDestroy
 *
 * Shutdown the LRU.
 *
 * NOTE: This is typically a no-op, since the cache manager will be clearing
 *       all cache tags.
 */
static int LRUDestroy (LRU_t *lru)
{
    /* Nothing to do */
    return (EOK);
}

/*
 * LRUHit
 *
 * Registers data activity on the given node. If the node is already in the
 * LRU, it is moved to the front. Otherwise, it is inserted at the front.
 *
 * NOTE: If the node is not in the LRU, we assume that its pointers are NULL.
 */
static int LRUHit (LRU_t *lru, LRUNode_t *node, int age)
{
    /* Handle existing nodes */
    if ((node->Next != NULL) && (node->Prev != NULL)) {
        /* Detach the node */
        node->Next->Prev = node->Prev;
        node->Prev->Next = node->Next;
    }

    /* If it's busy (we can't evict it) */
    if (((Tag_t *)node)->Refs) {
        /* Insert at the head of the Busy queue */
        node->Next = lru->Busy.Next;
        node->Prev = &lru->Busy;

    } else if (age) {
        /* Insert at the tail of the LRU (first in line for eviction) */
        node->Next = &lru->Head;
        node->Prev = lru->Head.Prev;

    } else {
        /* Insert at the head of the LRU */
        node->Next = lru->Head.Next;
        node->Prev = &lru->Head;
    }

    node->Next->Prev = node;
    node->Prev->Next = node;

    return (EOK);
}
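
/*
 * Illustrative sketch (not compiled): what the age flag means. LRUEvict
 * takes victims from Head.Prev (the tail), so an aged node is reclaimed
 * before recently used ones; busy (referenced) nodes are parked on the
 * Busy queue and are never victims.
 */
#if 0
static void ExampleAge(Cache_t *cache, Tag_t *tag)
{
    /* Recently used: front of the LRU, evicted last */
    LRUHit(&cache->LRU, (LRUNode_t *)tag, 0);

    /* Unlikely to be reused: tail of the LRU, evicted first */
    LRUHit(&cache->LRU, (LRUNode_t *)tag, 1);
}
#endif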

/*
 * LRUEvict
 *
 * Chooses a buffer to release.
 *
 * TODO: Under extreme conditions, it should be possible to release the buffer
 *       of an actively referenced cache buffer, leaving the tag behind as a
 *       placeholder. This would be required for implementing 2Q-LRU
 *       replacement.
 *
 * NOTE: Make sure we never evict the node we're trying to find a buffer for!
 */
static int LRUEvict (LRU_t *lru, LRUNode_t *node)
{
    LRUNode_t * temp;

    /* Find a victim */
    while (1) {
        /* Grab the tail */
        temp = lru->Head.Prev;

        /* Stop if we're empty */
        if (temp == &lru->Head) {
#if CACHE_DEBUG
            printf("%s(%d): empty?\n", __FUNCTION__, __LINE__);
#endif
            return (ENOMEM);
        }

        /* Detach the tail */
        temp->Next->Prev = temp->Prev;
        temp->Prev->Next = temp->Next;

        /* If it's not busy, we have a victim */
        if (!((Tag_t *)temp)->Refs) break;

        /* Insert at the head of the Busy queue */
        temp->Next = lru->Busy.Next;
        temp->Prev = &lru->Busy;

        temp->Next->Prev = temp;
        temp->Prev->Next = temp;

        /* Try again */
    }

    /* Remove the tag */
    CacheRemove ((Cache_t *)lru, (Tag_t *)temp);

    return (EOK);
}

/*
 * Dump the cache contents.
 * If nobody else calls it, it gets optimized out.  Annoying and yet
 * useful.
 */
void
dumpCache(Cache_t *cache)
{
    int i;
    int numEntries = 0;

    printf("Cache:\n");
    printf("\tDevBlockSize = %u\n", cache->DevBlockSize);
    printf("\tCache Block Size = %u\n", cache->BlockSize);
    printf("\tHash Size = %u\n", cache->HashSize);
    printf("\tHash Table:\n");
    for (i = 0; i < cache->HashSize; i++) {
        Tag_t *tag;

        for (tag = cache->Hash[i]; tag; tag = tag->Next) {
            numEntries++;
            printf("\t\tOffset %llu, refs %u, Flags %#x (%skLazyWrite, %skLockWrite)\n",
                   tag->Offset, tag->Refs, tag->Flags,
                   (tag->Flags & kLazyWrite) ? "" : "no ",
                   (tag->Flags & kLockWrite) ? "" : "no ");
        }
    }
    printf("\tNumber of entries: %u\n", numEntries);
    return;
}