/*
 * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include <string.h>

#include "fsck_hfs.h"
#include "cache.h"

#define true	1
#define false	0

#define CACHE_DEBUG	0

/*
 * CacheAllocBlock
 *
 * Allocate an unused cache block.
 */
void *CacheAllocBlock (Cache_t *cache);

/*
 * CacheFreeBlock
 *
 * Release an active cache block.
 */
static int
CacheFreeBlock( Cache_t *cache, Tag_t *tag );

/*
 * CacheLookup
 *
 * Obtain a cache block. If one already exists, it is returned. Otherwise a
 * new one is created and inserted into the cache.
 */
int CacheLookup (Cache_t *cache, uint64_t off, Tag_t **tag);

/*
 * CacheRawRead
 *
 * Perform a direct read on the file.
 */
int CacheRawRead (Cache_t *cache, uint64_t off, uint32_t len, void *buf);

/*
 * CacheRawWrite
 *
 * Perform a direct write on the file.
 */
int CacheRawWrite (Cache_t *cache, uint64_t off, uint32_t len, void *buf);

/*
 * CacheFlushRange
 *
 * Flush, and optionally remove, all cache blocks that intersect
 * a given range.
 */
static int
CacheFlushRange( Cache_t *cache, uint64_t start, uint64_t len, int remove);

/*
 * LRUInit
 *
 * Initializes the LRU data structures.
 */
static int LRUInit (LRU_t *lru);

/*
 * LRUDestroy
 *
 * Shutdown the LRU.
 *
 * NOTE: This is typically a no-op, since the cache manager will be clearing
 *       all cache tags.
 */
static int LRUDestroy (LRU_t *lru);

/*
 * LRUHit
 *
 * Registers data activity on the given node. If the node is already in the
 * LRU, it is moved to the front. Otherwise, it is inserted at the front.
 *
 * NOTE: If the node is not in the LRU, we assume that its pointers are NULL.
 */
static int LRUHit (LRU_t *lru, LRUNode_t *node, int age);

/*
 * LRUEvict
 *
 * Chooses a buffer to release.
 *
 * TODO: Under extreme conditions, it should be possible to release the buffer
 *       of an actively referenced cache buffer, leaving the tag behind as a
 *       placeholder. This would be required for implementing 2Q-LRU
 *       replacement.
 */
static int LRUEvict (LRU_t *lru, LRUNode_t *node);

/*
 * CalculateCacheSizes
 *
 * Determine the cache size values that should be used to initialize the cache.
 * If the requested value does not validate according to the conditions described
 * below, it is adjusted.
 *
 * If no input values are provided, use default values for cache size
 * and cache block size.
 *
 * Cache size should be -
 *	a. greater than or equal to minimum cache size
 *	b. less than or equal to maximum cache size.  The maximum cache size
 *	   is limited by the maximum value that can be allocated using malloc
 *	   or mmap (maximum value for size_t)
 *	c. multiple of cache block size
 *
 * Returns: void
 *	*calcBlockSize:   the size of the blocks in the cache
 *	*calcTotalBlocks: the number of blocks in the cache
 */
void CalculateCacheSizes(uint64_t cacheSize, uint32_t *calcBlockSize, uint32_t *calcTotalBlocks, char cache_debug)
{
	uint32_t blockSize = DefaultCacheBlockSize;
	const size_t max_size_t = ~0;	/* Maximum value represented by size_t */

	/* Simple case - no user cache size, use default values */
	if (!cacheSize) {
		*calcBlockSize = DefaultCacheBlockSize;
		*calcTotalBlocks = DefaultCacheBlocks;
		goto out;
	}

	/* User provided cache size - check with minimum and maximum values */
	if (cacheSize < MinCacheSize) {
		cacheSize = MinCacheSize;
	}
	if (cacheSize > max_size_t ||
	    cacheSize > MaxCacheSize) {
		if (cache_debug) {
			printf ("\tCache size should be greater than %uM and less than %luM\n", MinCacheSize/(1024*1024), max_size_t/(1024*1024));
		}
		cacheSize = MaxCacheSize;
	}

	/* Cache size should be multiple of cache block size */
	if (cacheSize % blockSize) {
		if (cache_debug) {
			printf ("\tCache size should be multiple of cache block size (currently %uK)\n", blockSize/1024);
		}
		cacheSize = (cacheSize / blockSize) * blockSize;
	}

	*calcBlockSize = blockSize;
	*calcTotalBlocks = cacheSize / blockSize;

out:
	return;
}
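
/*
 * Worked example (illustrative figures only; the real limits and defaults
 * come from cache.h): assuming a 32 KB cache block size, a request for a
 * 100 MB cache yields *calcBlockSize = 32768 and *calcTotalBlocks = 3200,
 * while a request for 100 MB + 1 KB is rounded down to the same values,
 * because the size must be a multiple of the block size.
 */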

/*
 * CacheInit
 *
 * Initializes the cache for use. If preTouch is non-zero, the cache memory will
 * be iterated through, with one byte per page touched. (This is to ensure that
 * the memory is actually created, and is used to avoid deadlocking due to swapping
 * during a live verify of the boot volume.)
 */
int CacheInit (Cache_t *cache, int fdRead, int fdWrite, uint32_t devBlockSize,
		uint32_t cacheBlockSize, uint32_t cacheTotalBlocks, uint32_t hashSize, int preTouch)
{
	void **		temp;
	uint32_t	i;
	Buf_t *		buf;

	memset (cache, 0x00, sizeof (Cache_t));

	cache->FD_R = fdRead;
	cache->FD_W = fdWrite;
	cache->DevBlockSize = devBlockSize;
	/* CacheFlush requires cleared cache->Hash */
	cache->Hash = (Tag_t **) calloc( 1, (sizeof (Tag_t *) * hashSize) );
	cache->HashSize = hashSize;
	cache->BlockSize = cacheBlockSize;

	/* Allocate the cache memory */
	/* Break out of the loop on success, or when the proposed cache is < MinCacheSize */
	while (1) {
		cache->FreeHead = mmap (NULL,
					cacheTotalBlocks * cacheBlockSize,
					PROT_READ | PROT_WRITE,
					MAP_ANON | MAP_PRIVATE,
					-1,
					0);
		if (cache->FreeHead == (void *)-1) {
			if ((cacheTotalBlocks * cacheBlockSize) <= MinCacheSize) {
				if (debug)
					printf("\tTried to allocate %dK, minimum is %dK\n",
						(cacheTotalBlocks * cacheBlockSize) / 1024,
						MinCacheSize / 1024);
				break;
			}
			if (debug)
				printf("\tFailed to allocate %uK for cache; trying %uK\n",
					(cacheTotalBlocks * cacheBlockSize) / 1024,
					(cacheTotalBlocks * cacheBlockSize / 2) / 1024);
			CalculateCacheSizes((cacheTotalBlocks * cacheBlockSize) / 2, &cacheBlockSize, &cacheTotalBlocks, debug);
			continue;
		} else {
			if (debug) {
				printf ("\tUsing cacheBlockSize=%uK cacheTotalBlock=%u cacheSize=%uK.\n", cacheBlockSize/1024, cacheTotalBlocks, (cacheBlockSize/1024) * cacheTotalBlocks);
			}
			break;
		}
	}
	if (cache->FreeHead == (void*)-1) {
#if CACHE_DEBUG
		printf("%s(%d): FreeHead = -1\n", __FUNCTION__, __LINE__);
#endif
		return (ENOMEM);
	}


	/* If necessary, touch a byte in each page */
	if (preTouch) {
		size_t pageSize = getpagesize();
		unsigned char *ptr = (unsigned char *)cache->FreeHead;
		unsigned char *end = ptr + (cacheTotalBlocks * cacheBlockSize);
		while (ptr < end) {
			*ptr = 0;
			ptr += pageSize;
		}
	}

	/* Initialize the cache memory free list */
	temp = cache->FreeHead;
	for (i = 0; i < cacheTotalBlocks - 1; i++) {
		*temp = ((char *)temp + cacheBlockSize);
		temp  = (void **)((char *)temp + cacheBlockSize);
	}
	*temp = NULL;
	cache->FreeSize = cacheTotalBlocks;

	buf = (Buf_t *)malloc(sizeof(Buf_t) * MAXBUFS);
	if (buf == NULL) {
#if CACHE_DEBUG
		printf("%s(%d): malloc(%zu) failed\n", __FUNCTION__, __LINE__, sizeof(Buf_t) * MAXBUFS);
#endif
		return (ENOMEM);
	}

	memset (&buf[0], 0x00, sizeof (Buf_t) * MAXBUFS);
	for (i = 1 ; i < MAXBUFS ; i++) {
		(&buf[i-1])->Next = &buf[i];
	}
	cache->FreeBufs = &buf[0];

#if CACHE_DEBUG
	printf( "%s - cacheTotalBlocks %d cacheBlockSize %d hashSize %d \n",
		__FUNCTION__, cacheTotalBlocks, cacheBlockSize, hashSize );
	printf( "%s - cache memory %d \n", __FUNCTION__, (cacheTotalBlocks * cacheBlockSize) );
#endif

	return (LRUInit (&cache->LRU));
}
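
/*
 * Usage sketch (illustrative, not part of the original file; the device
 * path, device block size, cache size, and hash size below are
 * hypothetical values):
 */
#if 0
	Cache_t		cache;
	uint32_t	blockSize, totalBlocks;
	int		fd = open ("/dev/rdisk0s2", O_RDWR);	/* hypothetical device */

	CalculateCacheSizes (64ULL * 1024 * 1024, &blockSize, &totalBlocks, debug);
	if (CacheInit (&cache, fd, fd, 512, blockSize, totalBlocks, 257, 0) != EOK) {
		/* fall back to a smaller cache or bail out */
	}
#endif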


/*
 * CacheDestroy
 *
 * Shutdown the cache.
 */
int CacheDestroy (Cache_t *cache)
{
	CacheFlush( cache );

#if CACHE_DEBUG
	/* Print cache report */
	printf ("Cache Report:\n");
	printf ("\tRead Requests:  %d\n", cache->ReqRead);
	printf ("\tWrite Requests: %d\n", cache->ReqWrite);
	printf ("\tDisk Reads:     %d\n", cache->DiskRead);
	printf ("\tDisk Writes:    %d\n", cache->DiskWrite);
	printf ("\tSpans:          %d\n", cache->Span);
#endif
	/* Shutdown the LRU */
	LRUDestroy (&cache->LRU);

	/* I'm lazy, I'll come back to it :P */
	return (EOK);
}

/*
 * CacheRead
 *
 * Reads a range of bytes from the cache, returning a pointer to a buffer
 * containing the requested bytes.
 *
 * NOTE: The returned buffer may directly refer to a cache block, or an
 *       anonymous buffer. Do not make any assumptions about the nature of
 *       the returned buffer, except that it is contiguous.
 */
int CacheRead (Cache_t *cache, uint64_t off, uint32_t len, Buf_t **bufp)
{
	Tag_t *		tag;
	Buf_t *		searchBuf;
	Buf_t *		buf;
	uint32_t	coff = (off % cache->BlockSize);
	uint64_t	cblk = (off - coff);
	int		error;

	/* Check for conflicts with other bufs */
	searchBuf = cache->ActiveBufs;
	while (searchBuf != NULL) {
		if ((searchBuf->Offset >= off) && (searchBuf->Offset < off + len)) {
#if CACHE_DEBUG
			printf ("ERROR: CacheRead: Deadlock (searchBuff = <%llu, %u>, off = %llu, off+len = %llu)\n", searchBuf->Offset, searchBuf->Length, off, off+len);
#endif
			return (EDEADLK);
		}

		searchBuf = searchBuf->Next;
	}

	/* get a free buffer */
	if ((buf = cache->FreeBufs) == NULL) {
#if CACHE_DEBUG
		printf ("ERROR: CacheRead: no more bufs!\n");
#endif
		return (ENOBUFS);
	}
	cache->FreeBufs = buf->Next;
	*bufp = buf;

	/* Clear the buf structure */
	buf->Next	= NULL;
	buf->Prev	= NULL;
	buf->Flags	= 0;
	buf->Offset	= off;
	buf->Length	= len;
	buf->Buffer	= NULL;

	/* If this is unaligned or spans multiple cache blocks */
	if ((cblk / cache->BlockSize) != ((off + len - 1) / cache->BlockSize)) {
		buf->Flags |= BUF_SPAN;
	}
	/* Fetch the first cache block */
#if CACHE_DEBUG
	printf("%s(%d): Looking up cache block %llu for offset %llu, cache blockSize %u\n", __FUNCTION__, __LINE__, cblk, off, cache->BlockSize);
#endif
	error = CacheLookup (cache, cblk, &tag);
	if (error != EOK) {
#if CACHE_DEBUG
		printf ("ERROR: CacheRead: CacheLookup error %d\n", error);
#endif
		return (error);
	}

	/* If we live nicely inside a cache block */
	if (!(buf->Flags & BUF_SPAN)) {
		/* Offset the buffer into the cache block */
		buf->Buffer = tag->Buffer + coff;

		/* Bump the cache block's reference count */
		tag->Refs++;

		/* Kick the node into the right queue */
		LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);

	/* Otherwise, things get ugly */
	} else {
		uint32_t	boff;	/* Offset into the buffer */
		uint32_t	blen;	/* Space to fill in the buffer */
		uint32_t	temp;

		/* Allocate a temp buffer */
		buf->Buffer = (void *)malloc (len);
		if (buf->Buffer == NULL) {
#if CACHE_DEBUG
			printf ("ERROR: CacheRead: No Memory\n");
#endif
			return (ENOMEM);
		}

		/* Blit the first chunk into the buffer */
		boff = cache->BlockSize - coff;
		blen = len - boff;
#if CACHE_DEBUG
		printf("INFO: memcpy(%p, %p + %u, %u)\n", buf->Buffer, tag->Buffer, coff, boff);
#endif
		memcpy (buf->Buffer, tag->Buffer + coff, boff);

		/* Bump the cache block's reference count */
		tag->Refs++;

		/* Kick the node into the right queue */
		LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);

		/* Next cache block */
		cblk += cache->BlockSize;

		/* Read data a cache block at a time */
		while (blen) {
			/* Fetch the next cache block */
			error = CacheLookup (cache, cblk, &tag);
			if (error != EOK) {
				/* Free the allocated buffer */
				free (buf->Buffer);
				buf->Buffer = NULL;

				/* Release all the held tags */
				cblk -= cache->BlockSize;
				while (boff) {
					if (CacheLookup (cache, cblk, &tag) != EOK) {
						fprintf (stderr, "CacheRead: Unrecoverable error\n");
						exit (-1);
					}
					tag->Refs--;

					/* Kick the node into the right queue */
					LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);

					/* Step back over the bytes already copied */
					boff -= ((boff > cache->BlockSize) ? cache->BlockSize : boff);
					cblk -= cache->BlockSize;
				}

				return (error);
			}

			/* Blit the cache block into the buffer */
			temp = ((blen > cache->BlockSize) ? cache->BlockSize : blen);
#if CACHE_DEBUG
			printf ("INFO: memcpy(%p + %u, %p, %u)\n", buf->Buffer, boff, tag->Buffer, temp);
#endif
			memcpy (buf->Buffer + boff,
				tag->Buffer,
				temp);

			/* Update counters */
			boff += temp;
			blen -= temp;
			tag->Refs++;

			/* Advance to the next cache block */
			cblk += cache->BlockSize;

			/* Kick the node into the right queue */
			LRUHit (&cache->LRU, (LRUNode_t *)tag, 0);
		}

		/* Count the spanned access */
		cache->Span++;
	}

	/* Attach to head of active buffers list */
	if (cache->ActiveBufs != NULL) {
		buf->Next = cache->ActiveBufs;
		buf->Prev = NULL;

		cache->ActiveBufs->Prev = buf;

	} else {
		cache->ActiveBufs = buf;
	}

	/* Update counters */
	cache->ReqRead++;
	return (EOK);
}
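
/*
 * Usage sketch (illustrative; the offset and length are hypothetical): a
 * read-only caller pairs CacheRead with CacheRelease so the underlying
 * cache blocks are unreferenced again afterwards:
 */
#if 0
	Buf_t *buf;

	if (CacheRead (&cache, 1024, 512, &buf) == EOK) {
		/* ... inspect buf->Buffer[0..511] ... */
		CacheRelease (&cache, buf, 0);
	}
#endif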

/*
 * XXX
 * All of the uses of kLockWrite need to be audited for
 * when the journal replay is writing.
 */
/*
 * CacheWrite
 *
 * Writes a buffer through the cache.
 */
int CacheWrite ( Cache_t *cache, Buf_t *buf, int age, uint32_t writeOptions )
{
	Tag_t *		tag;
	uint32_t	coff = (buf->Offset % cache->BlockSize);
	uint64_t	cblk = (buf->Offset - coff);
	int		error;

	/* Fetch the first cache block */
	error = CacheLookup (cache, cblk, &tag);
	if (error != EOK) return (error);

	/* If the buffer was a direct reference */
	if (!(buf->Flags & BUF_SPAN)) {
		/* Commit the dirty block */
		if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 )
		{
			/* Copy flags to tag */
			tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite));
		}
		else
		{
			error = CacheRawWrite (cache,
					       tag->Offset,
					       cache->BlockSize,
					       tag->Buffer);
			if (error != EOK) return (error);
		}

		/* Release the reference */
		if ((writeOptions & kLockWrite) == 0)
			tag->Refs--;

		/* Kick the node into the right queue */
		LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

	/* Otherwise, we do the ugly thing again */
	} else {
		uint32_t	boff;	/* Offset into the buffer */
		uint32_t	blen;	/* Space to fill in the buffer */
		uint32_t	temp;

		/* Blit the first chunk back into the cache */
		boff = cache->BlockSize - coff;
		blen = buf->Length - boff;
		memcpy (tag->Buffer + coff, buf->Buffer, boff);

		/* Commit the dirty block */
		if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 )
		{
			/* flag this for lazy write */
			tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite));
		}
		else
		{
			error = CacheRawWrite (cache,
					       tag->Offset,
					       cache->BlockSize,
					       tag->Buffer);
			if (error != EOK) return (error);
		}

		/* Release the cache block reference */
		if ((writeOptions & kLockWrite) == 0)
			tag->Refs--;

		/* Kick the node into the right queue */
		LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

		/* Next cache block */
		cblk += cache->BlockSize;

		/* Write data a cache block at a time */
		while (blen) {
			/* Fetch the next cache block */
			error = CacheLookup (cache, cblk, &tag);
			/* We must go through with the write regardless */

			/* Blit the next buffer chunk back into the cache */
			temp = ((blen > cache->BlockSize) ? cache->BlockSize : blen);
			memcpy (tag->Buffer,
				buf->Buffer + boff,
				temp);

			/* Commit the dirty block */
			if ( (writeOptions & (kLazyWrite | kLockWrite)) != 0 )
			{
				/* flag this for lazy write */
				tag->Flags |= (writeOptions & (kLazyWrite | kLockWrite));
			}
			else
			{
				error = CacheRawWrite (cache,
						       tag->Offset,
						       cache->BlockSize,
						       tag->Buffer);
				if (error != EOK) return (error);
			}

			/* Update counters */
			boff += temp;
			blen -= temp;
			if ((writeOptions & kLockWrite) == 0)
				tag->Refs--;

			/* Kick the node into the right queue */
			LRUHit (&cache->LRU, (LRUNode_t *)tag, age);
			/* And go to the next cache block */
			cblk += cache->BlockSize;
		}

		/* Release the anonymous buffer */
		free (buf->Buffer);
	}

	/* Detach the buffer */
	if (buf->Next != NULL)
		buf->Next->Prev = buf->Prev;
	if (buf->Prev != NULL)
		buf->Prev->Next = buf->Next;
	if (cache->ActiveBufs == buf)
		cache->ActiveBufs = buf->Next;

	/* Clear the buffer and put it back on free list */
	memset (buf, 0x00, sizeof (Buf_t));
	buf->Next = cache->FreeBufs;
	cache->FreeBufs = buf;

	/* Update counters */
	cache->ReqWrite++;

	return (EOK);
}
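
/*
 * Usage sketch (illustrative) of a read-modify-write cycle. Passing
 * kLazyWrite defers the disk write until CacheFlush/CacheFreeBlock gets
 * around to it; passing 0 writes through immediately:
 */
#if 0
	Buf_t *buf;

	if (CacheRead (&cache, 1024, 512, &buf) == EOK) {
		((char *)buf->Buffer)[0] = 0x42;	/* hypothetical edit */
		CacheWrite (&cache, buf, 0, kLazyWrite);
		/* buf has been recycled by CacheWrite; do not touch it again */
	}
#endif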

/*
 * CacheRelease
 *
 * Releases a clean buffer.
 *
 * NOTE: We don't verify whether it's dirty or not.
 */
int CacheRelease (Cache_t *cache, Buf_t *buf, int age)
{
	Tag_t *		tag;
	uint32_t	coff = (buf->Offset % cache->BlockSize);
	uint64_t	cblk = (buf->Offset - coff);
	int		error;

	/* Fetch the first cache block */
	error = CacheLookup (cache, cblk, &tag);
	if (error != EOK) {
#if CACHE_DEBUG
		printf ("ERROR: CacheRelease: CacheLookup error\n");
#endif
		return (error);
	}

	/* If the buffer was a direct reference */
	if (!(buf->Flags & BUF_SPAN)) {
		/* Release the reference */
		if ((tag->Flags & kLockWrite) == 0) {
			tag->Refs--;
		}

		/* Kick the node into the right queue */
		LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

	/* Otherwise, we do the ugly thing again */
	} else {
		uint32_t	blen;	/* Space to fill in the buffer */

		/* Compute the length remaining past the first cache block */
		blen = buf->Length - cache->BlockSize + coff;

		/* Release the cache block reference */
		if ((tag->Flags & kLockWrite) == 0) {
			tag->Refs--;
		}

		/* Kick the node into the right queue */
		LRUHit (&cache->LRU, (LRUNode_t *)tag, age);

		/* Next cache block */
		cblk += cache->BlockSize;

		/* Release cache blocks one at a time */
		while (blen) {
			/* Fetch the next cache block */
			error = CacheLookup (cache, cblk, &tag);
			/* We must finish releasing the blocks regardless */

			/* Update counters */
			blen -= ((blen > cache->BlockSize) ? cache->BlockSize : blen);
			if ((tag->Flags & kLockWrite) == 0)
				tag->Refs--;

			/* Kick the node into the right queue */
			LRUHit (&cache->LRU, (LRUNode_t *)tag, age);
			/* Advance to the next block */
			cblk += cache->BlockSize;
		}

		/* Release the anonymous buffer */
		free (buf->Buffer);
	}

	/* Detach the buffer */
	if (buf->Next != NULL)
		buf->Next->Prev = buf->Prev;
	if (buf->Prev != NULL)
		buf->Prev->Next = buf->Next;
	if (cache->ActiveBufs == buf)
		cache->ActiveBufs = buf->Next;

	/* Clear the buffer and put it back on free list */
	memset (buf, 0x00, sizeof (Buf_t));
	buf->Next = cache->FreeBufs;
	cache->FreeBufs = buf;

	return (EOK);
}

/*
 * CacheRemove
 *
 * Disposes of a particular buffer.
 */
int CacheRemove (Cache_t *cache, Tag_t *tag)
{
	int error;

	/* Make sure it's not busy */
	if (tag->Refs) return (EBUSY);

	/* Detach the tag */
	if (tag->Next != NULL)
		tag->Next->Prev = tag->Prev;
	if (tag->Prev != NULL)
		tag->Prev->Next = tag->Next;
	else
		cache->Hash[tag->Offset % cache->HashSize] = tag->Next;

	/* Make sure the head node doesn't have a back pointer */
	if ((cache->Hash[tag->Offset % cache->HashSize] != NULL) &&
	    (cache->Hash[tag->Offset % cache->HashSize]->Prev != NULL)) {
#if CACHE_DEBUG
		printf ("ERROR: CacheRemove: Corrupt hash chain\n");
#endif
	}

	/* Release its buffer (if it has one) */
	if (tag->Buffer != NULL)
	{
		error = CacheFreeBlock (cache, tag);
		if ( EOK != error )
			return( error );
	}

	/* Zero the tag (for easy debugging) */
	memset (tag, 0x00, sizeof (Tag_t));

	/* Free the tag */
	free (tag);

	return (EOK);
}

/*
 * CacheEvict
 *
 * Only dispose of the buffer, leave the tag intact.
 */
int CacheEvict (Cache_t *cache, Tag_t *tag)
{
	int error;

	/* Make sure it's not busy */
	if (tag->Refs) return (EBUSY);

	/* Release the buffer */
	if (tag->Buffer != NULL)
	{
		error = CacheFreeBlock (cache, tag);
		if ( EOK != error )
			return( error );
	}
	tag->Buffer = NULL;

	return (EOK);
}

/*
 * CacheAllocBlock
 *
 * Allocate an unused cache block.
 */
void *CacheAllocBlock (Cache_t *cache)
{
	void *	temp;

	if (cache->FreeHead == NULL)
		return (NULL);
	if (cache->FreeSize == 0)
		return (NULL);

	temp = cache->FreeHead;
	cache->FreeHead = *((void **)cache->FreeHead);
	cache->FreeSize--;

	return (temp);
}
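
/*
 * Note: the free list is intrusive; each free block's first pointer-sized
 * word holds the address of the next free block (see the initialization
 * loop in CacheInit). With three free blocks the chain looks like
 *
 *	FreeHead -> block0 -> block1 -> block2 -> NULL
 *
 * so CacheAllocBlock is a constant-time pop of block0.
 */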

/*
 * CacheFreeBlock
 *
 * Release an active cache block.
 */
static int
CacheFreeBlock( Cache_t *cache, Tag_t *tag )
{
	int error;

	if ( (tag->Flags & kLazyWrite) != 0 )
	{
		/* this cache block has been marked for lazy write - do it now */
		error = CacheRawWrite( cache,
				       tag->Offset,
				       cache->BlockSize,
				       tag->Buffer );
		if ( EOK != error )
		{
#if CACHE_DEBUG
			printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error );
#endif
			return ( error );
		}
		tag->Flags &= ~kLazyWrite;
	}

	if ((tag->Flags & kLockWrite) == 0)
	{
		*((void **)tag->Buffer) = cache->FreeHead;
		cache->FreeHead = (void **)tag->Buffer;
		cache->FreeSize++;
	}
	return( EOK );
}


/*
 * CacheFlush
 *
 * Write out any blocks that are marked for lazy write.
 */
int
CacheFlush( Cache_t *cache )
{
	int		error;
	int		i;
	Tag_t *		myTagPtr;

	for ( i = 0; i < cache->HashSize; i++ )
	{
		myTagPtr = cache->Hash[ i ];

		while ( NULL != myTagPtr )
		{
			if ( (myTagPtr->Flags & kLazyWrite) != 0 )
			{
				/* this cache block has been marked for lazy write - do it now */
				error = CacheRawWrite( cache,
						       myTagPtr->Offset,
						       cache->BlockSize,
						       myTagPtr->Buffer );
				if ( EOK != error )
				{
#if CACHE_DEBUG
					printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error );
#endif
					return( error );
				}
				myTagPtr->Flags &= ~kLazyWrite;
			}
			myTagPtr = myTagPtr->Next;
		} /* while */
	} /* for */

	return( EOK );

} /* CacheFlush */


/*
 * RangeIntersect
 *
 * Return true if the two given ranges intersect.
 *
 */
static int
RangeIntersect(uint64_t start1, uint64_t len1, uint64_t start2, uint64_t len2)
{
	uint64_t end1 = start1 + len1 - 1;
	uint64_t end2 = start2 + len2 - 1;

	if (end1 < start2 || start1 > end2)
		return 0;
	else
		return 1;
}
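
/*
 * For example, RangeIntersect(0, 4096, 4095, 1) returns 1, since the
 * closed intervals [0, 4095] and [4095, 4095] share their last byte,
 * while RangeIntersect(0, 4096, 4096, 1) returns 0.
 */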


/*
 * CacheFlushRange
 *
 * Flush, and optionally remove, all cache blocks that intersect
 * a given range.
 */
static int
CacheFlushRange( Cache_t *cache, uint64_t start, uint64_t len, int remove)
{
	int		error;
	int		i;
	Tag_t		*currentTag, *nextTag;

	for ( i = 0; i < cache->HashSize; i++ )
	{
		currentTag = cache->Hash[ i ];

		while ( NULL != currentTag )
		{
			/* Keep track of the next block, in case we remove the current block */
			nextTag = currentTag->Next;

			if ( currentTag->Flags & kLazyWrite &&
			     RangeIntersect(currentTag->Offset, cache->BlockSize, start, len))
			{
				error = CacheRawWrite( cache,
						       currentTag->Offset,
						       cache->BlockSize,
						       currentTag->Buffer );
				if ( EOK != error )
				{
#if CACHE_DEBUG
					printf( "%s - CacheRawWrite failed with error %d \n", __FUNCTION__, error );
#endif
					return error;
				}
				currentTag->Flags &= ~kLazyWrite;

				if ( remove && ((currentTag->Flags & kLockWrite) == 0))
					CacheRemove( cache, currentTag );
			}

			currentTag = nextTag;
		} /* while */
	} /* for */

	return EOK;
} /* CacheFlushRange */

/* Function: CacheCopyDiskBlocks
 *
 * Description: Perform a direct disk block copy from from_offset to to_offset
 * of the given length.
 *
 * The function flushes the cache blocks intersecting with disk blocks
 * belonging to from_offset. Invalidating the disk blocks belonging to
 * to_offset from the cache would have been sufficient, but their block
 * start and end might not lie on cache block size boundaries. Therefore we
 * flush the disk blocks belonging to to_offset to the disk as well.
 *
 * The function performs raw reads and writes on the disk of cache block
 * size, with the exception of the last operation.
 *
 * Note that the data written to disk does not exist in the cache after
 * this function. This function however ensures that if the device
 * offset being read/written on disk existed in the cache, it is invalidated
 * and written to disk before performing any read/write operation.
 *
 * Input:
 *	1. cache - pointer to cache.
 *	2. from_offset - disk offset to copy from.
 *	3. to_offset - disk offset to copy to.
 *	4. len - length in bytes to be copied. Note that this length should be
 *	   a multiple of disk block size, else read/write will return error.
 *
 * Output:
 *	zero (EOK) on success.
 *	On failure, non-zero value.
 *	Known error values:
 *		ENOMEM - insufficient memory to allocate intermediate copy buffer.
 *		EINVAL - the length of data to read/write is not a multiple of
 *			 device block size, or
 *			 the device offset is not a multiple of device block size.
 *		ENXIO  - invalid disk offset
 */
int CacheCopyDiskBlocks (Cache_t *cache, uint64_t from_offset, uint64_t to_offset, uint32_t len)
{
	int i;
	int error;
	char *tmpBuffer = NULL;
	uint32_t ioReqCount;
	uint32_t numberOfBuffersToWrite;

	/* Return error if the length to be copied or either disk offset
	 * is not a multiple of the device block size
	 */
	if ((len % cache->DevBlockSize) ||
	    (from_offset % cache->DevBlockSize) ||
	    (to_offset % cache->DevBlockSize)) {
		error = EINVAL;
		goto out;
	}

	/* Flush contents of from_offset on the disk */
	error = CacheFlushRange(cache, from_offset, len, 1);
	if (error != EOK) goto out;

	/* Flush contents of to_offset on the disk */
	error = CacheFlushRange(cache, to_offset, len, 1);
	if (error != EOK) goto out;

	/* Allocate temporary buffer for reading/writing, currently
	 * set to block size of cache.
	 */
	tmpBuffer = malloc(cache->BlockSize);
	if (!tmpBuffer) {
#if CACHE_DEBUG
		printf("%s(%d): malloc(%zd) failed\n", __FUNCTION__, __LINE__, (size_t)cache->BlockSize);
#endif
		error = ENOMEM;
		goto out;
	}

	ioReqCount = cache->BlockSize;
	numberOfBuffersToWrite = (len + ioReqCount - 1) / ioReqCount;

	for (i=0; i<numberOfBuffersToWrite; i++) {
		if (i == (numberOfBuffersToWrite - 1)) {
			/* last buffer */
			ioReqCount = len - (i * cache->BlockSize);
		}

		/* Read data */
		error = CacheRawRead (cache, from_offset, ioReqCount, tmpBuffer);
		if (error != EOK) goto out;

		/* Write data */
		error = CacheRawWrite (cache, to_offset, ioReqCount, tmpBuffer);
		if (error != EOK) goto out;

#if 0
		printf ("%s: Copying %d bytes from %qd to %qd\n", __FUNCTION__, ioReqCount, from_offset, to_offset);
#endif

		/* Increment offsets with data read/written */
		from_offset += ioReqCount;
		to_offset += ioReqCount;
	}

out:
	if (tmpBuffer) {
		free (tmpBuffer);
	}
	return error;
}
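
/*
 * Usage sketch (illustrative; the offsets and length are hypothetical and
 * must all be multiples of the device block size):
 */
#if 0
	/* Copy 16 KB of on-disk data from byte offset 0 to byte offset 32768 */
	if (CacheCopyDiskBlocks (&cache, 0, 32768, 16384) != EOK) {
		/* handle the error */
	}
#endif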

/* Function: CacheWriteBufferToDisk
 *
 * Description: Write data to disk starting at the given offset for up to
 * write_len bytes. The data from the given buffer, up to buf_len bytes, is
 * written to the disk starting at the given offset. If the amount of data
 * written on disk is greater than the length of the buffer, all the
 * remaining data is written as zeros.
 *
 * If no buffer is provided or if the length of the buffer is zero, the
 * function writes zeros on disk from offset up to write_len bytes.
 *
 * The function requires that the length of the buffer is either equal to
 * or less than the data to be written on disk. It also requires that the
 * length of data to be written on disk is a multiple of the device block
 * size.
 *
 * Note that the data written to disk does not exist in the cache after
 * this function. This function however ensures that if the device
 * offset being written on disk existed in the cache, it is invalidated
 * and written to disk before performing any read/write operation.
 *
 * Input:
 *	1. cache - pointer to cache
 *	2. offset - offset on disk to write data of buffer
 *	3. buffer - pointer to data to be written on disk
 *	4. len - length of buffer to be written on disk.
 *
 * Output:
 *	zero (EOK) on success.
 *	On failure, non-zero value.
 *	Known error values:
 *		ENOMEM - insufficient memory to allocate intermediate copy buffer.
 *		EINVAL - the length of data to read/write is not a multiple of
 *			 device block size, or
 *			 the device offset is not a multiple of device block size, or
 *			 the length of data to be written on disk is less than
 *			 the length of buffer.
 *		ENXIO  - invalid disk offset
 */
int CacheWriteBufferToDisk (Cache_t *cache, uint64_t offset, uint32_t write_len, u_char *buffer, uint32_t buf_len)
{
	int error;
	u_char *write_buffer = NULL;
	uint32_t io_count;
	uint32_t buf_offset;
	uint32_t bytes_remain;
	uint8_t zero_fill = false;

	/* Check if buffer is provided */
	if (buffer == NULL) {
		buf_len = 0;
	}

	/* Return error if the length of data to be written on disk is
	 * less than the length of the buffer to be written, or
	 * is not a multiple of the device block size, or the offset to write
	 * is not a multiple of the device block size
	 */
	if ((write_len % cache->DevBlockSize) ||
	    (offset % cache->DevBlockSize) ||
	    (write_len < buf_len)) {
		error = EINVAL;
		goto out;
	}

	/* Flush cache contents of offset range to be written on the disk */
	error = CacheFlushRange(cache, offset, write_len, 1);
	if (error != EOK) {
		goto out;
	}

	/* Calculate correct size of buffer to be written each time */
	io_count = (write_len < cache->BlockSize) ? write_len : cache->BlockSize;

	/* Allocate temporary buffer to write data to disk */
	write_buffer = malloc (io_count);
	if (!write_buffer) {
#if CACHE_DEBUG
		printf("%s(%d): malloc(%zd) failed\n", __FUNCTION__, __LINE__, (size_t)cache->BlockSize);
#endif
		error = ENOMEM;
		goto out;
	}

	/* Read offset in data buffer to be written to disk */
	buf_offset = 0;

	while (write_len) {
		/* The last buffer might be less than io_count bytes */
		if (write_len < io_count) {
			io_count = write_len;
		}

		/* Check whether data buffer was written completely to disk */
		if (buf_offset < buf_len) {
			/* Calculate the bytes from data buffer still to be written */
			bytes_remain = buf_len - buf_offset;

			if (bytes_remain >= io_count) {
				/* Bytes remaining is greater than bytes written in one
				 * IO request.  Limit bytes read from data buffer in this
				 * pass to the bytes written in one IO request
				 */
				bytes_remain = io_count;

				/* Copy data from data buffer to write buffer */
				memcpy (write_buffer, buffer, bytes_remain);
			} else {
				/* Bytes remaining is less than bytes written in one
				 * IO request.  Zero fill the remaining write buffer.
				 */

				/* Copy data from data buffer to write buffer */
				memcpy (write_buffer, buffer, bytes_remain);

				/* Zero fill remain buffer, if any */
				memset (write_buffer + bytes_remain, 0, io_count - bytes_remain);
			}

			buf_offset += bytes_remain;
			buffer += bytes_remain;
		} else {
			/* Do not zero fill the buffer if we have already done it */
			if (zero_fill == false) {
				/* Zero fill entire write buffer */
				memset (write_buffer, 0, io_count);
				zero_fill = true;
			}
		}

		/* Write data */
		error = CacheRawWrite (cache, offset, io_count, write_buffer);
		if (error != EOK) goto out;

		offset += io_count;
		write_len -= io_count;
	}

out:
	/* If we allocated a temporary buffer, deallocate it */
	if (write_buffer != NULL) {
		free (write_buffer);
	}
	return error;
}
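
/*
 * Usage sketch (illustrative; offset and length are hypothetical): because
 * buf_len may be shorter than write_len, passing buffer == NULL (or
 * buf_len == 0) zero-fills the whole range, e.g. to wipe an 8 KB region:
 */
#if 0
	if (CacheWriteBufferToDisk (&cache, 65536, 8192, NULL, 0) != EOK) {
		/* handle the error */
	}
#endif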

/*
 * CacheLookup
 *
 * Obtain a cache block. If one already exists, it is returned. Otherwise a
 * new one is created and inserted into the cache.
 */
int CacheLookup (Cache_t *cache, uint64_t off, Tag_t **tag)
{
	Tag_t *		temp;
	uint32_t	hash = off % cache->HashSize;
	int		error;

	*tag = NULL;

	/* Search the hash table */
	error = 0;
	temp = cache->Hash[hash];
	while (temp != NULL) {
		if (temp->Offset == off) break;
		temp = temp->Next;
	}

	/* If it's a hit */
	if (temp != NULL) {
		/* Perform MTF if necessary */
		if (cache->Hash[hash] != temp) {
			/* Disconnect the tag */
			if (temp->Next != NULL)
				temp->Next->Prev = temp->Prev;
			temp->Prev->Next = temp->Next;
		}

	/* Otherwise, it's a miss */
	} else {
		/* Allocate a new tag; we really only need to zero the LRU
		 * portion, but calloc keeps debugging simple */
		temp = (Tag_t *)calloc (sizeof (Tag_t), 1);
		if (temp == NULL)
			return (ENOMEM);
		temp->Offset = off;

		/* Kick the tag onto the LRU */
		//LRUHit (&cache->LRU, (LRUNode_t *)temp, 0);
	}

	/* Insert at the head (if it's not already there) */
	if (cache->Hash[hash] != temp) {
		temp->Prev = NULL;
		temp->Next = cache->Hash[hash];
		if (temp->Next != NULL)
			temp->Next->Prev = temp;
		cache->Hash[hash] = temp;
	}

	/* Make sure there's a buffer */
	if (temp->Buffer == NULL) {
		/* Find a free buffer */
		temp->Buffer = CacheAllocBlock (cache);
		if (temp->Buffer == NULL) {
			/* Try to evict a buffer */
			error = LRUEvict (&cache->LRU, (LRUNode_t *)temp);
			if (error != EOK) return (error);

			/* Try again */
			temp->Buffer = CacheAllocBlock (cache);
			if (temp->Buffer == NULL) {
#if CACHE_DEBUG
				printf("%s(%d): CacheAllocBlock failed (FreeHead = %p, FreeSize = %u)\n", __FUNCTION__, __LINE__, cache->FreeHead, cache->FreeSize);
#endif
				return (ENOMEM);
			}
		}

		/* Load the block from disk */
		error = CacheRawRead (cache, off, cache->BlockSize, temp->Buffer);
		if (error != EOK) return (error);
	}

#if 0
	if (temp && temp->Flags & kLockWrite) {
		fprintf(stderr, "CacheLookup(%p, %llu, %p): Found cache-locked block\n", cache, off, tag);
	}
#endif

	*tag = temp;
	return (EOK);
}

/*
 * CacheRawRead
 *
 * Perform a direct read on the file.
 */
int CacheRawRead (Cache_t *cache, uint64_t off, uint32_t len, void *buf)
{
	uint64_t	result;
	ssize_t		nread;

	/* Both offset and length must be multiples of the device block size */
	if (off % cache->DevBlockSize) return (EINVAL);
	if (len % cache->DevBlockSize) return (EINVAL);

	/* Seek to the position */
	errno = 0;
	result = lseek (cache->FD_R, off, SEEK_SET);
	if (result == (off_t)-1 && errno != 0)
		return errno;
	if (result != off) return (ENXIO);
	/* Read into the buffer */
#if CACHE_DEBUG
	printf("%s: offset %llu, len %u\n", __FUNCTION__, off, len);
#endif
	nread = read (cache->FD_R, buf, len);
	if (nread == -1) return (errno);
	if (nread == 0) return (ENXIO);

	/* Update counters */
	cache->DiskRead++;

	return (EOK);
}

/*
 * CacheRawWrite
 *
 * Perform a direct write on the file.
 */
int CacheRawWrite (Cache_t *cache, uint64_t off, uint32_t len, void *buf)
{
	uint64_t	result;
	ssize_t		nwritten;

	/* Both offset and length must be multiples of the device block size */
	if (off % cache->DevBlockSize) return (EINVAL);
	if (len % cache->DevBlockSize) return (EINVAL);

	/* Seek to the position */
	errno = 0;
	result = lseek (cache->FD_W, off, SEEK_SET);
	if (result == (off_t)-1 && errno != 0) return (errno);
	if (result != off) return (ENXIO);

	/* Write into the buffer */
	nwritten = write (cache->FD_W, buf, len);
	if (nwritten == -1) return (errno);
	if (nwritten == 0) return (ENXIO);

	/* Update counters */
	cache->DiskWrite++;

	return (EOK);
}


/*
 * LRUInit
 *
 * Initializes the LRU data structures.
 */
static int LRUInit (LRU_t *lru)
{
	/* Make the dummy nodes point to themselves */
	lru->Head.Next = &lru->Head;
	lru->Head.Prev = &lru->Head;

	lru->Busy.Next = &lru->Busy;
	lru->Busy.Prev = &lru->Busy;

	return (EOK);
}
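
/*
 * Both queues are circular doubly-linked lists anchored by a dummy node,
 * so an empty queue is just the sentinel pointing at itself and inserts
 * and detaches never need NULL checks:
 *
 *	empty:		Head <-> Head
 *	one node:	Head <-> node <-> Head
 */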

/*
 * LRUDestroy
 *
 * Shutdown the LRU.
 *
 * NOTE: This is typically a no-op, since the cache manager will be clearing
 *       all cache tags.
 */
static int LRUDestroy (LRU_t *lru)
{
	/* Nothing to do */
	return (EOK);
}

/*
 * LRUHit
 *
 * Registers data activity on the given node. If the node is already in the
 * LRU, it is moved to the front. Otherwise, it is inserted at the front.
 *
 * NOTE: If the node is not in the LRU, we assume that its pointers are NULL.
 */
static int LRUHit (LRU_t *lru, LRUNode_t *node, int age)
{
	/* Handle existing nodes */
	if ((node->Next != NULL) && (node->Prev != NULL)) {
		/* Detach the node */
		node->Next->Prev = node->Prev;
		node->Prev->Next = node->Next;
	}

	/* If it's busy (we can't evict it) */
	if (((Tag_t *)node)->Refs) {
		/* Insert at the head of the Busy queue */
		node->Next = lru->Busy.Next;
		node->Prev = &lru->Busy;

	} else if (age) {
		/* Insert at the tail of the LRU (ages it toward eviction) */
		node->Next = &lru->Head;
		node->Prev = lru->Head.Prev;

	} else {
		/* Insert at the head of the LRU */
		node->Next = lru->Head.Next;
		node->Prev = &lru->Head;
	}

	node->Next->Prev = node;
	node->Prev->Next = node;

	return (EOK);
}

/*
 * LRUEvict
 *
 * Chooses a buffer to release.
 *
 * TODO: Under extreme conditions, it should be possible to release the buffer
 *       of an actively referenced cache buffer, leaving the tag behind as a
 *       placeholder. This would be required for implementing 2Q-LRU
 *       replacement.
 *
 * NOTE: Make sure we never evict the node we're trying to find a buffer for!
 */
static int LRUEvict (LRU_t *lru, LRUNode_t *node)
{
	LRUNode_t *	temp;

	/* Find a victim */
	while (1) {
		/* Grab the tail */
		temp = lru->Head.Prev;

		/* Stop if we're empty */
		if (temp == &lru->Head) {
#if CACHE_DEBUG
			printf("%s(%d): empty?\n", __FUNCTION__, __LINE__);
#endif
			return (ENOMEM);
		}

		/* Detach the tail */
		temp->Next->Prev = temp->Prev;
		temp->Prev->Next = temp->Next;

		/* If it's not busy, we have a victim */
		if (!((Tag_t *)temp)->Refs) break;

		/* Insert at the head of the Busy queue */
		temp->Next = lru->Busy.Next;
		temp->Prev = &lru->Busy;

		temp->Next->Prev = temp;
		temp->Prev->Next = temp;

		/* Try again */
	}

	/* Remove the tag */
	CacheRemove ((Cache_t *)lru, (Tag_t *)temp);

	return (EOK);
}

/*
 * Dump the cache contents.
 * If nobody else calls it, it gets optimized out.  Annoying and yet
 * useful.
 */
void
dumpCache(Cache_t *cache)
{
	int i;
	int numEntries = 0;

	printf("Cache:\n");
	printf("\tDevBlockSize = %u\n", cache->DevBlockSize);
	printf("\tCache Block Size = %u\n", cache->BlockSize);
	printf("\tHash Size = %u\n", cache->HashSize);
	printf("\tHash Table:\n");
	for (i = 0; i < cache->HashSize; i++) {
		Tag_t *tag;

		for (tag = cache->Hash[i]; tag; tag = tag->Next) {
			numEntries++;
			printf("\t\tOffset %llu, refs %u, Flags %#x (%skLazyWrite, %skLockWrite)\n",
			       tag->Offset, tag->Refs, tag->Flags,
			       (tag->Flags & kLazyWrite) ? "" : "no ",
			       (tag->Flags & kLockWrite) ? "" : "no ");
		}
	}
	printf("\tNumber of entries: %u\n", numEntries);
	return;
}