//
//  lf_hfs_generic_buf.c
//
//  Created by Yakov Ben Zaken on 22/03/2018.
//

#include "lf_hfs_generic_buf.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_raw_read_write.h"
#include "lf_hfs_rangelist.h"
#include "lf_hfs_locks.h"
#include "lf_hfs_logger.h"
#include <sys/queue.h>
#define GEN_BUF_ALLOC_DEBUG 0

TAILQ_HEAD(buf_cache_head, buf_cache_entry);

struct buf_cache_entry {
    TAILQ_ENTRY(buf_cache_entry) buf_cache_link;
    GenericLFBuf                 sBuf;
};

boolean_t             buf_cache_state = false;
struct buf_cache_head buf_cache_list;
pthread_mutex_t       buf_cache_mutex;    /* protects access to buffer cache data */
#define BUF_CACHE_MAX_ENTRIES_UPPER_LIMIT (140)
#define BUF_CACHE_MAX_ENTRIES_LOWER_LIMIT (128)
#define BUF_CACHE_MAX_DATA_UPPER_LIMIT    (1536*1024)
#define BUF_CACHE_MAX_DATA_LOWER_LIMIT    (1024*1024)

CacheStats_S gCacheStat = {0};

#define IGNORE_MOUNT_FD (INT_MAX)
void lf_hfs_generic_buf_cache_init( void );
void lf_hfs_generic_buf_cache_deinit( void );
struct buf_cache_entry *lf_hfs_generic_buf_cache_find( GenericLFBufPtr psBuf );
struct buf_cache_entry *lf_hfs_generic_buf_cache_find_by_phy_cluster( int iFD, uint64_t uPhyCluster, uint64_t uBlockSize );
struct buf_cache_entry *lf_hfs_generic_buf_cache_find_gen_buf( GenericLFBufPtr psBuf );
GenericLFBuf *lf_hfs_generic_buf_cache_add( GenericLFBuf *psBuf );
void lf_hfs_generic_buf_cache_update( GenericLFBufPtr psBuf );
void lf_hfs_generic_buf_cache_copy( struct buf_cache_entry *entry, GenericLFBufPtr psBuf );
void lf_hfs_generic_buf_cache_remove( struct buf_cache_entry *entry );
void lf_hfs_generic_buf_cache_remove_all( int iFD );
void lf_hfs_generic_buf_ref( GenericLFBuf *psBuf );
void lf_hfs_generic_buf_rele( GenericLFBuf *psBuf );
// lf_hfs_generic_buf_take_ownership
// Take ownership of this buffer.
// When the function returns zero, we own the buffer and it is locked by our thread.
// When EAGAIN is returned, another thread raced us to own this buffer. Try again.
// ETIMEDOUT indicates that we timed out waiting for the buffer owner to release it.
int lf_hfs_generic_buf_take_ownership(GenericLFBuf *psBuf, pthread_mutex_t *pSem) {
    lf_lck_mtx_lock(&psBuf->sLock);

    if ((psBuf->uUseCnt) && (psBuf->sOwnerThread != pthread_self())) {

        // Someone else owns the buffer
        lf_lck_mtx_unlock(pSem);

        // Wait for the buffer to get released
        struct timespec sWaitTime = {.tv_sec = 3, .tv_nsec = 0};

        int iWaitErr = lf_cond_wait_relative(&psBuf->sOwnerCond, &psBuf->sLock, &sWaitTime);
        if (iWaitErr == ETIMEDOUT) {
            LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_take_ownership_retry: ETIMEDOUT on %p", psBuf);
            lf_lck_mtx_unlock(&psBuf->sLock);
            return(ETIMEDOUT);
        } else if (iWaitErr) {
            LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_take_ownership_retry: lf_cond_wait_relative returned %d on %p", iWaitErr, psBuf);
            lf_lck_mtx_unlock(&psBuf->sLock);
            return(iWaitErr);
        }

        // Buffer owner changed; retry.
        lf_lck_mtx_unlock(&psBuf->sLock);
        return(EAGAIN);
    }

    // We own the buffer; mark it locked and in use by this thread
    assert(psBuf->uLockCnt == 0);
    assert(psBuf->uUseCnt  == 0);
    psBuf->pLockingThread = pthread_self();
    psBuf->sOwnerThread   = pthread_self();
    psBuf->uUseCnt++;
    psBuf->uLockCnt++;
    return(0);
}
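
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * callers that find a shared buffer typically loop on EAGAIN, because on
 * EAGAIN both pSem and the buffer lock have already been dropped and the
 * lookup must be redone:
 *
 *     int iRet;
 *     do {
 *         lf_lck_mtx_lock(&buf_cache_mutex);
 *         // ... look psBuf up in the cache again ...
 *         iRet = lf_hfs_generic_buf_take_ownership(psBuf, &buf_cache_mutex);
 *     } while (iRet == EAGAIN);
 */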
// Function: lf_hfs_generic_buf_allocate
// Allocates a GenericLFBuf structure. If a buffer for the same physical block
// already exists in the cache, attaches to and shares that buffer instead.
GenericLFBufPtr lf_hfs_generic_buf_allocate( vnode_t psVnode, daddr64_t uBlockN, uint32_t uBlockSize, uint64_t uFlags ) {
    uint64_t uPhyCluster      = 0;
    uint64_t uInClusterOffset = 0;
    GenericLFBufPtr psBuf     = NULL;
    GenericLFBuf    sBuf      = {0};
    struct buf_cache_entry *psCacheEntry = NULL;

    if (uFlags & GEN_BUF_PHY_BLOCK) {
        // uBlockN is already a physical block number
        uPhyCluster = uBlockN;
    } else {
        // Determine the physical block number
        uint64_t uStartCluster = 0;
        int iError = raw_readwrite_get_cluster_from_offset(psVnode,
                                                           (uint64_t)uBlockSize * uBlockN,
                                                           &uStartCluster,
                                                           &uInClusterOffset,
                                                           NULL);
        if (iError != 0) {
            panic("Error calculating uPhyCluster");
        }

        uint64_t uReadOffset = (HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfsPlusIOPosOffset +
                                uStartCluster * HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->blockSize) + uInClusterOffset;

        uPhyCluster = uReadOffset / HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size;
    }
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_allocate: psVnode %p, uBlockN %llu, uBlockSize %u, uFlags 0x%llx, uPhyCluster %llu: ",
           psVnode, uBlockN, uBlockSize, uFlags, uPhyCluster);
#endif
    // Check the buffer cache, in case a memory buffer is already allocated for this physical block
    if ( buf_cache_state && !(uFlags & GEN_BUF_NON_CACHED)) {
retry:
        lf_lck_mtx_lock(&buf_cache_mutex);

        psCacheEntry = lf_hfs_generic_buf_cache_find_by_phy_cluster(VNODE_TO_IFD(psVnode), uPhyCluster, uBlockSize);
        if (psCacheEntry) {
            // Buffer exists; share it and move it to the head of the LRU list
            TAILQ_REMOVE(&buf_cache_list, psCacheEntry, buf_cache_link);
            TAILQ_INSERT_HEAD(&buf_cache_list, psCacheEntry, buf_cache_link);

            psBuf = &psCacheEntry->sBuf;
#if GEN_BUF_ALLOC_DEBUG
            printf("Already in cache: %p (UseCnt %u uCacheFlags 0x%llx)\n", psBuf, psBuf->uUseCnt, psBuf->uCacheFlags);
#endif

            int iRet = lf_hfs_generic_buf_take_ownership(psBuf, &buf_cache_mutex);
            if (iRet == EAGAIN) {
                // Lost the ownership race; the cache mutex was dropped, so look up again
                goto retry;
            } else if (iRet) {
                LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_allocate: lf_hfs_generic_buf_take_ownership returned %d.\n", iRet);
                return(NULL);
            }

            lf_hfs_generic_buf_unlock(psBuf);
            lf_lck_mtx_unlock(&buf_cache_mutex);
            return(psBuf);
        }

        lf_lck_mtx_unlock(&buf_cache_mutex);
    }
    // Not found in cache, need to create a GenBuf
    sBuf.uBlockN      = uBlockN;
    sBuf.uDataSize    = uBlockSize;
    sBuf.psVnode      = psVnode;
    sBuf.uPhyCluster  = uPhyCluster;
    sBuf.uCacheFlags  = uFlags;
    sBuf.uUseCnt      = 1;
    sBuf.sOwnerThread = pthread_self();
    if ( buf_cache_state && !(uFlags & GEN_BUF_NON_CACHED)) {

        // Add the new buffer to the cache
        lf_lck_mtx_lock(&buf_cache_mutex);

        GenericLFBufPtr psCachedBuf = lf_hfs_generic_buf_cache_add(&sBuf);
        if (psCachedBuf) {
            lf_cond_init(&psCachedBuf->sOwnerCond);
            lf_lck_mtx_init(&psCachedBuf->sLock);

            if (uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN)) {
                lf_hfs_generic_buf_lock(psCachedBuf);
                lf_hfs_generic_buf_set_cache_flag(psCachedBuf, uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN));
                lf_hfs_generic_buf_unlock(psCachedBuf);
            }
        }

        lf_lck_mtx_unlock(&buf_cache_mutex);
#if GEN_BUF_ALLOC_DEBUG
        printf("Added to cache %p\n", psCachedBuf);
#endif
        return psCachedBuf;

    } else {
        // Allocate memory for a non-cached buffer
        psBuf = hfs_mallocz(sizeof(GenericLFBuf));
        if (!psBuf) {
            goto error;
        }
        memcpy(psBuf, &sBuf, sizeof(*psBuf));
        psBuf->pvData = hfs_mallocz(psBuf->uDataSize);
        if (!psBuf->pvData) {
            goto error;
        }

        lf_cond_init(&psBuf->sOwnerCond);
        lf_lck_mtx_init(&psBuf->sLock);

        gCacheStat.gen_buf_uncached++;
        if (gCacheStat.gen_buf_uncached > gCacheStat.max_gen_buf_uncached) {
            gCacheStat.max_gen_buf_uncached = gCacheStat.gen_buf_uncached;
        }

        if (uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN)) {
            lf_hfs_generic_buf_lock(psBuf);
            lf_hfs_generic_buf_set_cache_flag(psBuf, uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN));
            lf_hfs_generic_buf_unlock(psBuf);
        }

#if GEN_BUF_ALLOC_DEBUG
        printf("Provided uncached %p\n", psBuf);
#endif
        return psBuf;
    }

error:
    if (psBuf && psBuf->pvData) {
        hfs_free(psBuf->pvData);
    }
    if (psBuf) {
        hfs_free(psBuf);
    }
    return NULL;
}
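
/*
 * Usage sketch (editorial illustration; the flag value is an assumption):
 * allocate a block buffer, read it, and release it when done.
 *
 *     GenericLFBufPtr psBlock = lf_hfs_generic_buf_allocate(psVnode, uBlockN,
 *                                                           uBlockSize, 0);
 *     if (psBlock) {
 *         if (lf_hfs_generic_buf_read(psBlock) == 0) {
 *             // ... inspect psBlock->pvData ...
 *         }
 *         lf_hfs_generic_buf_release(psBlock);
 *     }
 */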
errno_t lf_hfs_generic_buf_read( GenericLFBufPtr psBuf )
{
    errno_t  iErr              = 0;
    uint64_t uActuallyRead     = 0;
    uint64_t uReadStartCluster = 0;
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_read: psBuf %p, psVnode %p, uBlockN %llu, uBlockSize %u, uFlags 0x%llx, uPhyCluster %llu: ",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster);
#endif
    if ( buf_cache_state && !(psBuf->uCacheFlags & GEN_BUF_NON_CACHED))
    {
        lf_lck_mtx_lock(&buf_cache_mutex);
        lf_hfs_generic_buf_cache_update(psBuf);
        lf_lck_mtx_unlock(&buf_cache_mutex);
    }
    lf_hfs_generic_buf_lock(psBuf);

    assert(psBuf->uUseCnt != 0);
    assert(psBuf->sOwnerThread == pthread_self());

    if (psBuf->uCacheFlags & GEN_BUF_IS_UPTODATE) {
        // The buffer already contains data equal to, or newer than, the media
#if GEN_BUF_ALLOC_DEBUG
        printf("already up-to-date.\n");
#endif
        goto exit;
    }
    // Cache is disabled or the buffer wasn't found; read the data from media
    iErr = raw_readwrite_read_mount(psBuf->psVnode,
                                    psBuf->uPhyCluster,
                                    HFSTOVCB(psBuf->psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size,
                                    psBuf->pvData,
                                    psBuf->uDataSize,
                                    &uActuallyRead,
                                    &uReadStartCluster);
    if (iErr == 0) {
        psBuf->uValidBytes = (uint32_t)uActuallyRead;
        lf_hfs_generic_buf_set_cache_flag(psBuf, GEN_BUF_IS_UPTODATE);
#if GEN_BUF_ALLOC_DEBUG
        uint32_t *puData = psBuf->pvData;
        printf("Success. uPhyCluster %llu, Data: 0x%x, 0x%x, 0x%x, 0x%x\n",
               psBuf->uPhyCluster, puData[0], puData[1], puData[2], puData[3]);
#endif
    } else {
#if GEN_BUF_ALLOC_DEBUG
        printf("Error. uPhyCluster %llu, iErr %d.\n", psBuf->uPhyCluster, iErr);
#endif
    }

exit:
    lf_hfs_generic_buf_unlock(psBuf);
    return iErr;
}
errno_t lf_hfs_generic_buf_write( GenericLFBufPtr psBuf ) {
    errno_t  iErr               = 0;
    uint64_t uActuallyWritten   = 0;
    uint64_t uWriteStartCluster = 0;

    lf_hfs_generic_buf_lock(psBuf);

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_write: psBuf %p psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
    uint32_t *puData = psBuf->pvData;
    printf("psBuf uPhyCluster %llu, Data: 0x%x, 0x%x, 0x%x, 0x%x\n",
           psBuf->uPhyCluster, puData[0], puData[1], puData[2], puData[3]);
#endif
    assert(psBuf->uUseCnt != 0);
    assert(!(psBuf->uCacheFlags & GEN_BUF_WRITE_LOCK));
    assert(psBuf->sOwnerThread == pthread_self());

    iErr = raw_readwrite_write_mount(psBuf->psVnode,
                                     psBuf->uPhyCluster,
                                     HFSTOVCB(psBuf->psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size,
                                     psBuf->pvData,
                                     psBuf->uDataSize,
                                     &uActuallyWritten,
                                     &uWriteStartCluster);

    lf_hfs_generic_buf_unlock(psBuf);
    return iErr;
}
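
/*
 * Usage sketch (editorial illustration): the read-modify-write pattern.
 * The calling thread must already own the buffer (via allocate or
 * take_ownership), since both paths assert sOwnerThread == pthread_self().
 *
 *     if (lf_hfs_generic_buf_read(psBlock) == 0) {
 *         // ... modify psBlock->pvData ...
 *         iErr = lf_hfs_generic_buf_write(psBlock);
 *     }
 */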
void lf_hfs_generic_buf_clear( GenericLFBufPtr psBuf ) {
    // Zero the entire data buffer: uDataSize bytes, not sizeof(psBuf->uDataSize)
    memset(psBuf->pvData, 0, psBuf->uDataSize);
}
void lf_hfs_generic_buf_invalidate( GenericLFBuf *psBuf ) {
    struct buf_cache_entry *psCacheEntry;

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_invalidate: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    lf_hfs_generic_buf_lock(psBuf);
    lf_hfs_generic_buf_rele(psBuf);

    assert(psBuf->uUseCnt == 0);
    assert(psBuf->sOwnerThread == NULL);
    // Check the buffer cache, in case a memory buffer is allocated for this physical block
    if ( buf_cache_state && !(psBuf->uCacheFlags & GEN_BUF_NON_CACHED)) {
        lf_lck_mtx_lock(&buf_cache_mutex);
        psCacheEntry = lf_hfs_generic_buf_cache_find_gen_buf(psBuf);
        if (psCacheEntry) {
            lf_hfs_generic_buf_cache_remove(psCacheEntry);
        } else {
            panic("A buffer is marked Cached, but was not found in Cache");
        }
        lf_lck_mtx_unlock(&buf_cache_mutex);
    } else {
        // This is a non-cached buffer; destroy and free it here
        gCacheStat.gen_buf_uncached--;
        lf_hfs_generic_buf_unlock(psBuf);
        lf_cond_destroy(&psBuf->sOwnerCond);
        lf_lck_mtx_destroy(&psBuf->sLock);
        hfs_free(psBuf->pvData);
        hfs_free(psBuf);
    }
}
void lf_hfs_generic_buf_ref(GenericLFBuf *psBuf) {
    lf_hfs_generic_buf_lock(psBuf);
    assert(psBuf->sOwnerThread == pthread_self());
    psBuf->uUseCnt++;
    lf_hfs_generic_buf_unlock(psBuf);
}
int lf_hfs_generic_buf_validate_owner(GenericLFBuf *psBuf) {
    return(psBuf->sOwnerThread == pthread_self());
}
void lf_hfs_generic_buf_rele(GenericLFBuf *psBuf) {
    lf_hfs_generic_buf_lock(psBuf);
    assert(psBuf->uUseCnt != 0);
    assert(psBuf->sOwnerThread == pthread_self());
    psBuf->uUseCnt--;
    if (psBuf->uUseCnt == 0) {
        // Last reference: drop ownership and wake any thread waiting for it
        psBuf->sOwnerThread = NULL;
        lf_cond_wakeup(&psBuf->sOwnerCond);
    }
    lf_hfs_generic_buf_unlock(psBuf);
}
void lf_hfs_generic_buf_lock(GenericLFBufPtr psBuf) {
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_lock: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    if (psBuf->pLockingThread == pthread_self()) {
        // Recursive lock by the thread that already holds it; just bump the count
        psBuf->uLockCnt++;
    } else {
        lf_lck_mtx_lock(&psBuf->sLock);
        assert(psBuf->uLockCnt == 0);
        psBuf->uLockCnt = 1;
        psBuf->pLockingThread = pthread_self();
    }
}
void lf_hfs_generic_buf_unlock(GenericLFBufPtr psBuf) {
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_unlock: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    assert(psBuf->pLockingThread == pthread_self());
    assert(psBuf->uLockCnt);

    psBuf->uLockCnt--;
    if (!psBuf->uLockCnt) {
        psBuf->pLockingThread = NULL;
        lf_lck_mtx_unlock(&psBuf->sLock);
    }
}
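
/*
 * Editorial note: sLock behaves like a recursive mutex, with the recursion
 * tracked manually via pLockingThread/uLockCnt. Calls must balance:
 *
 *     lf_hfs_generic_buf_lock(psBuf);     // acquires sLock, uLockCnt = 1
 *     lf_hfs_generic_buf_lock(psBuf);     // same thread,    uLockCnt = 2
 *     lf_hfs_generic_buf_unlock(psBuf);   // uLockCnt = 1, sLock still held
 *     lf_hfs_generic_buf_unlock(psBuf);   // uLockCnt = 0, sLock released
 */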
void lf_hfs_generic_buf_set_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags) {
    lf_hfs_generic_buf_lock(psBuf);
    psBuf->uCacheFlags |= uCacheFlags;
    lf_hfs_generic_buf_unlock(psBuf);
}

void lf_hfs_generic_buf_clear_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags) {
    lf_hfs_generic_buf_lock(psBuf);
    psBuf->uCacheFlags &= ~uCacheFlags;
    lf_hfs_generic_buf_unlock(psBuf);
}
static void lf_hfs_buf_free_unused(void)
{
    // Free more than we actually need, so that we won't have to come here for
    // every new buffer that we allocate
    while ( gCacheStat.buf_cache_size > BUF_CACHE_MAX_ENTRIES_LOWER_LIMIT ||
            gCacheStat.buf_total_allocated_size > BUF_CACHE_MAX_DATA_LOWER_LIMIT )
    {
        struct buf_cache_entry *last;

        last = TAILQ_LAST(&buf_cache_list, buf_cache_head);
        if (!last) {
            break;
        }

        lf_hfs_generic_buf_lock(&last->sBuf);

        if ((last->sBuf.uUseCnt) || (last->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) {
            // The last buffer in the buffer cache is in use or write-locked.
            // Nothing more to free.
            lf_hfs_generic_buf_unlock(&last->sBuf);
            break;
        }

        ++gCacheStat.buf_cache_cleanup;
        lf_hfs_generic_buf_cache_remove(last);
    }
}
void lf_hfs_generic_buf_release( GenericLFBufPtr psBuf )
{
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_release: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    if (!psBuf) {
        return;
    }

    lf_hfs_generic_buf_rele(psBuf);
    // If unused and uncached, free.
    if ((psBuf->uCacheFlags & GEN_BUF_NON_CACHED) && (psBuf->uUseCnt == 0)) {
        // Buffer not in cache - free it
        gCacheStat.gen_buf_uncached--;
        lf_cond_destroy(&psBuf->sOwnerCond);
        lf_lck_mtx_destroy(&psBuf->sLock);
        hfs_free(psBuf->pvData);
        hfs_free(psBuf);
        return;
    }

    // Clean up unused entries in the cache
    int iTry = lf_lck_mtx_try_lock(&buf_cache_mutex);
    if (iTry) {
        // Someone else holds the cache mutex; skip the cleanup this time
        return;
    }

    // Free more than we actually need, so that we won't have to come here for
    // every new buffer that we allocate
    lf_hfs_buf_free_unused();
    lf_lck_mtx_unlock(&buf_cache_mutex);
}
// Buffer Cache functions

void lf_hfs_generic_buf_cache_init( void ) {
    gCacheStat.buf_cache_size       = 0;
    gCacheStat.max_gen_buf_uncached = 0;
    gCacheStat.gen_buf_uncached     = 0;
    lf_lck_mtx_init(&buf_cache_mutex);
    TAILQ_INIT(&buf_cache_list);
    buf_cache_state = true;
}
void lf_hfs_generic_buf_cache_deinit( void )
{
    lf_hfs_generic_buf_cache_remove_all(IGNORE_MOUNT_FD);

    assert(gCacheStat.buf_cache_size   == 0);
    assert(gCacheStat.gen_buf_uncached == 0);

    buf_cache_state = false;
    lf_lck_mtx_destroy(&buf_cache_mutex);
}
void lf_hfs_generic_buf_cache_clear_by_iFD( int iFD )
{
    lf_hfs_generic_buf_cache_remove_all(iFD);
}
boolean_t lf_hfs_generic_buf_match_range( struct buf_cache_entry *entry, GenericLFBufPtr psBuf )
{
    if ( VTOF(entry->sBuf.psVnode) != VTOF(psBuf->psVnode) )
    {
        return false;
    }

    uint64_t size_1  = entry->sBuf.uDataSize;
    uint64_t start_1 = entry->sBuf.uBlockN * size_1;
    uint64_t end_1   = start_1 + size_1 - 1;
    uint64_t size_2  = psBuf->uDataSize;
    uint64_t start_2 = psBuf->uBlockN * size_2;
    uint64_t end_2   = start_2 + size_2 - 1;

    enum rl_overlaptype overlap;
    struct rl_entry entry_range = {.rl_start = start_1, .rl_end = end_1};

    overlap = rl_overlap(&entry_range, start_2, end_2);

    switch (overlap)
    {
        case RL_MATCHINGOVERLAP:
            return true;

        case RL_OVERLAPCONTAINSRANGE:
            // Make sure we have the same start, though
            assert(start_1 == start_2);
            return true;

        case RL_OVERLAPISCONTAINED:
        case RL_OVERLAPSTARTSBEFORE:
        case RL_OVERLAPENDSAFTER:
            // Partial overlaps should never occur in the cache
            LFHFS_LOG(LEVEL_ERROR, " lf_hfs_generic_buf_match_range : cache overlap [%d]", overlap);
            assert(0);
            break;

        case RL_NOOVERLAP:
            break;
    }

    return false;
}
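
/*
 * Worked example (editorial): with 4 KiB buffers, a cached entry at
 * uBlockN 10 spans bytes [40960, 45055]. A lookup for block 10 of the same
 * size yields RL_MATCHINGOVERLAP (a match); block 11 spans [45056, 49151]
 * and yields RL_NOOVERLAP (no match). Any partial overlap means two cache
 * entries alias the same media range, which is logged and asserted above.
 */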
struct buf_cache_entry *lf_hfs_generic_buf_cache_find( GenericLFBufPtr psBuf )
{
    struct buf_cache_entry *entry, *entry_next;

    TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next)
    {
        if ( lf_hfs_generic_buf_match_range(entry, psBuf) )
        {
            break;
        }
    }

    // entry is NULL if the loop ran to completion without a match
    return entry;
}
// Run the function pfCallback on all buffers that belong to vnode psVnode.
int lf_hfs_generic_buf_write_iterate(vnode_t psVnode, IterateCallback pfCallback, uint32_t uFlags, void *pvArgs) {

    struct buf_cache_entry *psCacheEntry, *psNextCacheEntry;
    int iFD = VNODE_TO_IFD(psVnode);

    TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) {
        int iEntryFD = VNODE_TO_IFD(psCacheEntry->sBuf.psVnode);

        if ( (iFD == iEntryFD) && (psCacheEntry->sBuf.psVnode == psVnode)) {
            if ((uFlags & BUF_SKIP_LOCKED) && (psCacheEntry->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) {
                continue;
            }
            if ((uFlags & BUF_SKIP_NONLOCKED) && !(psCacheEntry->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) {
                continue;
            }
            pfCallback(&psCacheEntry->sBuf, pvArgs);
        }
    }

    return 0;
}
struct buf_cache_entry *lf_hfs_generic_buf_cache_find_by_phy_cluster(int iFD, uint64_t uPhyCluster, uint64_t uBlockSize) {

    struct buf_cache_entry *psCacheEntry, *psNextCacheEntry;

    TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) {
        if (psCacheEntry->sBuf.psVnode)
        {
            int iEntryFD = VNODE_TO_IFD(psCacheEntry->sBuf.psVnode);
            if ( (psCacheEntry->sBuf.uPhyCluster == uPhyCluster) &&
                 (iEntryFD == iFD) &&
                 (psCacheEntry->sBuf.uDataSize >= uBlockSize) ) {
                return psCacheEntry;
            }
        }
        else
        {
            LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_cache_find_by_phy_cluster: got buf with vnode == NULL, cache_flags: 0x%llx, uUseCnt %d",
                      psCacheEntry->sBuf.uCacheFlags, psCacheEntry->sBuf.uUseCnt);
        }
    }

    return NULL;
}
struct buf_cache_entry *lf_hfs_generic_buf_cache_find_gen_buf(GenericLFBufPtr psBuf) {

    struct buf_cache_entry *psCacheEntry, *psNextCacheEntry;

    TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) {
        if ( &psCacheEntry->sBuf == psBuf ) {
            return psCacheEntry;
        }
    }

    return NULL;
}
GenericLFBufPtr lf_hfs_generic_buf_cache_add( GenericLFBufPtr psBuf )
{
    struct buf_cache_entry *entry = NULL;

    // Check that we have enough space to allocate this buffer; if not, evict
    // unused entries first
    if (gCacheStat.buf_total_allocated_size + psBuf->uDataSize > BUF_CACHE_MAX_DATA_UPPER_LIMIT ||
        gCacheStat.buf_cache_size + 1 >= BUF_CACHE_MAX_ENTRIES_UPPER_LIMIT)
    {
        lf_hfs_buf_free_unused();
    }

    entry = hfs_mallocz(sizeof(*entry));
    if (!entry) {
        goto error;
    }

    memcpy(&entry->sBuf, (void*)psBuf, sizeof(*psBuf));
    entry->sBuf.uCacheFlags &= ~GEN_BUF_NON_CACHED;

    entry->sBuf.pvData = hfs_mallocz(psBuf->uDataSize);
    if (!entry->sBuf.pvData) {
        goto error;
    }

    TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link);

    gCacheStat.buf_cache_size++;
    gCacheStat.buf_total_allocated_size += psBuf->uDataSize;

    if (gCacheStat.buf_cache_size > gCacheStat.max_buf_cache_size) {
        gCacheStat.max_buf_cache_size = gCacheStat.buf_cache_size;
    }

    return(&entry->sBuf);

error:
    if (entry && entry->sBuf.pvData) {
        hfs_free(entry->sBuf.pvData);
    }
    if (entry) {
        hfs_free(entry);
    }
    return NULL;
}
void lf_hfs_generic_buf_cache_update( GenericLFBufPtr psBuf )
{
    struct buf_cache_entry *entry;

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_cache_update: psBuf %p\n", psBuf);
#endif

    // Check that the cache entry still exists and hasn't been thrown away
    entry = lf_hfs_generic_buf_cache_find(psBuf);
    if (!entry) {
        return;
    }

    // Move the entry to the head of the LRU list
    TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
    TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link);
}
void lf_hfs_generic_buf_cache_copy( struct buf_cache_entry *entry, __unused GenericLFBufPtr psBuf )
{
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_cache_copy: psBuf %p\n", psBuf);
#endif

    // Move the entry to the head of the LRU list
    TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
    TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link);
}
void lf_hfs_generic_buf_cache_remove( struct buf_cache_entry *entry ) {

    if (entry->sBuf.uUseCnt != 0) {
        LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_cache_remove: remove buffer %p with uUseCnt %u",
                  &entry->sBuf, entry->sBuf.uUseCnt);
    }

#if GEN_BUF_ALLOC_DEBUG
    GenericLFBuf *psBuf = &entry->sBuf;
    printf("lf_hfs_generic_buf_cache_remove: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
    --gCacheStat.buf_cache_size;
    ++gCacheStat.buf_cache_remove;
    gCacheStat.buf_total_allocated_size -= entry->sBuf.uDataSize;

    assert(entry->sBuf.uLockCnt == 1);

    lf_lck_mtx_unlock(&entry->sBuf.sLock);
    lf_cond_destroy(&entry->sBuf.sOwnerCond);
    lf_lck_mtx_destroy(&entry->sBuf.sLock);

    hfs_free(entry->sBuf.pvData);
    hfs_free(entry);
}
void lf_hfs_generic_buf_cache_remove_all( int iFD ) {
    struct buf_cache_entry *entry, *entry_next;

    lf_lck_mtx_lock(&buf_cache_mutex);

    TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next)
    {
        if ( (iFD == IGNORE_MOUNT_FD) || ( VNODE_TO_IFD(entry->sBuf.psVnode) == iFD ) )
        {
            if (iFD == IGNORE_MOUNT_FD) {
                // Media no longer available, force remove all
                TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
                --gCacheStat.buf_cache_size;
                ++gCacheStat.buf_cache_remove;
                gCacheStat.buf_total_allocated_size -= entry->sBuf.uDataSize;
            } else {
                lf_hfs_generic_buf_lock(&entry->sBuf);
                lf_hfs_generic_buf_cache_remove(entry);
            }
        }
    }

    lf_lck_mtx_unlock(&buf_cache_mutex);
}
/* buf_cache_mutex should be locked by the caller, via lf_hfs_generic_buf_cache_LockBufCache */
void lf_hfs_generic_buf_cache_remove_vnode(vnode_t vp) {

    struct buf_cache_entry *entry, *entry_next;

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_cache_remove_vnode: vp %p: ", vp);
#endif

    TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next) {

        if ( entry->sBuf.psVnode == vp ) {

#if GEN_BUF_ALLOC_DEBUG
            printf("&sBuf %p, ", &entry->sBuf);
#endif

            lf_hfs_generic_buf_lock(&entry->sBuf);
            lf_hfs_generic_buf_cache_remove(entry);
        }
    }

#if GEN_BUF_ALLOC_DEBUG
    printf("\n");
#endif
}
void lf_hfs_generic_buf_cache_LockBufCache(void)
{
    lf_lck_mtx_lock(&buf_cache_mutex);
}

void lf_hfs_generic_buf_cache_UnLockBufCache(void)
{
    lf_lck_mtx_unlock(&buf_cache_mutex);
}