//
//  lf_hfs_generic_buf.c
//  livefiles_hfs
//
//  Created by Yakov Ben Zaken on 22/03/2018.
//

#include "lf_hfs_generic_buf.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_raw_read_write.h"
#include "lf_hfs_rangelist.h"
#include "lf_hfs_locks.h"
#include "lf_hfs_logger.h"
#include <sys/queue.h>
#include <assert.h>

#define GEN_BUF_ALLOC_DEBUG 0

TAILQ_HEAD(buf_cache_head, buf_cache_entry);

struct buf_cache_entry {
    TAILQ_ENTRY(buf_cache_entry) buf_cache_link;
    GenericLFBuf sBuf;
};

boolean_t buf_cache_state = false;
struct buf_cache_head buf_cache_list;
pthread_mutex_t buf_cache_mutex;    /* protects access to buffer cache data */

#define BUF_CACHE_MAX_ENTRIES_UPPER_LIMIT   (140)
#define BUF_CACHE_MAX_ENTRIES_LOWER_LIMIT   (128)
#define BUF_CACHE_MAX_DATA_UPPER_LIMIT      (1536*1024)
#define BUF_CACHE_MAX_DATA_LOWER_LIMIT      (1024*1024)

CacheStats_S gCacheStat = {0};

#define IGNORE_MOUNT_FD (INT_MAX)

void lf_hfs_generic_buf_cache_init( void );
void lf_hfs_generic_buf_cache_deinit( void );
struct buf_cache_entry *lf_hfs_generic_buf_cache_find( GenericLFBufPtr psBuf );
struct buf_cache_entry *lf_hfs_generic_buf_cache_find_by_phy_cluster(int iFD, uint64_t uPhyCluster, uint64_t uBlockSize);
struct buf_cache_entry *lf_hfs_generic_buf_cache_find_gen_buf(GenericLFBufPtr psBuf);
GenericLFBuf *lf_hfs_generic_buf_cache_add( GenericLFBuf *psBuf );
void lf_hfs_generic_buf_cache_update( GenericLFBufPtr psBuf );
void lf_hfs_generic_buf_cache_copy( struct buf_cache_entry *entry, GenericLFBufPtr psBuf );
void lf_hfs_generic_buf_cache_remove( struct buf_cache_entry *entry );
void lf_hfs_generic_buf_cache_remove_all( int iFD );
void lf_hfs_generic_buf_ref(GenericLFBuf *psBuf);
void lf_hfs_generic_buf_rele(GenericLFBuf *psBuf);

// lf_hfs_generic_buf_take_ownership
// Take ownership of this buffer.
// When the function returns zero, we own the buffer and it is locked by our thread.
// When EAGAIN is returned, another thread raced us to own this buffer. Try again.
// ETIMEDOUT indicates that we timed out waiting for the buffer owner to release it.
int lf_hfs_generic_buf_take_ownership(GenericLFBuf *psBuf, pthread_mutex_t *pSem) {
    lf_lck_mtx_lock(&psBuf->sLock);

    if ((psBuf->uUseCnt) && (psBuf->sOwnerThread != pthread_self())) {

        // Someone else owns the buffer
        if (pSem) {
            lf_lck_mtx_unlock(pSem);
        }

        // Wait for the buffer to get released
        struct timespec sWaitTime = {.tv_sec = 3, .tv_nsec = 0};

        int iWaitErr = lf_cond_wait_relative(&psBuf->sOwnerCond, &psBuf->sLock, &sWaitTime);
        if (iWaitErr == ETIMEDOUT) {
            LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_take_ownership_retry: ETIMEDOUT on %p", psBuf);
            // Drop the buffer lock before bailing out
            lf_lck_mtx_unlock(&psBuf->sLock);
            return(ETIMEDOUT);
        } else if (iWaitErr) {
            LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_take_ownership_retry: lf_cond_wait_relative returned %d on %p", iWaitErr, psBuf);
            // Drop the buffer lock before bailing out
            lf_lck_mtx_unlock(&psBuf->sLock);
            return(EINVAL);
        }

        // Buffer ownership changed; retry.
        lf_lck_mtx_unlock(&psBuf->sLock);
        return(EAGAIN);
    }

    // We own the buffer
    assert(psBuf->uLockCnt == 0);
    assert(psBuf->uUseCnt == 0);
    psBuf->pLockingThread = pthread_self();
    psBuf->sOwnerThread   = pthread_self();
    psBuf->uUseCnt++;
    psBuf->uLockCnt++;
    return(0);
}
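
// Illustrative sketch only: a typical caller loops on EAGAIN until ownership is
// acquired, then unlocks the buffer and eventually drops the use count, much like
// lf_hfs_generic_buf_allocate() below does when it finds a cached buffer:
//
//     int iRet;
//     do {
//         iRet = lf_hfs_generic_buf_take_ownership(psBuf, NULL);
//     } while (iRet == EAGAIN);
//     if (iRet == 0) {
//         // ... access psBuf->pvData ...
//         lf_hfs_generic_buf_unlock(psBuf);
//         lf_hfs_generic_buf_release(psBuf);
//     }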

// Function: lf_hfs_generic_buf_allocate
// Allocate a GenericLFBuf structure; if a buffer for the same physical block already
// exists in the cache, attach to (share) that previously allocated buffer.
GenericLFBufPtr lf_hfs_generic_buf_allocate( vnode_t psVnode, daddr64_t uBlockN, uint32_t uBlockSize, uint64_t uFlags ) {

    uint64_t uPhyCluster      = 0;
    uint64_t uInClusterOffset = 0;
    GenericLFBufPtr psBuf     = NULL;
    GenericLFBuf sBuf         = {0};
    struct buf_cache_entry *psCacheEntry = NULL;

    assert(psVnode);

    if (uFlags & GEN_BUF_PHY_BLOCK) {
        uPhyCluster = uBlockN;
    } else {
        // Determine PHY block number
        uint64_t uStartCluster = 0;
        int iError = raw_readwrite_get_cluster_from_offset(psVnode,
                                                           uBlockSize*uBlockN,
                                                           &uStartCluster,
                                                           &uInClusterOffset,
                                                           NULL );
        if (iError != 0) {
            panic("Error calculating uPhyCluster");
        }

        uint64_t uReadOffset = (HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfsPlusIOPosOffset +
                                uStartCluster * HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->blockSize) + uInClusterOffset;

        uPhyCluster = uReadOffset / HFSTOVCB(psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size;
    }

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_allocate: psVnode %p, uBlockN %llu, uBlockSize %u, uFlags 0x%llx, uPhyCluster %llu: ",
           psVnode, uBlockN, uBlockSize, uFlags, uPhyCluster);
#endif

    // Check the buffer cache, in case a memory buffer is already allocated for this physical block
    if ( buf_cache_state && !(uFlags & GEN_BUF_NON_CACHED)) {
retry:
        lf_lck_mtx_lock(&buf_cache_mutex);

        psCacheEntry = lf_hfs_generic_buf_cache_find_by_phy_cluster(VNODE_TO_IFD(psVnode), uPhyCluster, uBlockSize);
        if (psCacheEntry) {
            // Buffer exists, share it.
            TAILQ_REMOVE(&buf_cache_list, psCacheEntry, buf_cache_link);
            TAILQ_INSERT_HEAD(&buf_cache_list, psCacheEntry, buf_cache_link);

            psBuf = &psCacheEntry->sBuf;
#if GEN_BUF_ALLOC_DEBUG
            printf("Already in cache: %p (UseCnt %u uCacheFlags 0x%llx)\n", psBuf, psBuf->uUseCnt, psBuf->uCacheFlags);
#endif
            int iRet = lf_hfs_generic_buf_take_ownership(psBuf, &buf_cache_mutex);
            if (iRet == EAGAIN) {
                goto retry;
            } else if (iRet) {
                LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_allocate: lf_hfs_generic_buf_take_ownership returned %d.\n", iRet);
                return(NULL);
            }

            lf_hfs_generic_buf_unlock(psBuf);
            lf_lck_mtx_unlock(&buf_cache_mutex);
            return(psBuf);
        }

        lf_lck_mtx_unlock(&buf_cache_mutex);
    }

    // Not found in cache, need to create a GenBuf
    sBuf.uBlockN      = uBlockN;
    sBuf.uDataSize    = uBlockSize;
    sBuf.psVnode      = psVnode;
    sBuf.uPhyCluster  = uPhyCluster;
    sBuf.uCacheFlags  = uFlags;
    sBuf.uUseCnt      = 1;
    sBuf.sOwnerThread = pthread_self();

    if ( buf_cache_state && !(uFlags & GEN_BUF_NON_CACHED)) {

        // Add to cache
        lf_lck_mtx_lock(&buf_cache_mutex);

        GenericLFBufPtr psCachedBuf = lf_hfs_generic_buf_cache_add(&sBuf);

        if (psCachedBuf) {
            // Initialize the cached copy's lock and condition before first use
            lf_cond_init(&psCachedBuf->sOwnerCond);
            lf_lck_mtx_init(&psCachedBuf->sLock);

            if (uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN)) {
                lf_hfs_generic_buf_lock(psCachedBuf);
                lf_hfs_generic_buf_set_cache_flag(psCachedBuf, uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN));
                lf_hfs_generic_buf_unlock(psCachedBuf);
            }
        }

        lf_lck_mtx_unlock(&buf_cache_mutex);
#if GEN_BUF_ALLOC_DEBUG
        printf("Added to cache %p\n", psCachedBuf);
#endif
        return psCachedBuf;

    } else {
        // Allocate memory for a non-cached buffer
        psBuf = hfs_mallocz(sizeof(GenericLFBuf));
        if (!psBuf) {
            goto error;
        }
        memcpy(psBuf, &sBuf, sizeof(*psBuf));
        psBuf->pvData = hfs_mallocz(psBuf->uDataSize);
        if (!psBuf->pvData) {
            goto error;
        }

        lf_cond_init(&psBuf->sOwnerCond);
        lf_lck_mtx_init(&psBuf->sLock);

        gCacheStat.gen_buf_uncached++;
        if (gCacheStat.gen_buf_uncached > gCacheStat.max_gen_buf_uncached) {
            gCacheStat.max_gen_buf_uncached = gCacheStat.gen_buf_uncached;
        }
        if (uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN)) {
            lf_hfs_generic_buf_lock(psBuf);
            lf_hfs_generic_buf_set_cache_flag(psBuf, uFlags & (GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN));
            lf_hfs_generic_buf_unlock(psBuf);
        }

#if GEN_BUF_ALLOC_DEBUG
        printf("Provided uncached %p\n", psBuf);
#endif

        return psBuf;
    }
error:
    if (psBuf && psBuf->pvData) {
        hfs_free(psBuf->pvData);
    }
    if (psBuf) {
        hfs_free(psBuf);
    }
    return(NULL);
}
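
// Illustrative sketch of the common read path built only from routines in this file
// (psVnode, uBlockN and uBlockSize stand for any valid vnode/block arguments):
//
//     GenericLFBufPtr psBuf = lf_hfs_generic_buf_allocate(psVnode, uBlockN, uBlockSize, 0);
//     if (psBuf) {
//         if (lf_hfs_generic_buf_read(psBuf) == 0) {
//             // ... consume psBuf->pvData ...
//         }
//         lf_hfs_generic_buf_release(psBuf);
//     }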

errno_t lf_hfs_generic_buf_read( GenericLFBufPtr psBuf )
{
    errno_t iErr               = 0;
    uint64_t uActuallyRead     = 0;
    uint64_t uReadStartCluster = 0;

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_read: psBuf %p, psVnode %p, uBlockN %llu, uBlockSize %u, uFlags 0x%llx, uPhyCluster %llu: ",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster);
#endif

    if (!psBuf) {
        return(EINVAL);
    }

    if ( buf_cache_state && !(psBuf->uCacheFlags & GEN_BUF_NON_CACHED))
    {
        lf_lck_mtx_lock(&buf_cache_mutex);
        lf_hfs_generic_buf_cache_update(psBuf);
        lf_lck_mtx_unlock(&buf_cache_mutex);
    }

    lf_hfs_generic_buf_lock(psBuf);

    assert(psBuf->uUseCnt != 0);
    assert(psBuf->sOwnerThread == pthread_self());

    if (psBuf->uCacheFlags & GEN_BUF_IS_UPTODATE) {

        // The buffer already contains data equal to or newer than the media.
#if GEN_BUF_ALLOC_DEBUG
        printf("already up-to-date.\n");
#endif
        goto exit;
    }

    // Cache is disabled or the buffer wasn't found in it; read the data from media
    iErr = raw_readwrite_read_mount(psBuf->psVnode,
                                    psBuf->uPhyCluster,
                                    HFSTOVCB(psBuf->psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size,
                                    psBuf->pvData,
                                    psBuf->uDataSize,
                                    &uActuallyRead,
                                    &uReadStartCluster);

    if ( iErr == 0 ) {
        psBuf->uValidBytes = (uint32_t)uActuallyRead;
        lf_hfs_generic_buf_set_cache_flag(psBuf, GEN_BUF_IS_UPTODATE);

#if GEN_BUF_ALLOC_DEBUG
        uint32_t *puData = psBuf->pvData;
        printf("Success. uPhyCluster %llu, Data: 0x%x, 0x%x, 0x%x, 0x%x\n", psBuf->uPhyCluster, puData[0], puData[1], puData[2], puData[3]);
#endif

    } else {

#if GEN_BUF_ALLOC_DEBUG
        printf("Error. uPhyCluster %llu, iErr %d.\n", psBuf->uPhyCluster, iErr);
#endif
    }
exit:
    lf_hfs_generic_buf_unlock(psBuf);
    return iErr;
}

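// lf_hfs_generic_buf_write
// Write the buffer contents back to media. The caller is expected to hold a use
// reference, own the buffer, and the buffer must not be marked GEN_BUF_WRITE_LOCK;
// these expectations are asserted below.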
errno_t lf_hfs_generic_buf_write( GenericLFBufPtr psBuf ) {
    errno_t iErr = 0;

    lf_hfs_generic_buf_lock(psBuf);

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_write: psBuf %p psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
    uint32_t *puData = psBuf->pvData;
    printf("psBuf uPhyCluster %llu, Data: 0x%x, 0x%x, 0x%x, 0x%x\n", psBuf->uPhyCluster, puData[0], puData[1], puData[2], puData[3]);
#endif

    assert(psBuf->uUseCnt != 0);
    assert(!(psBuf->uCacheFlags & GEN_BUF_WRITE_LOCK));
    assert(psBuf->sOwnerThread == pthread_self());

    iErr = raw_readwrite_write_mount(psBuf->psVnode,
                                     psBuf->uPhyCluster,
                                     HFSTOVCB(psBuf->psVnode->sFSParams.vnfs_mp->psHfsmount)->hfs_physical_block_size,
                                     psBuf->pvData,
                                     psBuf->uDataSize,
                                     NULL, NULL);

    lf_hfs_generic_buf_unlock(psBuf);
    return iErr;
}

void lf_hfs_generic_buf_clear( GenericLFBufPtr psBuf ) {
    // Zero the entire data buffer (uDataSize bytes), not just sizeof(uDataSize)
    memset(psBuf->pvData, 0, psBuf->uDataSize);
}

void lf_hfs_generic_buf_invalidate( GenericLFBuf *psBuf ) {
    struct buf_cache_entry *psCacheEntry;

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_invalidate: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    lf_hfs_generic_buf_lock(psBuf);
    lf_hfs_generic_buf_rele(psBuf);

    assert(psBuf->uUseCnt == 0);
    assert(psBuf->sOwnerThread == NULL);

    // If the buffer is cached, remove it from the buffer cache
    if ( buf_cache_state && !(psBuf->uCacheFlags & GEN_BUF_NON_CACHED)) {

        lf_lck_mtx_lock(&buf_cache_mutex);
        psCacheEntry = lf_hfs_generic_buf_cache_find_gen_buf(psBuf);

        if (psCacheEntry) {
            lf_hfs_generic_buf_cache_remove(psCacheEntry);
        } else {
            panic("A buffer is marked Cached, but was not found in Cache");
        }

        lf_lck_mtx_unlock(&buf_cache_mutex);

    } else {
        // This is a non-cached buffer
        gCacheStat.gen_buf_uncached--;
        lf_hfs_generic_buf_unlock(psBuf);
        lf_cond_destroy(&psBuf->sOwnerCond);
        lf_lck_mtx_destroy(&psBuf->sLock);
        hfs_free(psBuf->pvData);
        hfs_free(psBuf);
    }
}

void lf_hfs_generic_buf_ref(GenericLFBuf *psBuf) {
    lf_hfs_generic_buf_lock(psBuf);
    assert(psBuf->sOwnerThread == pthread_self());
    psBuf->uUseCnt++;
    lf_hfs_generic_buf_unlock(psBuf);
}

int lf_hfs_generic_buf_validate_owner(GenericLFBuf *psBuf) {

    return(psBuf->sOwnerThread == pthread_self());
}

void lf_hfs_generic_buf_rele(GenericLFBuf *psBuf) {
    lf_hfs_generic_buf_lock(psBuf);
    assert(psBuf->uUseCnt != 0);
    assert(psBuf->sOwnerThread == pthread_self());
    psBuf->uUseCnt--;
    if (psBuf->uUseCnt == 0) {
        psBuf->sOwnerThread = NULL;
        lf_cond_wakeup(&psBuf->sOwnerCond);
    }
    lf_hfs_generic_buf_unlock(psBuf);
}

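// lf_hfs_generic_buf_lock / lf_hfs_generic_buf_unlock
// Per-buffer lock with recursion support: if the calling thread already holds the
// lock, only uLockCnt is bumped; the underlying mutex is taken once and released
// again when uLockCnt drops back to zero.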
void lf_hfs_generic_buf_lock(GenericLFBufPtr psBuf) {
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_lock: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    if (psBuf->pLockingThread == pthread_self()) {
        psBuf->uLockCnt++;
    } else {
        lf_lck_mtx_lock(&psBuf->sLock);
        assert(psBuf->uLockCnt == 0);
        psBuf->uLockCnt = 1;
        psBuf->pLockingThread = pthread_self();
    }
}

void lf_hfs_generic_buf_unlock(GenericLFBufPtr psBuf) {
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_unlock: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    assert(psBuf->pLockingThread == pthread_self());
    assert(psBuf->uLockCnt);

    psBuf->uLockCnt--;
    if (!psBuf->uLockCnt) {
        psBuf->pLockingThread = NULL;
        lf_lck_mtx_unlock(&psBuf->sLock);
    }
}

void lf_hfs_generic_buf_set_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags) {
    lf_hfs_generic_buf_lock(psBuf);
    psBuf->uCacheFlags |= uCacheFlags;
    lf_hfs_generic_buf_unlock(psBuf);
}

void lf_hfs_generic_buf_clear_cache_flag(GenericLFBufPtr psBuf, uint64_t uCacheFlags) {
    lf_hfs_generic_buf_lock(psBuf);
    psBuf->uCacheFlags &= ~uCacheFlags;
    lf_hfs_generic_buf_unlock(psBuf);
}

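// lf_hfs_buf_free_unused
// Evict cold entries from the tail of the LRU list (the list head holds the most
// recently used entry) until the cache is back under the LOWER_LIMIT thresholds.
// Eviction stops early if the oldest entry is still referenced or write-locked.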
static void lf_hfs_buf_free_unused()
{
    // We want to free more than we actually need, so that we won't have to come back here for every new buffer we allocate
    while ( gCacheStat.buf_cache_size > BUF_CACHE_MAX_ENTRIES_LOWER_LIMIT ||
            gCacheStat.buf_total_allocated_size > BUF_CACHE_MAX_DATA_LOWER_LIMIT)
    {
        struct buf_cache_entry *last;

        last = TAILQ_LAST(&buf_cache_list, buf_cache_head);

        if (!last) {
            break;
        }

        lf_hfs_generic_buf_lock(&last->sBuf);

        if ((last->sBuf.uUseCnt) || (last->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) {
            // The last buffer in the buffer cache is in use.
            // Nothing more to free.
            lf_hfs_generic_buf_unlock(&last->sBuf);
            break;
        }

        ++gCacheStat.buf_cache_cleanup;
        lf_hfs_generic_buf_cache_remove(last);
    }
}

void lf_hfs_generic_buf_release( GenericLFBufPtr psBuf )
{
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_release: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    if (!psBuf) {
        return;
    }

    lf_hfs_generic_buf_rele(psBuf);

    // If unused and uncached, free.
    if ((psBuf->uCacheFlags & GEN_BUF_NON_CACHED) && (psBuf->uUseCnt == 0)) {
        // Buffer not in cache - free it
        gCacheStat.gen_buf_uncached--;
        lf_cond_destroy(&psBuf->sOwnerCond);
        lf_lck_mtx_destroy(&psBuf->sLock);
        hfs_free(psBuf->pvData);
        hfs_free(psBuf);
        return;
    }

    // Clean up unused entries in the cache
    int iTry = lf_lck_mtx_try_lock(&buf_cache_mutex);
    if (iTry) {
        return;
    }

    // We want to free more than we actually need, so that we won't have to come back here for every new buffer we allocate
    lf_hfs_buf_free_unused();
    lf_lck_mtx_unlock(&buf_cache_mutex);
}

// Buffer Cache functions

void lf_hfs_generic_buf_cache_init( void ) {
    gCacheStat.buf_cache_size       = 0;
    gCacheStat.max_gen_buf_uncached = 0;
    gCacheStat.gen_buf_uncached     = 0;
    lf_lck_mtx_init(&buf_cache_mutex);
    TAILQ_INIT(&buf_cache_list);
    buf_cache_state = true;
}

void lf_hfs_generic_buf_cache_deinit( void )
{
    lf_hfs_generic_buf_cache_remove_all(IGNORE_MOUNT_FD);

    assert(gCacheStat.buf_cache_size == 0);
    assert(gCacheStat.gen_buf_uncached == 0);

    buf_cache_state = false;
    lf_lck_mtx_destroy(&buf_cache_mutex);
}

void lf_hfs_generic_buf_cache_clear_by_iFD( int iFD )
{
    lf_hfs_generic_buf_cache_remove_all(iFD);
}

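// lf_hfs_generic_buf_match_range
// Returns true when 'entry' covers the same file range as psBuf: an exact match
// (RL_MATCHINGOVERLAP), or a larger cached entry that starts at the same offset
// (RL_OVERLAPCONTAINSRANGE), both count as hits. Partially overlapping ranges are
// treated as a cache-consistency bug.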
boolean_t lf_hfs_generic_buf_match_range( struct buf_cache_entry *entry, GenericLFBufPtr psBuf )
{
    if ( VTOF(entry->sBuf.psVnode) != VTOF(psBuf->psVnode) )
    {
        return false;
    }

    uint64_t size_1  = entry->sBuf.uDataSize;
    uint64_t start_1 = entry->sBuf.uBlockN * size_1;
    uint64_t end_1   = start_1 + size_1 - 1;
    uint64_t size_2  = psBuf->uDataSize;
    uint64_t start_2 = psBuf->uBlockN * size_2;
    uint64_t end_2   = start_2 + size_2 - 1;

    enum rl_overlaptype overlap;
    struct rl_entry entry_range = {.rl_start = start_1, .rl_end = end_1};

    overlap = rl_overlap(&entry_range, start_2, end_2);

    switch (overlap)
    {
        case RL_MATCHINGOVERLAP:
            return true;
        case RL_OVERLAPCONTAINSRANGE:
            // Make sure we have the same start though
            assert(start_1 == start_2);
            return true;
        case RL_NOOVERLAP:
        case RL_OVERLAPISCONTAINED:
            return false;
        case RL_OVERLAPSTARTSBEFORE:
        case RL_OVERLAPENDSAFTER:
            LFHFS_LOG(LEVEL_ERROR, " lf_hfs_generic_buf_match_range : cache overlap [%d]", overlap);
            assert(0);
    }
    return false;
}

struct buf_cache_entry * lf_hfs_generic_buf_cache_find( GenericLFBufPtr psBuf )
{
    struct buf_cache_entry *entry, *entry_next;

    TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next)
    {
        if ( lf_hfs_generic_buf_match_range(entry, psBuf) )
        {
            break;
        }
    }

    return entry;
}

// Run the function pfCallback on all buffers that belong to vnode psVnode.
int lf_hfs_generic_buf_write_iterate(vnode_t psVnode, IterateCallback pfCallback, uint32_t uFlags, void *pvArgs) {

    struct buf_cache_entry *psCacheEntry, *psNextCacheEntry;
    int iFD = VNODE_TO_IFD(psVnode);

    TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) {
        int iEntryFD = VNODE_TO_IFD(psCacheEntry->sBuf.psVnode);

        if ( (iFD == iEntryFD) && (psCacheEntry->sBuf.psVnode == psVnode)) {
            if ((uFlags & BUF_SKIP_LOCKED) && (psCacheEntry->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) {
                continue;
            }
            if ((uFlags & BUF_SKIP_NONLOCKED) && !(psCacheEntry->sBuf.uCacheFlags & GEN_BUF_WRITE_LOCK)) {
                continue;
            }
            pfCallback(&psCacheEntry->sBuf, pvArgs);
        }
    }
    return(0);
}

struct buf_cache_entry *lf_hfs_generic_buf_cache_find_by_phy_cluster(int iFD, uint64_t uPhyCluster, uint64_t uBlockSize) {

    struct buf_cache_entry *psCacheEntry, *psNextCacheEntry;

    TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) {
        if (psCacheEntry->sBuf.psVnode)
        {
            int iEntryFD = VNODE_TO_IFD(psCacheEntry->sBuf.psVnode);
            if ( (psCacheEntry->sBuf.uPhyCluster == uPhyCluster) &&
                 (iEntryFD                       == iFD        ) &&
                 (psCacheEntry->sBuf.uDataSize   >= uBlockSize ) ) {
                break;
            }
        }
        else
        {
            LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_cache_find_by_phy_cluster: got buf with vnode == NULL, cache_flags: 0x%llx, uUseCnt %d", psCacheEntry->sBuf.uCacheFlags, psCacheEntry->sBuf.uUseCnt);
            assert(0);
        }
    }
    return psCacheEntry;
}

struct buf_cache_entry *lf_hfs_generic_buf_cache_find_gen_buf(GenericLFBufPtr psBuf) {

    struct buf_cache_entry *psCacheEntry, *psNextCacheEntry;

    TAILQ_FOREACH_SAFE(psCacheEntry, &buf_cache_list, buf_cache_link, psNextCacheEntry) {
        if ( &psCacheEntry->sBuf == psBuf ) {
            break;
        }
    }
    return psCacheEntry;
}

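// lf_hfs_generic_buf_cache_add
// Copy psBuf into a new most-recently-used cache entry with freshly allocated,
// zeroed data. If the cache would exceed its UPPER_LIMIT thresholds, unused entries
// are evicted first. The returned pointer refers to the cached copy, which replaces
// the caller's local copy.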
GenericLFBufPtr lf_hfs_generic_buf_cache_add( GenericLFBufPtr psBuf )
{
    struct buf_cache_entry *entry;

    // Check if we have enough space to allocate this buffer; if not, evict unused entries first
    if (gCacheStat.buf_total_allocated_size + psBuf->uDataSize > BUF_CACHE_MAX_DATA_UPPER_LIMIT ||
        gCacheStat.buf_cache_size + 1 == BUF_CACHE_MAX_ENTRIES_UPPER_LIMIT)
    {
        lf_hfs_buf_free_unused();
    }

    entry = hfs_mallocz(sizeof(*entry));
    if (!entry) {
        goto error;
    }

    memcpy(&entry->sBuf, (void*)psBuf, sizeof(*psBuf));
    entry->sBuf.uCacheFlags &= ~GEN_BUF_NON_CACHED;

    entry->sBuf.pvData = hfs_mallocz(psBuf->uDataSize);
    if (!entry->sBuf.pvData) {
        goto error;
    }

    TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link);

    gCacheStat.buf_cache_size++;
    gCacheStat.buf_total_allocated_size += psBuf->uDataSize;

    if (gCacheStat.buf_cache_size > gCacheStat.max_buf_cache_size) {
        gCacheStat.max_buf_cache_size = gCacheStat.buf_cache_size;
    }

    return(&entry->sBuf);

error:
    if (entry) {
        if (entry->sBuf.pvData) {
            hfs_free(entry->sBuf.pvData);
        }
        hfs_free(entry);
    }
    return(NULL);
}

void lf_hfs_generic_buf_cache_update( GenericLFBufPtr psBuf )
{
    struct buf_cache_entry *entry;

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_cache_update: psBuf %p\n", psBuf);
#endif

    // Check that the cache entry still exists and hasn't been thrown away
    entry = lf_hfs_generic_buf_cache_find(psBuf);
    if (!entry) {
        return;
    }

    // Move the entry to the head of the list (most recently used)
    TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
    TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link);
}

void lf_hfs_generic_buf_cache_copy( struct buf_cache_entry *entry, __unused GenericLFBufPtr psBuf )
{
#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_cache_copy: psBuf %p\n", psBuf);
#endif

    TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
    TAILQ_INSERT_HEAD(&buf_cache_list, entry, buf_cache_link);
}

void lf_hfs_generic_buf_cache_remove( struct buf_cache_entry *entry ) {

    if (entry->sBuf.uUseCnt != 0) {
        LFHFS_LOG(LEVEL_ERROR, "lf_hfs_generic_buf_cache_remove: remove buffer %p with uUseCnt %u", &entry->sBuf, entry->sBuf.uUseCnt);
    }

#if GEN_BUF_ALLOC_DEBUG
    GenericLFBuf *psBuf = &entry->sBuf;
    printf("lf_hfs_generic_buf_cache_remove: psBuf %p, psVnode %p, uBlockN %llu, uDataSize %u, uFlags 0x%llx, uPhyCluster %llu, uUseCnt %u\n",
           psBuf, psBuf->psVnode, psBuf->uBlockN, psBuf->uDataSize, psBuf->uCacheFlags, psBuf->uPhyCluster, psBuf->uUseCnt);
#endif

    TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
    --gCacheStat.buf_cache_size;
    ++gCacheStat.buf_cache_remove;
    gCacheStat.buf_total_allocated_size -= entry->sBuf.uDataSize;

    assert(entry->sBuf.uLockCnt == 1);

    lf_lck_mtx_unlock(&entry->sBuf.sLock);
    lf_cond_destroy(&entry->sBuf.sOwnerCond);
    lf_lck_mtx_destroy(&entry->sBuf.sLock);

    hfs_free(entry->sBuf.pvData);
    hfs_free(entry);
}

void lf_hfs_generic_buf_cache_remove_all( int iFD ) {
    struct buf_cache_entry *entry, *entry_next;

    lf_lck_mtx_lock(&buf_cache_mutex);

    TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next)
    {
        if ( (iFD == IGNORE_MOUNT_FD) || ( VNODE_TO_IFD(entry->sBuf.psVnode) == iFD ) )
        {
            if (iFD == IGNORE_MOUNT_FD) {
                // Media no longer available, force remove all
                TAILQ_REMOVE(&buf_cache_list, entry, buf_cache_link);
                --gCacheStat.buf_cache_size;
                ++gCacheStat.buf_cache_remove;
                gCacheStat.buf_total_allocated_size -= entry->sBuf.uDataSize;
            } else {
                lf_hfs_generic_buf_lock(&entry->sBuf);
                lf_hfs_generic_buf_cache_remove(entry);
            }
        }
    }

    lf_lck_mtx_unlock(&buf_cache_mutex);
}

/* buf_cache_mutex should be locked by the caller, using lf_hfs_generic_buf_cache_LockBufCache() */
void lf_hfs_generic_buf_cache_remove_vnode(vnode_t vp) {

    struct buf_cache_entry *entry, *entry_next;

#if GEN_BUF_ALLOC_DEBUG
    printf("lf_hfs_generic_buf_cache_remove_vnode: vp %p: ", vp);
#endif

    TAILQ_FOREACH_SAFE(entry, &buf_cache_list, buf_cache_link, entry_next) {

        if ( entry->sBuf.psVnode == vp ) {

#if GEN_BUF_ALLOC_DEBUG
            printf("&sBuf %p, ", &entry->sBuf);
#endif

            lf_hfs_generic_buf_lock(&entry->sBuf);
            lf_hfs_generic_buf_cache_remove(entry);
        }
    }

#if GEN_BUF_ALLOC_DEBUG
    printf("Done.\n");
#endif
}

void lf_hfs_generic_buf_cache_LockBufCache(void)
{
    lf_lck_mtx_lock(&buf_cache_mutex);
}

void lf_hfs_generic_buf_cache_UnLockBufCache(void)
{
    lf_lck_mtx_unlock(&buf_cache_mutex);
}