summaryrefslogtreecommitdiffstats
path: root/src/server/integrity.c
diff options
context:
space:
mode:
author Simon Rettberg 2019-08-29 14:49:18 +0200
committer Simon Rettberg 2019-08-29 14:49:18 +0200
commit 88695877f085af475a6ca8a01c2fbb08eb5b15da (patch)
tree 7e196a12eca25c72802c255f42dac77af13cf484 /src/server/integrity.c
parent[SERVER] reference: Fix error msg usage (diff)
downloaddnbd3-88695877f085af475a6ca8a01c2fbb08eb5b15da.tar.gz
dnbd3-88695877f085af475a6ca8a01c2fbb08eb5b15da.tar.xz
dnbd3-88695877f085af475a6ca8a01c2fbb08eb5b15da.zip
[SERVER] Use weakref for cache maps
Gets rid of a bunch of locking, especially the hot path in net.c where clients are requesting data. Many clients using the same incomplete image previously created a bottleneck here.
Diffstat (limited to 'src/server/integrity.c')
-rw-r--r--src/server/integrity.c10
1 file changed, 6 insertions, 4 deletions
diff --git a/src/server/integrity.c b/src/server/integrity.c
index 1fcb558..a9fbae6 100644
--- a/src/server/integrity.c
+++ b/src/server/integrity.c
@@ -181,10 +181,12 @@ static void* integrity_main(void * data UNUSED)
const uint64_t end = MIN( (uint64_t)(blocks[0] + 1) * HASH_BLOCK_SIZE, image->virtualFilesize );
bool complete = true;
if ( qCount == CHECK_ALL ) {
- // When checking full image, skip incomplete blocks, otherwise assume block is complete
- mutex_lock( &image->lock );
- complete = image_isHashBlockComplete( image->cache_map, blocks[0], fileSize );
- mutex_unlock( &image->lock );
+ dnbd3_cache_map_t *cache = ref_get_cachemap( image );
+ if ( cache != NULL ) {
+ // When checking full image, skip incomplete blocks, otherwise assume block is complete
+ complete = image_isHashBlockComplete( cache->map, blocks[0], fileSize );
+ ref_put( &cache->reference );
+ }
}
#if defined(linux) || defined(__linux)
while ( sync_file_range( fd, start, end - start, SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER ) == -1 )