path: root/src/server/image.c
author     Simon Rettberg  2020-03-04 12:17:40 +0100
committer  Simon Rettberg  2020-03-04 12:17:40 +0100
commit     5c92010d74451a46064e85484a6969a8a2f2cf82 (patch)
tree       b2dd3cb72cb12f8d7311c31a4743cd345b8c75c8 /src/server/image.c
parent     [SERVER] Get rid of two loops in image_updateCacheMap (diff)
download   dnbd3-5c92010d74451a46064e85484a6969a8a2f2cf82.tar.gz
           dnbd3-5c92010d74451a46064e85484a6969a8a2f2cf82.tar.xz
           dnbd3-5c92010d74451a46064e85484a6969a8a2f2cf82.zip
[SERVER] Likewise, get rid of same loops in client handler
Diffstat (limited to 'src/server/image.c')
-rw-r--r--  src/server/image.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/src/server/image.c b/src/server/image.c
index 886bf33..3583f86 100644
--- a/src/server/image.c
+++ b/src/server/image.c
@@ -121,8 +121,15 @@ void image_updateCachemap(dnbd3_image_t *image, uint64_t start, uint64_t end, co
// First and last byte masks
const uint8_t fb = (uint8_t)(0xff << ((start >> 12) & 7));
const uint8_t lb = (uint8_t)(~(0xff << ((((end - 1) >> 12) & 7) + 1)));
- atomic_thread_fence( memory_order_acquire );
- if ( firstByteInMap != lastByteInMap ) {
+ if ( firstByteInMap == lastByteInMap ) {
+ if ( set ) {
+ uint8_t o = atomic_fetch_or( &cache->map[firstByteInMap], (uint8_t)(fb & lb) );
+ setNewBlocks = o != ( o | (fb & lb) );
+ } else {
+ atomic_fetch_and( &cache->map[firstByteInMap], (uint8_t)~(fb & lb) );
+ }
+ } else {
+ atomic_thread_fence( memory_order_acquire );
if ( set ) {
uint8_t fo = atomic_fetch_or_explicit( &cache->map[firstByteInMap], fb, memory_order_relaxed );
uint8_t lo = atomic_fetch_or_explicit( &cache->map[lastByteInMap], lb, memory_order_relaxed );
@@ -131,22 +138,15 @@ void image_updateCachemap(dnbd3_image_t *image, uint64_t start, uint64_t end, co
atomic_fetch_and_explicit( &cache->map[firstByteInMap], (uint8_t)~fb, memory_order_relaxed );
atomic_fetch_and_explicit( &cache->map[lastByteInMap], (uint8_t)~lb, memory_order_relaxed );
}
- } else {
- if ( set ) {
- uint8_t o = atomic_fetch_or_explicit( &cache->map[firstByteInMap], (uint8_t)(fb & lb), memory_order_relaxed );
- setNewBlocks = o != ( o | (fb & lb) );
- } else {
- atomic_fetch_and_explicit( &cache->map[firstByteInMap], (uint8_t)~(fb & lb), memory_order_relaxed );
- }
- }
- const uint8_t nval = set ? 0xff : 0;
- // Everything in between
- for ( pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
- if ( atomic_exchange_explicit( &cache->map[pos], nval, memory_order_relaxed ) != nval && set ) {
- setNewBlocks = true;
+ // Everything in between
+ const uint8_t nval = set ? 0xff : 0;
+ for ( pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
+ if ( atomic_exchange_explicit( &cache->map[pos], nval, memory_order_relaxed ) != nval && set ) {
+ setNewBlocks = true;
+ }
}
+ atomic_thread_fence( memory_order_release );
}
- atomic_thread_fence( memory_order_release );
if ( setNewBlocks && image->crc32 != NULL ) {
// If setNewBlocks is set, at least one of the blocks was not cached before, so queue all hash blocks
// for checking, even though this might lead to checking some hash block again, if it was
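
The restructuring above hinges on two things: the bit masks fb and lb, which select the 4 KiB blocks touched inside the first and last byte of the cache map, and the pre-update value returned by atomic_fetch_or, which tells whether any of those bits was newly set. Below is a minimal, self-contained sketch of that logic, assuming the 4 KiB block size and the 8-blocks-per-map-byte layout implied by the shifts in the diff; the function set_range, the fixed map size and the main driver are hypothetical and not part of dnbd3.

/*
 * Standalone sketch (not dnbd3 code): each bit of the cache map covers one
 * 4 KiB block, so one map byte covers 32 KiB. fb masks the blocks touched in
 * the first map byte, lb those in the last one.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAP_BYTES 16 /* covers 16 * 32 KiB = 512 KiB for this example */

static _Atomic uint8_t map[MAP_BYTES];

/* Mark [start, end) as cached; return true if at least one bit was newly set. */
static bool set_range(uint64_t start, uint64_t end)
{
	bool setNewBlocks = false;
	const uint64_t firstByteInMap = start >> 15;      /* 8 blocks of 4 KiB per map byte */
	const uint64_t lastByteInMap = (end - 1) >> 15;
	const uint8_t fb = (uint8_t)(0xff << ((start >> 12) & 7));
	const uint8_t lb = (uint8_t)(~(0xff << ((((end - 1) >> 12) & 7) + 1)));

	if ( firstByteInMap == lastByteInMap ) {
		/* Range fits into one map byte: apply both masks combined */
		uint8_t o = atomic_fetch_or( &map[firstByteInMap], (uint8_t)(fb & lb) );
		setNewBlocks = o != ( o | (fb & lb) );
	} else {
		uint8_t fo = atomic_fetch_or( &map[firstByteInMap], fb );
		uint8_t lo = atomic_fetch_or( &map[lastByteInMap], lb );
		setNewBlocks = ( fo != ( fo | fb ) ) || ( lo != ( lo | lb ) );
		/* Every byte in between is fully covered by the range */
		for ( uint64_t pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
			if ( atomic_exchange( &map[pos], 0xff ) != 0xff ) {
				setNewBlocks = true;
			}
		}
	}
	return setNewBlocks;
}

int main(void)
{
	/* 0x1000..0x3000 touches blocks 1 and 2, both within map byte 0 */
	printf( "first call:  new=%d\n", set_range( 0x1000, 0x3000 ) ); /* new=1 */
	printf( "second call: new=%d\n", set_range( 0x1000, 0x3000 ) ); /* new=0 */
	return 0;
}

Because the value returned by the atomic read-modify-write already reflects any concurrent writers, the "new blocks were set" decision needs no extra locking. The sketch uses the sequentially consistent defaults for brevity; the diff instead uses the relaxed variants bracketed by one acquire and one release fence, and only on the multi-byte path where more than one map byte is touched.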