From 5c92010d74451a46064e85484a6969a8a2f2cf82 Mon Sep 17 00:00:00 2001
From: Simon Rettberg
Date: Wed, 4 Mar 2020 12:17:40 +0100
Subject: [SERVER] Likewise, get rid of same loops in client handler

---
 src/server/image.c | 32 ++++++++++++++++----------------
 src/server/net.c   | 58 +++++++++++++++++++++-----------------------------------
 2 files changed, 39 insertions(+), 51 deletions(-)

diff --git a/src/server/image.c b/src/server/image.c
index 886bf33..3583f86 100644
--- a/src/server/image.c
+++ b/src/server/image.c
@@ -121,8 +121,15 @@ void image_updateCachemap(dnbd3_image_t *image, uint64_t start, uint64_t end, co
 	// First and last byte masks
 	const uint8_t fb = (uint8_t)(0xff << ((start >> 12) & 7));
 	const uint8_t lb = (uint8_t)(~(0xff << ((((end - 1) >> 12) & 7) + 1)));
-	atomic_thread_fence( memory_order_acquire );
-	if ( firstByteInMap != lastByteInMap ) {
+	if ( firstByteInMap == lastByteInMap ) {
+		if ( set ) {
+			uint8_t o = atomic_fetch_or( &cache->map[firstByteInMap], (uint8_t)(fb & lb) );
+			setNewBlocks = o != ( o | (fb & lb) );
+		} else {
+			atomic_fetch_and( &cache->map[firstByteInMap], (uint8_t)~(fb & lb) );
+		}
+	} else {
+		atomic_thread_fence( memory_order_acquire );
 		if ( set ) {
 			uint8_t fo = atomic_fetch_or_explicit( &cache->map[firstByteInMap], fb, memory_order_relaxed );
 			uint8_t lo = atomic_fetch_or_explicit( &cache->map[lastByteInMap], lb, memory_order_relaxed );
@@ -131,22 +138,15 @@ void image_updateCachemap(dnbd3_image_t *image, uint64_t start, uint64_t end, co
 			atomic_fetch_and_explicit( &cache->map[firstByteInMap], (uint8_t)~fb, memory_order_relaxed );
 			atomic_fetch_and_explicit( &cache->map[lastByteInMap], (uint8_t)~lb, memory_order_relaxed );
 		}
-	} else {
-		if ( set ) {
-			uint8_t o = atomic_fetch_or_explicit( &cache->map[firstByteInMap], (uint8_t)(fb & lb), memory_order_relaxed );
-			setNewBlocks = o != ( o | (fb & lb) );
-		} else {
-			atomic_fetch_and_explicit( &cache->map[firstByteInMap], (uint8_t)~(fb & lb), memory_order_relaxed );
-		}
-	}
-	const uint8_t nval = set ? 0xff : 0;
-	// Everything in between
-	for ( pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
-		if ( atomic_exchange_explicit( &cache->map[pos], nval, memory_order_relaxed ) != nval && set ) {
-			setNewBlocks = true;
+		// Everything in between
+		const uint8_t nval = set ? 0xff : 0;
+		for ( pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
+			if ( atomic_exchange_explicit( &cache->map[pos], nval, memory_order_relaxed ) != nval && set ) {
+				setNewBlocks = true;
+			}
 		}
+		atomic_thread_fence( memory_order_release );
 	}
-	atomic_thread_fence( memory_order_release );
 	if ( setNewBlocks && image->crc32 != NULL ) {
 		// If setNewBlocks is set, at least one of the blocks was not cached before, so queue all hash blocks
 		// for checking, even though this might lead to checking some hash block again, if it was
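Note on the image.c change: the cache map stores one bit per 4 KiB block (hence the >> 12) and therefore eight blocks per map byte (hence the >> 15). fb keeps only the bits of the first byte at or above the start block, and lb only the bits of the last byte below the end block, so when firstByteInMap == lastByteInMap, fb & lb covers exactly the requested range and a single atomic RMW replaces the old fence-and-loop sequence. Below is a minimal compilable sketch of that single-byte fast path; BLOCK_SHIFT, MAP_SHIFT and set_range_single_byte are illustrative names rather than dnbd3 identifiers, and the multi-byte path is deliberately stubbed out.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SHIFT 12 /* one bit per 4 KiB block, as implied by the >> 12 above */
#define MAP_SHIFT   15 /* eight blocks per map byte, as implied by the >> 15 */

/* Mark the block-aligned range [start, end) as cached; returns true if at
 * least one bit was newly set. Only the single-byte fast path is shown. */
static bool set_range_single_byte( _Atomic uint8_t *map, uint64_t start, uint64_t end )
{
	const uint64_t firstByteInMap = start >> MAP_SHIFT;
	const uint64_t lastByteInMap = ( end - 1 ) >> MAP_SHIFT;
	const uint8_t fb = (uint8_t)( 0xff << ( ( start >> BLOCK_SHIFT ) & 7 ) );
	const uint8_t lb = (uint8_t)( ~( 0xff << ( ( ( ( end - 1 ) >> BLOCK_SHIFT ) & 7 ) + 1 ) ) );
	if ( firstByteInMap != lastByteInMap )
		return false; // Multi-byte path omitted in this sketch
	// fb & lb selects exactly the blocks of the range, so one atomic RMW suffices
	const uint8_t o = atomic_fetch_or( &map[firstByteInMap], (uint8_t)( fb & lb ) );
	return o != (uint8_t)( o | ( fb & lb ) );
}

int main( void )
{
	_Atomic uint8_t map[4] = { 0 };
	// Blocks 1 and 2 (bytes 0x1000..0x2fff) map to bits 1 and 2 of map[0]
	const bool fresh = set_range_single_byte( map, 0x1000, 0x3000 );
	printf( "newly set: %d, map[0] = 0x%02x\n", (int)fresh, (unsigned)atomic_load( &map[0] ) );
	return 0;
}

Compiled with a C11 compiler this prints "newly set: 1, map[0] = 0x06", i.e. bits 1 and 2 for the two blocks of the example range.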
diff --git a/src/server/net.c b/src/server/net.c
index 0f7e169..01056e0 100644
--- a/src/server/net.c
+++ b/src/server/net.c
@@ -216,7 +216,6 @@ void* net_handleNewConnection(void *clientPtr)
 
 	serialized_buffer_t payload;
 	uint16_t rid, client_version;
-	uint64_t start, end;
 
 	dnbd3_server_entry_t server_list[NUMBER_SERVERS];
 
@@ -343,46 +342,35 @@ void* net_handleNewConnection(void *clientPtr)
 
 			if ( request.size != 0 && cache != NULL ) {
 				// This is a proxyed image, check if we need to relay the request...
-				start = offset & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
-				end = (offset + request.size + DNBD3_BLOCK_SIZE - 1) & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
-				bool isCached = true;
+				const uint64_t start = offset & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
+				const uint64_t end = (offset + request.size + DNBD3_BLOCK_SIZE - 1) & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
 				const uint64_t firstByteInMap = start >> 15;
 				const uint64_t lastByteInMap = (end - 1) >> 15;
+				const uint8_t fb = (uint8_t)(0xff << ((start >> 12) & 7));
+				const uint8_t lb = (uint8_t)(~(0xff << ((((end - 1) >> 12) & 7) + 1)));
 				uint64_t pos;
 				uint8_t b;
-				atomic_thread_fence( memory_order_acquire );
-				// Middle - quick checking
-				if ( isCached ) {
-					for ( pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
-						if ( atomic_load_explicit( &cache->map[pos], memory_order_relaxed ) != 0xff ) {
-							isCached = false;
-							break;
-						}
+				bool isCached;
+				if ( firstByteInMap == lastByteInMap ) { // Single byte to check, much simpler
+					b = cache->map[firstByteInMap];
+					isCached = ( b & ( fb & lb ) ) == ( fb & lb );
+				} else {
+					isCached = true;
+					atomic_thread_fence( memory_order_acquire );
+					// First byte
+					if ( isCached ) {
+						b = atomic_load_explicit( &cache->map[firstByteInMap], memory_order_relaxed );
+						isCached = ( ( b & fb ) == fb );
 					}
-				}
-				// First byte
-				if ( isCached ) {
-					b = atomic_load_explicit( &cache->map[firstByteInMap], memory_order_relaxed );
-					if ( b != 0xff ) {
-						for ( pos = start; firstByteInMap == (pos >> 15) && pos < end; pos += DNBD3_BLOCK_SIZE ) {
-							const int map_x = (pos >> 12) & 7; // mod 8
-							const uint8_t bit_mask = (uint8_t)( 1 << map_x );
-							if ( (b & bit_mask) == 0 ) {
-								isCached = false;
-								break;
-							}
-						}
+					// Last byte
+					if ( isCached ) {
+						b = atomic_load_explicit( &cache->map[lastByteInMap], memory_order_relaxed );
+						isCached = ( ( b & lb ) == lb );
 					}
-				}
-				// Last byte - only check if request spans multiple bytes in cache map
-				if ( isCached && firstByteInMap != lastByteInMap ) {
-					b = atomic_load_explicit( &cache->map[lastByteInMap], memory_order_relaxed );
-					if ( b != 0xff ) {
-						for ( pos = lastByteInMap << 15; pos < end; pos += DNBD3_BLOCK_SIZE ) {
-							assert( lastByteInMap == (pos >> 15) );
-							const int map_x = (pos >> 12) & 7; // mod 8
-							const uint8_t bit_mask = (uint8_t)( 1 << map_x );
-							if ( (b & bit_mask) == 0 ) {
+					// Middle, must be all bits set (0xff)
+					if ( isCached ) {
+						for ( pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
+							if ( atomic_load_explicit( &cache->map[pos], memory_order_relaxed ) != 0xff ) {
 								isCached = false;
 								break;
 							}
-- 
cgit v1.2.3-55-g7522
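Note on the net.c change: the same masks make the cache check loop-free. Because start and end are block-aligned, a fully cached range must have every fb bit set in the first map byte, every lb bit set in the last, and 0xff in every byte in between, which is exactly what the new first-byte and last-byte comparisons and the 0xff middle loop test; the old code walked those bits one at a time. The sketch below mirrors that logic; range_is_cached and the shift constants are invented for illustration (the server does this inline on cache->map, and reads the byte directly in the single-byte case where this sketch uses a seq_cst atomic_load).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SHIFT 12 /* one bit per 4 KiB block */
#define MAP_SHIFT   15 /* eight blocks per map byte */

/* Returns true if every block of the block-aligned range [start, end) is
 * marked as cached in 'map'. */
static bool range_is_cached( _Atomic uint8_t *map, uint64_t start, uint64_t end )
{
	const uint64_t firstByteInMap = start >> MAP_SHIFT;
	const uint64_t lastByteInMap = ( end - 1 ) >> MAP_SHIFT;
	const uint8_t fb = (uint8_t)( 0xff << ( ( start >> BLOCK_SHIFT ) & 7 ) );
	const uint8_t lb = (uint8_t)( ~( 0xff << ( ( ( ( end - 1 ) >> BLOCK_SHIFT ) & 7 ) + 1 ) ) );
	if ( firstByteInMap == lastByteInMap ) {
		// Single byte: one masked compare covers the whole range
		const uint8_t b = atomic_load( &map[firstByteInMap] );
		return ( b & ( fb & lb ) ) == ( fb & lb );
	}
	atomic_thread_fence( memory_order_acquire );
	// First byte: all blocks from start up to the byte boundary must be set
	if ( ( atomic_load_explicit( &map[firstByteInMap], memory_order_relaxed ) & fb ) != fb )
		return false;
	// Last byte: all blocks below end must be set
	if ( ( atomic_load_explicit( &map[lastByteInMap], memory_order_relaxed ) & lb ) != lb )
		return false;
	// Middle bytes must be completely set
	for ( uint64_t pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
		if ( atomic_load_explicit( &map[pos], memory_order_relaxed ) != 0xff )
			return false;
	}
	return true;
}

int main( void )
{
	_Atomic uint8_t map[4] = { 0xff, 0xff, 0x0f, 0 };
	// map covers blocks 0..31; blocks 0..19 are cached (map[2] = 0x0f)
	printf( "cached: %d\n", (int)range_is_cached( map, 0, 0x14000 ) ); // 1: blocks 0..19
	printf( "cached: %d\n", (int)range_is_cached( map, 0, 0x15000 ) ); // 0: block 20 missing
	return 0;
}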