summaryrefslogtreecommitdiffstats
path: root/src/server/net.c
diff options
context:
space:
mode:
author: Simon Rettberg <2020-03-03 16:46:36 +0100>
committer: Simon Rettberg <2020-03-03 16:48:26 +0100>
commit49a9cd2d89dd586db5e08c9d3e96b88a8e8346d7 (patch)
tree25022482e9ea992a1e42a23cc6c66b09f003aee8 /src/server/net.c
parent[SERVER] Expose image->problem bools as bitmask in RPC json data (diff)
downloaddnbd3-49a9cd2d89dd586db5e08c9d3e96b88a8e8346d7.tar.gz
dnbd3-49a9cd2d89dd586db5e08c9d3e96b88a8e8346d7.tar.xz
dnbd3-49a9cd2d89dd586db5e08c9d3e96b88a8e8346d7.zip
[SERVER] Optimize client handler for CMD_GET_BLOCK
Move CMD_GET_BLOCK out of switch block and mark as likely. Don't acquire and release cache map for every single request, but keep reference around and only release when a message other than CMD_GET_BLOCK arrives. On idle links, this should happen through CMD_KEEPALIVE every now and then.
Diffstat (limited to 'src/server/net.c')
-rw-r--r-- src/server/net.c | 68
1 file changed, 43 insertions(+), 25 deletions(-)
diff --git a/src/server/net.c b/src/server/net.c
index a478e0c..0f7e169 100644
--- a/src/server/net.c
+++ b/src/server/net.c
@@ -207,6 +207,7 @@ void* net_handleNewConnection(void *clientPtr)
dnbd3_reply_t reply;
dnbd3_image_t *image = NULL;
+ dnbd3_cache_map_t *cache = NULL;
int image_file = -1;
int num;
@@ -315,9 +316,8 @@ void* net_handleNewConnection(void *clientPtr)
// client handling mainloop
while ( recv_request_header( client->sock, &request ) ) {
if ( _shutdown ) break;
- switch ( request.cmd ) {
+ if ( likely ( request.cmd == CMD_GET_BLOCK ) ) {
- case CMD_GET_BLOCK:;
const uint64_t offset = request.offset_small; // Copy to full uint64 to prevent repeated masking
reply.handle = request.handle;
if ( unlikely( offset >= image->virtualFilesize ) ) {
@@ -326,7 +326,7 @@ void* net_handleNewConnection(void *clientPtr)
reply.size = 0;
reply.cmd = CMD_ERROR;
send_reply( client->sock, &reply, NULL );
- break;
+ continue;
}
if ( unlikely( offset + request.size > image->virtualFilesize ) ) {
// Sanity check
@@ -334,11 +334,14 @@ void* net_handleNewConnection(void *clientPtr)
reply.size = 0;
reply.cmd = CMD_ERROR;
send_reply( client->sock, &reply, NULL );
- break;
+ continue;
+ }
+
+ if ( cache == NULL && image->uplinkref != NULL ) {
+ cache = ref_get_cachemap( image );
}
- dnbd3_cache_map_t *cache;
- if ( request.size != 0 && ( cache = ref_get_cachemap( image ) ) != NULL ) {
+ if ( request.size != 0 && cache != NULL ) {
// This is a proxyed image, check if we need to relay the request...
start = offset & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
end = (offset + request.size + DNBD3_BLOCK_SIZE - 1) & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
@@ -360,36 +363,39 @@ void* net_handleNewConnection(void *clientPtr)
// First byte
if ( isCached ) {
b = atomic_load_explicit( &cache->map[firstByteInMap], memory_order_relaxed );
- for ( pos = start; firstByteInMap == (pos >> 15) && pos < end; pos += DNBD3_BLOCK_SIZE ) {
- const int map_x = (pos >> 12) & 7; // mod 8
- const uint8_t bit_mask = (uint8_t)( 1 << map_x );
- if ( (b & bit_mask) == 0 ) {
- isCached = false;
- break;
+ if ( b != 0xff ) {
+ for ( pos = start; firstByteInMap == (pos >> 15) && pos < end; pos += DNBD3_BLOCK_SIZE ) {
+ const int map_x = (pos >> 12) & 7; // mod 8
+ const uint8_t bit_mask = (uint8_t)( 1 << map_x );
+ if ( (b & bit_mask) == 0 ) {
+ isCached = false;
+ break;
+ }
}
}
}
// Last byte - only check if request spans multiple bytes in cache map
if ( isCached && firstByteInMap != lastByteInMap ) {
b = atomic_load_explicit( &cache->map[lastByteInMap], memory_order_relaxed );
- for ( pos = lastByteInMap << 15; pos < end; pos += DNBD3_BLOCK_SIZE ) {
- assert( lastByteInMap == (pos >> 15) );
- const int map_x = (pos >> 12) & 7; // mod 8
- const uint8_t bit_mask = (uint8_t)( 1 << map_x );
- if ( (b & bit_mask) == 0 ) {
- isCached = false;
- break;
+ if ( b != 0xff ) {
+ for ( pos = lastByteInMap << 15; pos < end; pos += DNBD3_BLOCK_SIZE ) {
+ assert( lastByteInMap == (pos >> 15) );
+ const int map_x = (pos >> 12) & 7; // mod 8
+ const uint8_t bit_mask = (uint8_t)( 1 << map_x );
+ if ( (b & bit_mask) == 0 ) {
+ isCached = false;
+ break;
+ }
}
}
}
- ref_put( &cache->reference );
if ( !isCached ) {
if ( !uplink_request( client, request.handle, offset, request.size, request.hops ) ) {
logadd( LOG_DEBUG1, "Could not relay uncached request from %s to upstream proxy for image %s:%d",
client->hostName, image->name, image->rid );
goto exit_client_cleanup;
}
- break; // DONE, exit request.cmd switch
+ continue; // Reply arrives on uplink some time later, handle next request now
}
}
@@ -474,7 +480,16 @@ void* net_handleNewConnection(void *clientPtr)
if ( lock ) mutex_unlock( &client->sendMutex );
// Global per-client counter
client->bytesSent += request.size; // Increase counter for statistics.
- break;
+ continue;
+ }
+ // Any other command
+ // Release cache map every now and then, in case the image was replicated
+ // entirely. Will be re-grabbed on next CMD_GET_BLOCK otherwise.
+ if ( cache != NULL ) {
+ ref_put( &cache->reference );
+ cache = NULL;
+ }
+ switch ( request.cmd ) {
case CMD_GET_SERVERS:
// Build list of known working alt servers
@@ -523,9 +538,9 @@ set_name: ;
logadd( LOG_ERROR, "Unknown command from client %s: %d", client->hostName, (int)request.cmd );
break;
- }
- }
- }
+ } // end switch
+ } // end loop
+ } // end bOk
exit_client_cleanup: ;
// First remove from list, then add to counter to prevent race condition
removeFromList( client );
@@ -536,6 +551,9 @@ exit_client_cleanup: ;
timing_get( &image->atime );
mutex_unlock( &image->lock );
}
+ if ( cache != NULL ) {
+ ref_put( &cache->reference );
+ }
freeClientStruct( client ); // This will also call image_release on client->image
return NULL ;
fail_preadd: ;