summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/bench/connection.c138
-rw-r--r--src/bench/connection.h2
-rw-r--r--src/bench/helper.h1
-rw-r--r--src/bench/main.c33
-rw-r--r--src/server/altservers.c740
-rw-r--r--src/server/altservers.h16
-rw-r--r--src/server/globals.c2
-rw-r--r--src/server/globals.h80
-rw-r--r--src/server/image.c216
-rw-r--r--src/server/integrity.c38
-rw-r--r--src/server/locks.c320
-rw-r--r--src/server/locks.h36
-rw-r--r--src/server/net.c112
-rw-r--r--src/server/net.h2
-rw-r--r--src/server/reference.c33
-rw-r--r--src/server/reference.h54
-rw-r--r--src/server/reftypes.h25
-rw-r--r--src/server/rpc.c27
-rw-r--r--src/server/server.c169
-rw-r--r--src/server/server.h2
-rw-r--r--src/server/threadpool.c137
-rw-r--r--src/server/threadpool.h5
-rw-r--r--src/server/uplink.c831
-rw-r--r--src/server/uplink.h6
-rw-r--r--src/serverconfig.h10
-rw-r--r--src/shared/sockhelper.c8
26 files changed, 1682 insertions, 1361 deletions
diff --git a/src/bench/connection.c b/src/bench/connection.c
index 129ae3c..498bc62 100644
--- a/src/bench/connection.c
+++ b/src/bench/connection.c
@@ -18,23 +18,10 @@ static const size_t SHORTBUF = 100;
#define SOCKET_KEEPALIVE_TIMEOUT (3)
#define MAX_ALTS (8)
#define MAX_HOSTS_PER_ADDRESS (2)
-// If a server wasn't reachable this many times, we slowly start skipping it on measurements
-static const int FAIL_BACKOFF_START_COUNT = 8;
#define RTT_COUNT (4)
/* Module variables */
-
-// Init guard
-static bool connectionInitDone = false;
-static bool keepRunning = true;
-
-static struct {
- int sockFd;
- pthread_mutex_t sendMutex;
- dnbd3_signal_t* panicSignal;
- dnbd3_host_t currentServer;
- uint64_t startupTime;
-} connection;
+static char trash[4096];
// Known alt servers
typedef struct _alt_server {
@@ -54,13 +41,14 @@ bool connection_init_n_times(
const char *lowerImage,
const uint16_t rid,
int ntimes,
- BenchCounters* counters,
- bool closeSockets
+ int blockSize,
+ BenchCounters* counters
) {
for (int run_i = 0; run_i < ntimes; ++run_i) {
counters->attempts++;
- printf(".");
+ putchar('.');
+ fflush(stdout);
int sock = -1;
char host[SHORTBUF];
serialized_buffer_t buffer;
@@ -68,66 +56,84 @@ bool connection_init_n_times(
char *remoteName;
uint64_t remoteSize;
- if ( !connectionInitDone && keepRunning ) {
- dnbd3_host_t tempHosts[MAX_HOSTS_PER_ADDRESS];
- const char *current, *end;
- int altIndex = 0;
- memset( altservers, 0, sizeof altservers );
- connection.sockFd = -1;
- current = hosts;
- do {
- // Get next host from string
- while ( *current == ' ' ) current++;
- end = strchr( current, ' ' );
- size_t len = (end == NULL ? SHORTBUF : (size_t)( end - current ) + 1);
- if ( len > SHORTBUF ) len = SHORTBUF;
- snprintf( host, len, "%s", current );
- int newHosts = sock_resolveToDnbd3Host( host, tempHosts, MAX_HOSTS_PER_ADDRESS );
- for ( int i = 0; i < newHosts; ++i ) {
- if ( altIndex >= MAX_ALTS )
+ dnbd3_host_t tempHosts[MAX_HOSTS_PER_ADDRESS];
+ const char *current, *end;
+ int altIndex = 0;
+ memset( altservers, 0, sizeof altservers );
+ current = hosts;
+ do {
+ // Get next host from string
+ while ( *current == ' ' ) current++;
+ end = strchr( current, ' ' );
+ size_t len = (end == NULL ? SHORTBUF : (size_t)( end - current ) + 1);
+ if ( len > SHORTBUF ) len = SHORTBUF;
+ snprintf( host, len, "%s", current );
+ int newHosts = sock_resolveToDnbd3Host( host, tempHosts, MAX_HOSTS_PER_ADDRESS );
+ for ( int i = 0; i < newHosts; ++i ) {
+ if ( altIndex >= MAX_ALTS )
+ break;
+ altservers[altIndex].host = tempHosts[i];
+ altIndex += 1;
+ }
+ current = end + 1;
+ } while ( end != NULL && altIndex < MAX_ALTS );
+ // Connect
+ for ( int i = 0; i < altIndex; ++i ) {
+ if ( altservers[i].host.type == 0 )
+ continue;
+ // Try to connect
+ dnbd3_reply_t reply;
+ sock = sock_connect( &altservers[i].host, 3500, 10000 );
+ if ( sock == -1 ) {
+ counters->fails++;
+ logadd( LOG_ERROR, "Could not connect to host (errno=%d)", errno );
+ } else if ( !dnbd3_select_image( sock, lowerImage, rid, 0 ) ) {
+ counters->fails++;
+ logadd( LOG_ERROR, "Could not send select image" );
+ } else if ( !dnbd3_select_image_reply( &buffer, sock, &remoteVersion, &remoteName, &remoteRid, &remoteSize ) ) {
+ counters->fails++;
+ logadd( LOG_ERROR, "Could not read select image reply (%d)", errno );
+ } else if ( rid != 0 && rid != remoteRid ) {
+ counters->fails++;
+ logadd( LOG_ERROR, "rid mismatch" );
+ } else if ( !dnbd3_get_block( sock, run_i * blockSize, blockSize, 0, 0 ) ) {
+ counters->fails++;
+ logadd( LOG_ERROR, "send: get block failed" );
+ } else if ( !dnbd3_get_reply( sock, &reply ) ) {
+ counters->fails++;
+ logadd( LOG_ERROR, "recv: get block header failed" );
+ } else if ( reply.cmd != CMD_GET_BLOCK ) {
+ counters->fails++;
+ logadd( LOG_ERROR, "recv: get block reply is not CMD_GET_BLOCK" );
+ } else {
+ int rv, togo = blockSize;
+ do {
+ rv = recv( sock, trash, MIN( sizeof(trash), togo ), MSG_WAITALL|MSG_NOSIGNAL );
+ if ( rv == -1 && errno == EINTR )
+ continue;
+ if ( rv <= 0 )
break;
- altservers[altIndex].host = tempHosts[i];
- altIndex += 1;
- }
- current = end + 1;
- } while ( end != NULL && altIndex < MAX_ALTS );
- logadd( LOG_INFO, "Got %d servers from init call", altIndex );
- // Connect
- for ( int i = 0; i < altIndex; ++i ) {
- if ( altservers[i].host.type == 0 )
- continue;
- // Try to connect
- sock = sock_connect( &altservers[i].host, 500, SOCKET_KEEPALIVE_TIMEOUT * 1000 );
- if ( sock == -1 ) {
- counters->fails++;
- logadd( LOG_ERROR, "Could not connect to host" );
- } else if ( !dnbd3_select_image( sock, lowerImage, rid, 0 ) ) {
- counters->fails++;
- logadd( LOG_ERROR, "Could not send select image" );
- } else if ( !dnbd3_select_image_reply( &buffer, sock, &remoteVersion, &remoteName, &remoteRid, &remoteSize ) ) {
+ togo -= rv;
+ } while ( togo > 0 );
+ if ( togo != 0 ) {
counters->fails++;
- logadd( LOG_ERROR, "Could not read select image reply (%d)", errno );
- } else if ( rid != 0 && rid != remoteRid ) {
- counters->fails++;
- logadd( LOG_ERROR, "rid mismatch" );
+ logadd( LOG_ERROR, "recv: get block payload failed (remaining %d)", togo );
} else {
counters->success++;
- break;
- }
- // Failed
- logadd( LOG_DEBUG1, "Server does not offer requested image... " );
- if ( sock != -1 ) {
close( sock );
sock = -1;
+ continue;
}
}
+ // Failed
if ( sock != -1 ) {
- // connectionInitDone = true;
- if (closeSockets) {
- close( sock );
- }
+ close( sock );
+ sock = -1;
}
}
+ if ( sock != -1 ) {
+ close( sock );
+ }
}
return true;
}
diff --git a/src/bench/connection.h b/src/bench/connection.h
index 9cb59ef..69207ff 100644
--- a/src/bench/connection.h
+++ b/src/bench/connection.h
@@ -19,7 +19,7 @@ typedef struct _dnbd3_async {
} dnbd3_async_t;
-bool connection_init_n_times(const char *hosts, const char *image, const uint16_t rid, int ntimes, BenchCounters* counters, bool closeSockets);
+bool connection_init_n_times(const char *hosts, const char *image, const uint16_t rid, int ntimes, int blockSize, BenchCounters* counters);
bool connection_init(const char *hosts, const char *image, const uint16_t rid);
diff --git a/src/bench/helper.h b/src/bench/helper.h
index 8342a79..e0c0262 100644
--- a/src/bench/helper.h
+++ b/src/bench/helper.h
@@ -29,6 +29,7 @@ typedef struct BenchThreadData {
char* server_address;
char * image_name;
int runs;
+ int bs;
int threadNumber;
bool closeSockets;
} BenchThreadData;
diff --git a/src/bench/main.c b/src/bench/main.c
index 2f32dbf..f8c55c3 100644
--- a/src/bench/main.c
+++ b/src/bench/main.c
@@ -17,10 +17,6 @@
#define debugf(...) do { logadd( LOG_DEBUG1, __VA_ARGS__ ); } while (0)
-/* Debug/Benchmark variables */
-static bool useDebug = false;
-
-
static void printUsage(char *argv0, int exitCode)
{
printf( "Usage: %s [--debug] --host <serverAddress(es)> --image <imageName> [--rid revision]\n", argv0 );
@@ -30,19 +26,18 @@ static void printUsage(char *argv0, int exitCode)
printf( " -r --rid Revision to use (omit or pass 0 for latest)\n" );
printf( " -n --runs Number of connection attempts per thread\n" );
printf( " -t --threads number of threads\n" );
- printf( " -l --log Write log to given location\n" );
- printf( " -d --debug Don't fork and print debug output (fuse > stderr, dnbd3 > stdout)\n" );
- // // fuse_main( 2, arg, &dnbd3_fuse_no_operations, NULL );
+ printf( " -b --blocksize Size of blocks to request (def. 4096)\n" );
exit( exitCode );
}
-static const char *optString = "h:i:n:t:HvVd";
+static const char *optString = "b:h:i:n:t:Hv";
static const struct option longOpts[] = {
{ "host", required_argument, NULL, 'h' },
{ "image", required_argument, NULL, 'i' },
{ "nruns", optional_argument, NULL, 'n' },
- { "threads", optional_argument, NULL, 't' },
- { "help", optional_argument, NULL, 'H' },
+ { "threads", required_argument, NULL, 't' },
+ { "blocksize", required_argument, NULL, 'b' },
+ { "help", no_argument, NULL, 'H' },
{ "version", no_argument, NULL, 'v' },
{ 0, 0, 0, 0 }
};
@@ -59,11 +54,11 @@ void* runBenchThread(void* t) {
BenchThreadData* data = t;
connection_init_n_times(
data->server_address,
- data->server_address,
+ data->image_name,
0,
data->runs,
- data->counter,
- data->closeSockets);
+ data->bs,
+ data->counter);
printf("Thread #%d finished\n", data->threadNumber);
return NULL;
}
@@ -77,6 +72,7 @@ int main(int argc, char *argv[])
bool closeSockets = false;
int n_runs = 100;
int n_threads = 1;
+ int bs = 4096;
if ( argc <= 1 || strcmp( argv[1], "--help" ) == 0 || strcmp( argv[1], "--usage" ) == 0 ) {
printUsage( argv[0], 0 );
@@ -85,10 +81,10 @@ int main(int argc, char *argv[])
while ( ( opt = getopt_long( argc, argv, optString, longOpts, &lidx ) ) != -1 ) {
switch ( opt ) {
case 'h':
- server_address = optarg;
+ server_address = strdup(optarg);
break;
case 'i':
- image_Name = optarg;
+ image_Name = strdup(optarg);
break;
case 'n':
n_runs = atoi(optarg);
@@ -96,15 +92,15 @@ int main(int argc, char *argv[])
case 't':
n_threads = atoi(optarg);
break;
+ case 'b':
+ bs = atoi(optarg);
+ break;
case 'c':
closeSockets = true;
break;
case 'H':
printUsage( argv[0], 0 );
break;
- case 'd':
- useDebug = true;
- break;
default:
printUsage( argv[0], EXIT_FAILURE );
}
@@ -126,6 +122,7 @@ int main(int argc, char *argv[])
server_address,
image_Name,
n_runs,
+ bs,
i,
closeSockets};
threadData[i] = tmp2;
diff --git a/src/server/altservers.c b/src/server/altservers.c
index bbbc584..ff3c95b 100644
--- a/src/server/altservers.c
+++ b/src/server/altservers.c
@@ -1,11 +1,14 @@
#include "altservers.h"
#include "locks.h"
+#include "threadpool.h"
#include "helper.h"
#include "image.h"
#include "fileutil.h"
#include "../shared/protocol.h"
#include "../shared/timing.h"
#include "../serverconfig.h"
+#include "reference.h"
+
#include <assert.h>
#include <inttypes.h>
#include <jansson.h>
@@ -14,53 +17,21 @@
#define LOG_GOTO(jumplabel, lvl, ...) do { LOG(lvl, __VA_ARGS__); goto jumplabel; } while (0);
#define ERROR_GOTO(jumplabel, ...) LOG_GOTO(jumplabel, LOG_ERROR, __VA_ARGS__)
-static dnbd3_connection_t *pending[SERVER_MAX_PENDING_ALT_CHECKS];
-static pthread_mutex_t pendingLockWrite; // Lock for adding something to pending. (NULL -> nonNULL)
-static pthread_mutex_t pendingLockConsume; // Lock for removing something (nonNULL -> NULL)
-static dnbd3_signal_t* runSignal = NULL;
-
static dnbd3_alt_server_t altServers[SERVER_MAX_ALTS];
-static int numAltServers = 0;
+static atomic_int numAltServers = 0;
static pthread_mutex_t altServersLock;
-static pthread_t altThread;
-
-static void *altservers_main(void *data);
-static unsigned int altservers_updateRtt(const dnbd3_host_t * const host, const unsigned int rtt);
+static void *altservers_runCheck(void *data);
+static int altservers_getListForUplink(dnbd3_uplink_t *uplink, int *servers, int size, int current);
+static void altservers_findUplinkInternal(dnbd3_uplink_t *uplink);
+static uint32_t altservers_updateRtt(dnbd3_uplink_t *uplink, int index, uint32_t rtt);
+static void altservers_imageFailed(dnbd3_uplink_t *uplink, int server);
void altservers_init()
{
srand( (unsigned int)time( NULL ) );
- // Init spinlock
- mutex_init( &pendingLockWrite );
- mutex_init( &pendingLockConsume );
- mutex_init( &altServersLock );
- // Init signal
- runSignal = signal_new();
- if ( runSignal == NULL ) {
- logadd( LOG_ERROR, "Error creating signal object. Uplink feature unavailable." );
- exit( EXIT_FAILURE );
- }
- memset( altServers, 0, SERVER_MAX_ALTS * sizeof(dnbd3_alt_server_t) );
- if ( 0 != thread_create( &altThread, NULL, &altservers_main, (void *)NULL ) ) {
- logadd( LOG_ERROR, "Could not start altservers connector thread" );
- exit( EXIT_FAILURE );
- }
- // Init waiting links queue -- this is currently a global static array so
- // it will already be zero, but in case we refactor later do it explicitly
- // while also holding the write lock so thread sanitizer is happy
- mutex_lock( &pendingLockWrite );
- for (int i = 0; i < SERVER_MAX_PENDING_ALT_CHECKS; ++i) {
- pending[i] = NULL;
- }
- mutex_unlock( &pendingLockWrite );
-}
-
-void altservers_shutdown()
-{
- if ( runSignal == NULL ) return;
- signal_call( runSignal ); // Wake altservers thread up
- thread_join( altThread, NULL );
+ // Init lock
+ mutex_init( &altServersLock, LOCK_ALT_SERVER_LIST );
}
static void addalt(int argc, char **argv, void *data)
@@ -128,54 +99,27 @@ bool altservers_add(dnbd3_host_t *host, const char *comment, const int isPrivate
/**
* ONLY called from the passed uplink's main thread
*/
-void altservers_findUplink(dnbd3_connection_t *uplink)
+void altservers_findUplinkAsync(dnbd3_uplink_t *uplink)
{
- int i;
+ if ( uplink->shutdown )
+ return;
+ if ( uplink->current.fd != -1 && numAltServers <= 1 )
+ return;
// if betterFd != -1 it means the uplink is supposed to switch to another
// server. As this function here is called by the uplink thread, it can
// never be that the uplink is supposed to switch, but instead calls
// this function.
- assert( uplink->betterFd == -1 );
- mutex_lock( &pendingLockWrite );
+ assert( uplink->better.fd == -1 );
// it is however possible that an RTT measurement is currently in progress,
// so check for that case and do nothing if one is in progress
- if ( uplink->rttTestResult == RTT_INPROGRESS ) {
- for (i = 0; i < SERVER_MAX_PENDING_ALT_CHECKS; ++i) {
- if ( pending[i] != uplink ) continue;
- // Yep, measuring right now
- mutex_unlock( &pendingLockWrite );
- return;
+ if ( uplink->rttTestResult != RTT_INPROGRESS ) {
+ dnbd3_uplink_t *current = ref_get_uplink( &uplink->image->uplinkref );
+ if ( current == uplink ) {
+ threadpool_run( &altservers_runCheck, uplink );
+ } else if ( current != NULL ) {
+ ref_put( &current->reference );
}
}
- // Find free slot for measurement
- for (i = 0; i < SERVER_MAX_PENDING_ALT_CHECKS; ++i) {
- if ( pending[i] != NULL ) continue;
- pending[i] = uplink;
- uplink->rttTestResult = RTT_INPROGRESS;
- mutex_unlock( &pendingLockWrite );
- signal_call( runSignal ); // Wake altservers thread up
- return;
- }
- // End of loop - no free slot
- mutex_unlock( &pendingLockWrite );
- logadd( LOG_WARNING, "No more free RTT measurement slots, ignoring a request..." );
-}
-
-/**
- * The given uplink is about to disappear, so remove it from any queues
- */
-void altservers_removeUplink(dnbd3_connection_t *uplink)
-{
- mutex_lock( &pendingLockConsume );
- mutex_lock( &pendingLockWrite );
- for (int i = 0; i < SERVER_MAX_PENDING_ALT_CHECKS; ++i) {
- if ( pending[i] == uplink ) {
- uplink->rttTestResult = RTT_NOT_REACHABLE;
- pending[i] = NULL;
- }
- }
- mutex_unlock( &pendingLockWrite );
- mutex_unlock( &pendingLockConsume );
}
/**
@@ -189,90 +133,124 @@ int altservers_getListForClient(dnbd3_host_t *host, dnbd3_server_entry_t *output
if ( host == NULL || host->type == 0 || numAltServers == 0 || output == NULL || size <= 0 ) return 0;
int i, j;
int count = 0;
- int scores[size];
- int score;
- mutex_lock( &altServersLock );
+ uint16_t scores[SERVER_MAX_ALTS] = { 0 };
if ( size > numAltServers ) size = numAltServers;
- for (i = 0; i < numAltServers; ++i) {
- if ( altServers[i].host.type == 0 ) continue; // Slot is empty
- if ( altServers[i].isPrivate ) continue; // Do not tell clients about private servers
+ mutex_lock( &altServersLock );
+ for ( i = 0; i < numAltServers; ++i ) {
+ if ( altServers[i].host.type == 0 || altServers[i].isPrivate )
+ continue; // Slot is empty or uplink is for replication only
if ( host->type == altServers[i].host.type ) {
- score = altservers_netCloseness( host, &altServers[i].host ) - altServers[i].numFails;
+ scores[i] = 10 + altservers_netCloseness( host, &altServers[i].host );
} else {
- score = -( altServers[i].numFails + 128 ); // Wrong address family
+ scores[i] = 1; // Wrong address family
}
- if ( count == 0 ) {
- // Trivial - this is the first entry
- output[0].host = altServers[i].host;
- output[0].failures = 0;
- scores[0] = score;
- count++;
- } else {
- // Other entries already exist, insert in proper position
- for (j = 0; j < size; ++j) {
- if ( j < count && score <= scores[j] ) continue;
- if ( j > count ) break; // Should never happen but just in case...
- if ( j < count && j + 1 < size ) {
- // Check if we're in the middle and need to move other entries...
- memmove( &output[j + 1], &output[j], sizeof(dnbd3_server_entry_t) * (size - j - 1) );
- memmove( &scores[j + 1], &scores[j], sizeof(int) * (size - j - 1) );
- }
- if ( count < size ) {
- count++;
- }
- output[j].host = altServers[i].host;
- output[j].failures = 0;
- scores[j] = score;
- break;
+ }
+ while ( count < size ) {
+ i = -1;
+ for ( j = 0; j < numAltServers; ++j ) {
+ if ( scores[j] == 0 )
+ continue;
+ if ( i == -1 || scores[j] > scores[i] ) {
+ i = j;
}
}
+ if ( i == -1 )
+ break;
+ output[count].host = altServers[i].host;
+ output[count].failures = 0;
+ count++;
}
mutex_unlock( &altServersLock );
return count;
}
+bool altservers_toString(int server, char *buffer, size_t len)
+{
+ return host_to_string( &altServers[server].host, buffer, len );
+}
+
+static bool isUsableForUplink( dnbd3_uplink_t *uplink, int server, ticks *now )
+{
+ dnbd3_alt_local_t *local = ( uplink == NULL ? NULL : &uplink->altData[server] );
+ dnbd3_alt_server_t *global = &altServers[server];
+ if ( global->isClientOnly || ( !global->isPrivate && _proxyPrivateOnly ) )
+ return false;
+ // Blocked locally (image not found on server...)
+ if ( local != NULL && local->blocked ) {
+ if ( --local->fails > 0 )
+ return false;
+ local->blocked = false;
+ }
+ if ( global->blocked ) {
+ if ( timing_diff( &global->lastFail, now ) < SERVER_GLOBAL_DUP_TIME )
+ return false;
+ global->lastFail = *now;
+ if ( --global->fails > 0 )
+ return false;
+ global->blocked = false;
+ }
+ // Not blocked, depend on both fail counters
+ int fails = ( local == NULL ? 0 : local->fails ) + global->fails;
+ return fails < SERVER_BAD_UPLINK_MIN || ( rand() % fails ) < SERVER_BAD_UPLINK_MIN;
+}
+
+int altservers_getHostListForReplication(dnbd3_host_t *servers, int size)
+{
+ int idx[size];
+ int num = altservers_getListForUplink( NULL, idx, size, -1 );
+ for ( int i = 0; i < num; ++i ) {
+ servers[i] = altServers[i].host;
+ }
+ return num;
+}
+
/**
* Get <size> alt servers. If there are more alt servers than
* requested, random servers will be picked.
* This function is suited for finding uplink servers as
* it includes private servers and ignores any "client only" servers
+ * @param current index of server for current connection, or -1 in panic mode
*/
-int altservers_getListForUplink(dnbd3_host_t *output, int size, int emergency)
+static int altservers_getListForUplink(dnbd3_uplink_t *uplink, int *servers, int size, int current)
{
- if ( size <= 0 ) return 0;
- int count = 0, i;
- ticks now;
- timing_get( &now );
+ if ( size <= 0 )
+ return 0;
+ int count = 0;
+ declare_now;
mutex_lock( &altServersLock );
- // Flip first server in list with a random one every time this is called
- if ( numAltServers > 1 ) {
- const dnbd3_alt_server_t tmp = altServers[0];
- do {
- i = rand() % numAltServers;
- } while ( i == 0 );
- altServers[0] = altServers[i];
- altServers[i] = tmp;
- }
- // We iterate over the list twice. First run adds servers with 0 failures only,
- // second one also considers those that failed (not too many times)
- if ( size > numAltServers ) size = numAltServers;
- for (i = 0; i < numAltServers * 2; ++i) {
- dnbd3_alt_server_t *srv = &altServers[i % numAltServers];
- if ( srv->host.type == 0 ) continue; // Slot is empty
- if ( _proxyPrivateOnly && !srv->isPrivate ) continue; // Config says to consider private alt-servers only? ignore!
- if ( srv->isClientOnly ) continue;
- bool first = ( i < numAltServers );
- if ( first ) {
- if ( srv->numFails > 0 ) continue;
- } else {
- if ( srv->numFails == 0 ) continue; // Already added in first iteration
- if ( !emergency && srv->numFails > SERVER_BAD_UPLINK_THRES // server failed X times in a row
- && timing_diff( &srv->lastFail, &now ) < SERVER_BAD_UPLINK_IGNORE ) continue; // and last fail was not too long ago? ignore!
- if ( !emergency ) srv->numFails--;
+ // If we don't have enough servers to randomize, take a shortcut
+ if ( numAltServers <= size ) {
+ for ( int i = 0; i < numAltServers; ++i ) {
+ if ( current == -1 || i == current || isUsableForUplink( uplink, i, &now ) ) {
+ servers[count++] = i;
+ }
+ }
+ } else {
+ // Plenty of alt servers; randomize
+ uint8_t state[SERVER_MAX_ALTS] = { 0 };
+ if ( current != -1 ) { // Make sure we also test the current server
+ servers[count++] = current;
+ state[current] = 2;
+ }
+ for ( int tr = size * 10; tr > 0 && count < size; --tr ) {
+ int idx = rand() % numAltServers;
+ if ( state[idx] != 0 )
+ continue;
+ if ( isUsableForUplink( uplink, idx, &now ) ) {
+ servers[count++] = idx;
+ state[idx] = 2; // Used
+ } else {
+ state[idx] = 1; // Potential
+ }
+ }
+ // If panic mode, consider others too
+ for ( int tr = size * 10; current == -1 && tr > 0 && count < size; --tr ) {
+ int idx = rand() % numAltServers;
+ if ( state[idx] == 2 )
+ continue;
+ servers[count++] = idx;
+ state[idx] = 2; // Used
}
- // server seems ok, include in output and decrease its fail counter
- output[count++] = srv->host;
- if ( count >= size ) break;
}
mutex_unlock( &altServersLock );
return count;
@@ -300,7 +278,7 @@ json_t* altservers_toJson()
"rtt", rtts,
"isPrivate", (int)src[i].isPrivate,
"isClientOnly", (int)src[i].isClientOnly,
- "numFails", src[i].numFails
+ "numFails", src[i].fails
);
json_array_append_new( list, server );
}
@@ -308,33 +286,27 @@ json_t* altservers_toJson()
}
/**
- * Update rtt history of given server - returns the new average for that server
+ * Update rtt history of given server - returns the new average for that server.
*/
-static unsigned int altservers_updateRtt(const dnbd3_host_t * const host, const unsigned int rtt)
+static uint32_t altservers_updateRtt(dnbd3_uplink_t *uplink, int index, uint32_t rtt)
{
- unsigned int avg = rtt;
- int i;
+ uint32_t avg = 0, j;
+ dnbd3_alt_local_t *local = &uplink->altData[index];
mutex_lock( &altServersLock );
- for (i = 0; i < numAltServers; ++i) {
- if ( !isSameAddressPort( host, &altServers[i].host ) ) continue;
- altServers[i].rtt[++altServers[i].rttIndex % SERVER_RTT_PROBES] = rtt;
-#if SERVER_RTT_PROBES == 5
- avg = (altServers[i].rtt[0] + altServers[i].rtt[1] + altServers[i].rtt[2]
- + altServers[i].rtt[3] + altServers[i].rtt[4]) / SERVER_RTT_PROBES;
-#else
-#warning You might want to change the code in altservers_update_rtt if you changed SERVER_RTT_PROBES
- avg = 0;
- for (int j = 0; j < SERVER_RTT_PROBES; ++j) {
- avg += altServers[i].rtt[j];
+ if ( likely( local->initDone ) ) {
+ local->rtt[++local->rttIndex % SERVER_RTT_PROBES] = rtt;
+ for ( j = 0; j < SERVER_RTT_PROBES; ++j ) {
+ avg += local->rtt[j];
}
avg /= SERVER_RTT_PROBES;
-#endif
- // If we got a new rtt value, server must be working
- if ( altServers[i].numFails > 0 ) {
- altServers[i].numFails--;
+ } else { // First rtt measurement -- copy to every slot
+ for ( j = 0; j < SERVER_RTT_PROBES; ++j ) {
+ local->rtt[j] = rtt;
}
- break;
+ avg = rtt;
+ local->initDone = true;
}
+ altServers[index].rtt[++altServers[index].rttIndex % SERVER_RTT_PROBES] = avg;
mutex_unlock( &altServersLock );
return avg;
}
@@ -364,250 +336,234 @@ int altservers_netCloseness(dnbd3_host_t *host1, dnbd3_host_t *host2)
* track of how often servers fail, and consider them disabled for some time if they
* fail too many times.
*/
-void altservers_serverFailed(const dnbd3_host_t * const host)
+void altservers_serverFailed(int server)
{
- int i;
- int foundIndex = -1, lastOk = -1;
- ticks now;
- timing_get( &now );
+ declare_now;
mutex_lock( &altServersLock );
- for (i = 0; i < numAltServers; ++i) {
- if ( foundIndex == -1 ) {
- // Looking for the failed server in list
- if ( isSameAddressPort( host, &altServers[i].host ) ) {
- foundIndex = i;
- }
- } else if ( altServers[i].host.type != 0 && altServers[i].numFails == 0 ) {
- lastOk = i;
- }
- }
- // Do only increase counter if last fail was not too recent. This is
- // to prevent the counter from increasing rapidly if many images use the
- // same uplink. If there's a network hickup, all uplinks will call this
- // function and would increase the counter too quickly, disabling the server.
- if ( foundIndex != -1 && timing_diff( &altServers[foundIndex].lastFail, &now ) > SERVER_RTT_INTERVAL_INIT ) {
- altServers[foundIndex].numFails += SERVER_UPLINK_FAIL_INCREASE;
- altServers[foundIndex].lastFail = now;
- if ( lastOk != -1 ) {
- // Make sure non-working servers are put at the end of the list, so they're less likely
- // to get picked when testing servers for uplink connections.
- const dnbd3_alt_server_t tmp = altServers[foundIndex];
- altServers[foundIndex] = altServers[lastOk];
- altServers[lastOk] = tmp;
+ if ( timing_diff( &altServers[server].lastFail, &now ) > SERVER_GLOBAL_DUP_TIME ) {
+ altServers[server].lastFail = now;
+ if ( altServers[server].fails++ >= SERVER_BAD_UPLINK_MAX ) {
+ altServers[server].blocked = true;
}
}
mutex_unlock( &altServersLock );
}
+
/**
- * Mainloop of this module. It will wait for requests by uplinks to find a
- * suitable uplink server for them. If found, it will tell the uplink about
- * the best server found. Currently the RTT history is kept per server and
- * not per uplink, so if many images use the same uplink server, the history
- * will update quite quickly. Needs to be improved some time, ie. by only
- * updating the rtt if the last update was at least X seconds ago.
+ * Called from RTT checker if connecting to a server succeeded but
+ * subsequently selecting the given image failed. Handle this within
+ * the uplink and don't increase the global fail counter.
*/
-static void *altservers_main(void *data UNUSED)
+static void altservers_imageFailed(dnbd3_uplink_t *uplink, int server)
+{
+ mutex_lock( &altServersLock );
+ if ( uplink->altData[server].fails++ >= SERVER_BAD_UPLINK_MAX ) {
+ uplink->altData[server].blocked = true;
+ }
+ mutex_unlock( &altServersLock );
+}
+
+static void *altservers_runCheck(void *data)
+{
+ dnbd3_uplink_t * const uplink = (dnbd3_uplink_t*)data;
+
+ assert( uplink != NULL );
+ setThreadName( "altserver-check" );
+ altservers_findUplinkInternal( uplink );
+ ref_put( &uplink->reference ); // Acquired in findUplinkAsync
+ return NULL;
+}
+
+void altservers_findUplink(dnbd3_uplink_t *uplink)
+{
+ altservers_findUplinkInternal( uplink );
+ while ( uplink->rttTestResult == RTT_INPROGRESS ) {
+ usleep( 5000 );
+ }
+}
+
+int altservers_hostToIndex(dnbd3_host_t *host)
+{
+ for ( int i = 0; i < numAltServers; ++i ) {
+ if ( isSameAddressPort( host, &altServers[i].host ) )
+ return i;
+ }
+ return -1;
+}
+
+const dnbd3_host_t* altservers_indexToHost(int server)
+{
+ return &altServers[server].host;
+}
+
+// XXX Sync call above must block until async worker has finished XXX
+static void altservers_findUplinkInternal(dnbd3_uplink_t *uplink)
{
const int ALTS = 4;
- int ret, itLink, itAlt, numAlts;
- bool found;
- char buffer[DNBD3_BLOCK_SIZE ];
- dnbd3_reply_t reply;
- dnbd3_host_t servers[ALTS + 1];
- serialized_buffer_t serialized;
+ int ret, itAlt, numAlts, current;
+ bool panic;
+ int servers[ALTS + 1];
struct timespec start, end;
- ticks nextCloseUnusedFd;
- setThreadName( "altserver-check" );
- blockNoncriticalSignals();
- timing_gets( &nextCloseUnusedFd, 900 );
- // LOOP
- while ( !_shutdown ) {
- // Wait 5 seconds max.
- ret = signal_wait( runSignal, 5000 );
- if ( _shutdown ) goto cleanup;
- if ( ret == SIGNAL_ERROR ) {
- if ( errno == EAGAIN || errno == EINTR ) continue;
- logadd( LOG_WARNING, "Error %d on signal_clear on alservers_main! Things will break!", errno );
- usleep( 100000 );
+ if ( _shutdown )
+ return;
+ mutex_lock( &uplink->rttLock );
+ // Maybe we already have a result, or check is currently running
+ if ( uplink->better.fd != -1 || uplink->rttTestResult == RTT_INPROGRESS ) {
+ mutex_unlock( &uplink->rttLock );
+ return;
+ }
+ assert( uplink->rttTestResult != RTT_DOCHANGE );
+ uplink->rttTestResult = RTT_INPROGRESS;
+ panic = ( uplink->current.fd == -1 );
+ current = uplink->current.index; // Current server index (or last one in panic mode)
+ mutex_unlock( &uplink->rttLock );
+ // First, get 4 alt servers
+ numAlts = altservers_getListForUplink( uplink, servers, ALTS, panic ? -1 : current );
+ // If we're already connected and only got one server anyways, there isn't much to do
+ if ( numAlts == 0 || ( numAlts == 1 && !panic ) ) {
+ uplink->rttTestResult = RTT_DONTCHANGE;
+ return;
+ }
+ dnbd3_image_t * const image = image_lock( uplink->image );
+ if ( image == NULL ) { // Check again after locking
+ uplink->rttTestResult = RTT_NOT_REACHABLE;
+ logadd( LOG_WARNING, "Image has gone away that was queued for RTT measurement" );
+ return;
+ }
+ LOG( LOG_DEBUG2, "Running alt check for %s:%d", image->name, (int)image->rid );
+ assert( uplink->rttTestResult == RTT_INPROGRESS );
+ // Test them all
+ dnbd3_server_connection_t best = { .fd = -1 };
+ unsigned long bestRtt = RTT_UNREACHABLE;
+ unsigned long currentRtt = RTT_UNREACHABLE;
+ for (itAlt = 0; itAlt < numAlts; ++itAlt) {
+ int server = servers[itAlt];
+ // Connect
+ clock_gettime( BEST_CLOCK_SOURCE, &start );
+ int sock = sock_connect( &altServers[server].host, 750, 1000 );
+ if ( sock == -1 ) { // Connection failed means global error
+ altservers_serverFailed( server );
+ continue;
}
- // Work your way through the queue
- for (itLink = 0; itLink < SERVER_MAX_PENDING_ALT_CHECKS; ++itLink) {
- mutex_lock( &pendingLockWrite );
- if ( pending[itLink] == NULL ) {
- mutex_unlock( &pendingLockWrite );
- continue; // Check once before locking, as a mutex is expensive
- }
- mutex_unlock( &pendingLockWrite );
- mutex_lock( &pendingLockConsume );
- mutex_lock( &pendingLockWrite );
- dnbd3_connection_t * const uplink = pending[itLink];
- mutex_unlock( &pendingLockWrite );
- if ( uplink == NULL ) { // Check again after locking
- mutex_unlock( &pendingLockConsume );
- continue;
- }
- dnbd3_image_t * const image = image_lock( uplink->image );
- if ( image == NULL ) { // Check again after locking
- uplink->rttTestResult = RTT_NOT_REACHABLE;
- mutex_lock( &pendingLockWrite );
- pending[itLink] = NULL;
- mutex_unlock( &pendingLockWrite );
- mutex_unlock( &pendingLockConsume );
- logadd( LOG_DEBUG1, "Image has gone away that was queued for RTT measurement" );
- continue;
- }
- LOG( LOG_DEBUG2, "[%d] Running alt check", itLink );
- assert( uplink->rttTestResult == RTT_INPROGRESS );
- // Now get 4 alt servers
- numAlts = altservers_getListForUplink( servers, ALTS, uplink->fd == -1 );
- if ( uplink->fd != -1 ) {
- // Add current server if not already in list
- found = false;
- for (itAlt = 0; itAlt < numAlts; ++itAlt) {
- if ( !isSameAddressPort( &uplink->currentServer, &servers[itAlt] ) ) continue;
- found = true;
- break;
- }
- if ( !found ) servers[numAlts++] = uplink->currentServer;
- }
- // Test them all
- int bestSock = -1;
- int bestIndex = -1;
- int bestProtocolVersion = -1;
- unsigned long bestRtt = RTT_UNREACHABLE;
- unsigned long currentRtt = RTT_UNREACHABLE;
- for (itAlt = 0; itAlt < numAlts; ++itAlt) {
- usleep( 1000 ); // Wait a very short moment for the network to recover (we might be doing lots of measurements...)
- // Connect
- clock_gettime( BEST_CLOCK_SOURCE, &start );
- int sock = sock_connect( &servers[itAlt], 750, 1000 );
- if ( sock < 0 ) continue;
- // Select image ++++++++++++++++++++++++++++++
- if ( !dnbd3_select_image( sock, image->name, image->rid, SI_SERVER_FLAGS ) ) {
- goto server_failed;
- }
- // See if selecting the image succeeded ++++++++++++++++++++++++++++++
- uint16_t protocolVersion, rid;
- uint64_t imageSize;
- char *name;
- if ( !dnbd3_select_image_reply( &serialized, sock, &protocolVersion, &name, &rid, &imageSize ) ) {
- goto server_image_not_available;
- }
- if ( protocolVersion < MIN_SUPPORTED_SERVER ) goto server_failed;
- if ( name == NULL || strcmp( name, image->name ) != 0 ) {
- ERROR_GOTO( server_failed, "[RTT] Server offers image '%s'", name );
- }
- if ( rid != image->rid ) {
- ERROR_GOTO( server_failed, "[RTT] Server provides rid %d", (int)rid );
- }
- if ( imageSize != image->virtualFilesize ) {
- ERROR_GOTO( server_failed, "[RTT] Remote size: %" PRIu64 ", expected: %" PRIu64, imageSize, image->virtualFilesize );
- }
- // Request first block (NOT random!) ++++++++++++++++++++++++++++++
- if ( !dnbd3_get_block( sock, 0, DNBD3_BLOCK_SIZE, 0, COND_HOPCOUNT( protocolVersion, 1 ) ) ) {
- LOG_GOTO( server_failed, LOG_DEBUG1, "[RTT%d] Could not request first block", itLink );
- }
- // See if requesting the block succeeded ++++++++++++++++++++++
- if ( !dnbd3_get_reply( sock, &reply ) ) {
- LOG_GOTO( server_failed, LOG_DEBUG1, "[RTT%d] Received corrupted reply header after CMD_GET_BLOCK", itLink );
- }
- // check reply header
- if ( reply.cmd != CMD_GET_BLOCK || reply.size != DNBD3_BLOCK_SIZE ) {
- ERROR_GOTO( server_failed, "[RTT] Reply to first block request is %" PRIu32 " bytes", reply.size );
- }
- if ( recv( sock, buffer, DNBD3_BLOCK_SIZE, MSG_WAITALL ) != DNBD3_BLOCK_SIZE ) {
- ERROR_GOTO( server_failed, "[RTT%d] Could not read first block payload", itLink );
- }
- clock_gettime( BEST_CLOCK_SOURCE, &end );
- // Measurement done - everything fine so far
- mutex_lock( &uplink->rttLock );
- const bool isCurrent = isSameAddressPort( &servers[itAlt], &uplink->currentServer );
- // Penaltize rtt if this was a cycle; this will treat this server with lower priority
- // in the near future too, so we prevent alternating between two servers that are both
- // part of a cycle and have the lowest latency.
- const unsigned int rtt = (unsigned int)((end.tv_sec - start.tv_sec) * 1000000
- + (end.tv_nsec - start.tv_nsec) / 1000
- + ( (isCurrent && uplink->cycleDetected) ? 1000000 : 0 )); // µs
- unsigned int avg = altservers_updateRtt( &servers[itAlt], rtt );
- // If a cycle was detected, or we lost connection to the current (last) server, penaltize it one time
- if ( ( uplink->cycleDetected || uplink->fd == -1 ) && isCurrent ) avg = (avg * 2) + 50000;
- mutex_unlock( &uplink->rttLock );
- if ( uplink->fd != -1 && isCurrent ) {
- // Was measuring current server
- currentRtt = avg;
- close( sock );
- } else if ( avg < bestRtt ) {
- // Was another server, update "best"
- if ( bestSock != -1 ) close( bestSock );
- bestSock = sock;
- bestRtt = avg;
- bestIndex = itAlt;
- bestProtocolVersion = protocolVersion;
- } else {
- // Was too slow, ignore
- close( sock );
- }
- // We're done, call continue
- continue;
- // Jump here if anything went wrong
- // This will cleanup and continue
- server_failed: ;
- altservers_serverFailed( &servers[itAlt] );
- server_image_not_available: ;
- close( sock );
- }
- // Done testing all servers. See if we should switch
- if ( bestSock != -1 && (uplink->fd == -1 || (bestRtt < 10000000 && RTT_THRESHOLD_FACTOR(currentRtt) > bestRtt)) ) {
- // yep
- if ( currentRtt > 10000000 || uplink->fd == -1 ) {
- LOG( LOG_DEBUG1, "Change - best: %luµs, current: -", bestRtt );
- } else {
- LOG( LOG_DEBUG1, "Change - best: %luµs, current: %luµs", bestRtt, currentRtt );
- }
- sock_setTimeout( bestSock, _uplinkTimeout );
- mutex_lock( &uplink->rttLock );
- uplink->betterFd = bestSock;
- uplink->betterServer = servers[bestIndex];
- uplink->betterVersion = bestProtocolVersion;
- uplink->rttTestResult = RTT_DOCHANGE;
- mutex_unlock( &uplink->rttLock );
- signal_call( uplink->signal );
- } else if ( bestSock == -1 && currentRtt == RTT_UNREACHABLE ) {
- // No server was reachable
- mutex_lock( &uplink->rttLock );
- uplink->rttTestResult = RTT_NOT_REACHABLE;
- mutex_unlock( &uplink->rttLock );
- } else {
- // nope
- if ( bestSock != -1 ) close( bestSock );
- mutex_lock( &uplink->rttLock );
- uplink->rttTestResult = RTT_DONTCHANGE;
- uplink->cycleDetected = false; // It's a lie, but prevents rtt measurement triggering again right away
- mutex_unlock( &uplink->rttLock );
- if ( !image->working ) {
- image->working = true;
- LOG( LOG_DEBUG1, "[%d] No better alt server found, enabling again", itLink );
- }
+ // Select image ++++++++++++++++++++++++++++++
+ if ( !dnbd3_select_image( sock, image->name, image->rid, SI_SERVER_FLAGS ) ) {
+ goto image_failed;
+ }
+ // See if selecting the image succeeded ++++++++++++++++++++++++++++++
+ uint16_t protocolVersion, rid;
+ uint64_t imageSize;
+ char *name;
+ serialized_buffer_t serialized;
+ if ( !dnbd3_select_image_reply( &serialized, sock, &protocolVersion, &name, &rid, &imageSize ) ) {
+ goto image_failed;
+ }
+ if ( protocolVersion < MIN_SUPPORTED_SERVER ) { // Server version unsupported; global fail
+ goto server_failed;
+ }
+ if ( name == NULL || strcmp( name, image->name ) != 0 ) {
+ ERROR_GOTO( image_failed, "[RTT] Server offers image '%s' instead of '%s'", name, image->name );
+ }
+ if ( rid != image->rid ) {
+ ERROR_GOTO( image_failed, "[RTT] Server provides rid %d instead of %d", (int)rid, (int)image->rid );
+ }
+ if ( imageSize != image->virtualFilesize ) {
+ ERROR_GOTO( image_failed, "[RTT] Remote size: %" PRIu64 ", expected: %" PRIu64, imageSize, image->virtualFilesize );
+ }
+ // Request first block (NOT random!) ++++++++++++++++++++++++++++++
+ if ( !dnbd3_get_block( sock, 0, DNBD3_BLOCK_SIZE, 0, COND_HOPCOUNT( protocolVersion, 1 ) ) ) {
+ LOG_GOTO( image_failed, LOG_DEBUG1, "[RTT%d] Could not request first block", server );
+ }
+ // See if requesting the block succeeded ++++++++++++++++++++++
+ dnbd3_reply_t reply;
+ if ( !dnbd3_get_reply( sock, &reply ) ) {
+ LOG_GOTO( image_failed, LOG_DEBUG1, "[RTT%d] Received corrupted reply header after CMD_GET_BLOCK", server );
+ }
+ // check reply header
+ if ( reply.cmd != CMD_GET_BLOCK || reply.size != DNBD3_BLOCK_SIZE ) {
+ // Sanity check failed; count this as global error (malicious/broken server)
+ ERROR_GOTO( server_failed, "[RTT] Reply to first block request is %" PRIu32 " bytes", reply.size );
+ }
+ // flush payload to include this into measurement
+ char buffer[DNBD3_BLOCK_SIZE];
+ if ( recv( sock, buffer, DNBD3_BLOCK_SIZE, MSG_WAITALL ) != DNBD3_BLOCK_SIZE ) {
+ ERROR_GOTO( image_failed, "[RTT%d] Could not read first block payload", server );
+ }
+ clock_gettime( BEST_CLOCK_SOURCE, &end );
+ // Measurement done - everything fine so far
+ mutex_lock( &uplink->rttLock );
+ const bool isCurrent = ( uplink->current.index == server );
+ mutex_unlock( &uplink->rttLock );
+ // Penaltize rtt if this was a cycle; this will treat this server with lower priority
+ // in the near future too, so we prevent alternating between two servers that are both
+ // part of a cycle and have the lowest latency.
+ uint32_t rtt = (uint32_t)((end.tv_sec - start.tv_sec) * 1000000
+ + (end.tv_nsec - start.tv_nsec) / 1000); // µs
+ uint32_t avg = altservers_updateRtt( uplink, server, rtt );
+ // If a cycle was detected, or we lost connection to the current (last) server, penaltize it one time
+ if ( ( uplink->cycleDetected || panic ) && isCurrent ) {
+ avg = (avg * 2) + 50000;
+ }
+ if ( !panic && isCurrent ) {
+ // Was measuring current server
+ currentRtt = avg;
+ close( sock );
+ } else if ( avg < bestRtt ) {
+ // Was another server, update "best"
+ if ( best.fd != -1 ) {
+ close( best.fd );
}
- image_release( image );
- // end of loop over all pending uplinks
- mutex_lock( &pendingLockWrite );
- pending[itLink] = NULL;
- mutex_unlock( &pendingLockWrite );
- mutex_unlock( &pendingLockConsume );
+ best.fd = sock;
+ bestRtt = avg;
+ best.index = server;
+ best.version = protocolVersion;
+ } else {
+ // Was too slow, ignore
+ close( sock );
+ }
+ // We're done, call continue
+ continue;
+ // Jump here if anything went wrong
+ // This will cleanup and continue
+image_failed:
+ altservers_imageFailed( uplink, server );
+ goto failed;
+server_failed:
+ altservers_serverFailed( server );
+failed:
+ close( sock );
+ }
+ // Done testing all servers. See if we should switch
+ if ( best.fd != -1 && (panic || (bestRtt < 10000000 && RTT_THRESHOLD_FACTOR(currentRtt) > bestRtt)) ) {
+ // yep
+ if ( currentRtt > 10000000 || panic ) {
+ LOG( LOG_DEBUG1, "Change - best: %luµs, current: -", bestRtt );
+ } else {
+ LOG( LOG_DEBUG1, "Change - best: %luµs, current: %luµs", bestRtt, currentRtt );
+ }
+ sock_setTimeout( best.fd, _uplinkTimeout );
+ mutex_lock( &uplink->rttLock );
+ uplink->better = best;
+ uplink->rttTestResult = RTT_DOCHANGE;
+ mutex_unlock( &uplink->rttLock );
+ signal_call( uplink->signal );
+ } else if ( best.fd == -1 && currentRtt == RTT_UNREACHABLE ) {
+ // No server was reachable, including current
+ uplink->rttTestResult = RTT_NOT_REACHABLE;
+ } else {
+ // nope
+ if ( best.fd != -1 ) {
+ close( best.fd );
}
- // Save cache maps of all images if applicable
- declare_now;
- // TODO: Has nothing to do with alt servers really, maybe move somewhere else?
- if ( _closeUnusedFd && timing_reached( &nextCloseUnusedFd, &now ) ) {
- timing_gets( &nextCloseUnusedFd, 900 );
- image_closeUnusedFd();
+ if ( !image->working || uplink->cycleDetected ) {
+ image->working = true;
+ LOG( LOG_DEBUG1, "[RTT] No better alt server found, enabling '%s:%d' again... :-(", image->name, (int)image->rid );
}
+ uplink->cycleDetected = false; // It's a lie, but prevents rtt measurement triggering again right away
+ mutex_lock( &uplink->rttLock );
+ uplink->rttTestResult = RTT_DONTCHANGE;
+ mutex_unlock( &uplink->rttLock );
}
- cleanup: ;
- if ( runSignal != NULL ) signal_close( runSignal );
- runSignal = NULL;
- return NULL ;
+ image_release( image );
}
diff --git a/src/server/altservers.h b/src/server/altservers.h
index 7b7b46d..8e2b964 100644
--- a/src/server/altservers.h
+++ b/src/server/altservers.h
@@ -7,23 +7,27 @@ struct json_t;
void altservers_init();
-void altservers_shutdown();
-
int altservers_load();
bool altservers_add(dnbd3_host_t *host, const char *comment, const int isPrivate, const int isClientOnly);
-void altservers_findUplink(dnbd3_connection_t *uplink);
+void altservers_findUplinkAsync(dnbd3_uplink_t *uplink);
-void altservers_removeUplink(dnbd3_connection_t *uplink);
+void altservers_findUplink(dnbd3_uplink_t *uplink);
int altservers_getListForClient(dnbd3_host_t *host, dnbd3_server_entry_t *output, int size);
-int altservers_getListForUplink(dnbd3_host_t *output, int size, int emergency);
+int altservers_getHostListForReplication(dnbd3_host_t *servers, int size);
+
+bool altservers_toString(int server, char *buffer, size_t len);
int altservers_netCloseness(dnbd3_host_t *host1, dnbd3_host_t *host2);
-void altservers_serverFailed(const dnbd3_host_t * const host);
+void altservers_serverFailed(int server);
+
+int altservers_hostToIndex(dnbd3_host_t *host);
+
+const dnbd3_host_t* altservers_indexToHost(int server);
struct json_t* altservers_toJson();
diff --git a/src/server/globals.c b/src/server/globals.c
index 69e8a6e..46c1030 100644
--- a/src/server/globals.c
+++ b/src/server/globals.c
@@ -112,7 +112,7 @@ void globals_loadConfig()
asprintf( &name, "%s/%s", _configDir, CONFIG_FILENAME );
if ( name == NULL ) return;
if ( initialLoad ) {
- mutex_init( &loadLock );
+ mutex_init( &loadLock, LOCK_LOAD_CONFIG );
}
if ( mutex_trylock( &loadLock ) != 0 ) {
logadd( LOG_INFO, "Ignoring config reload request due to already running reload" );
diff --git a/src/server/globals.h b/src/server/globals.h
index b248800..f940666 100644
--- a/src/server/globals.h
+++ b/src/server/globals.h
@@ -8,27 +8,16 @@
#include <stdatomic.h>
#include <time.h>
#include <pthread.h>
+#include "reftypes.h"
typedef struct timespec ticks;
// ######### All structs/types used by the server ########
-typedef struct _dnbd3_connection dnbd3_connection_t;
+typedef struct _dnbd3_uplink dnbd3_uplink_t;
typedef struct _dnbd3_image dnbd3_image_t;
typedef struct _dnbd3_client dnbd3_client_t;
-// Slot is free, can be used.
-// Must only be set in uplink_handle_receive() or uplink_remove_client()
-#define ULR_FREE 0
-// Slot has been filled with a request that hasn't been sent to the upstream server yet, matching request can safely rely on reuse.
-// Must only be set in uplink_request()
-#define ULR_NEW 1
-// Slot is occupied, reply has not yet been received, matching request can safely rely on reuse.
-// Must only be set in uplink_mainloop() or uplink_request()
-#define ULR_PENDING 2
-// Slot is being processed, do not consider for hop on.
-// Must only be set in uplink_handle_receive()
-#define ULR_PROCESSING 3
typedef struct
{
uint64_t handle; // Client defined handle to pass back in reply
@@ -42,54 +31,68 @@ typedef struct
uint8_t hopCount; // How many hops this request has already taken across proxies
} dnbd3_queued_request_t;
+typedef struct
+{
+ int fails; // Hard fail: Connection failed
+ int rttIndex;
+ uint32_t rtt[SERVER_RTT_PROBES];
+ bool isPrivate, isClientOnly;
+ bool blocked; // If true count down fails until 0 to enable again
+ ticks lastFail; // Last hard fail
+ dnbd3_host_t host;
+ char comment[COMMENT_LENGTH];
+} dnbd3_alt_server_t;
+
+typedef struct
+{
+ int fails; // Soft fail: Image not found
+ int rttIndex;
+ uint32_t rtt[SERVER_RTT_PROBES];
+ bool blocked; // True if server is to be ignored and fails should be counted down
+ bool initDone;
+} dnbd3_alt_local_t;
+
+typedef struct {
+ int fd; // Socket fd for this connection
+ int version; // Protocol version of remote server
+ int index; // Entry in uplinks list
+} dnbd3_server_connection_t;
+
#define RTT_IDLE 0 // Not in progress
#define RTT_INPROGRESS 1 // In progess, not finished
#define RTT_DONTCHANGE 2 // Finished, but no better alternative found
#define RTT_DOCHANGE 3 // Finished, better alternative written to .betterServer + .betterFd
#define RTT_NOT_REACHABLE 4 // No uplink was reachable
-struct _dnbd3_connection
+struct _dnbd3_uplink
{
- int fd; // socket fd to remote server
- int version; // remote server protocol version
+ ref reference;
+ dnbd3_server_connection_t current; // Currently active connection; fd == -1 means disconnected
+ dnbd3_server_connection_t better; // Better connection as found by altserver worker; fd == -1 means none
dnbd3_signal_t* signal; // used to wake up the process
pthread_t thread; // thread holding the connection
pthread_mutex_t sendMutex; // For locking socket while sending
pthread_mutex_t queueLock; // lock for synchronization on request queue etc.
dnbd3_image_t *image; // image that this uplink is used for; do not call get/release for this pointer
- dnbd3_host_t currentServer; // Current server we're connected to
pthread_mutex_t rttLock; // When accessing rttTestResult, betterFd or betterServer
- int rttTestResult; // RTT_*
+ atomic_int rttTestResult; // RTT_*
int cacheFd; // used to write to the image, in case it is relayed. ONLY USE FROM UPLINK THREAD!
- int betterVersion; // protocol version of better server
- int betterFd; // Active connection to better server, ready to use
- dnbd3_host_t betterServer; // The better server
uint8_t *recvBuffer; // Buffer for receiving payload
uint32_t recvBufferLen; // Len of ^^
- volatile bool shutdown; // signal this thread to stop, must only be set from uplink_shutdown() or cleanup in uplink_mainloop()
+ atomic_bool shutdown; // signal this thread to stop, must only be set from uplink_shutdown() or cleanup in uplink_mainloop()
bool replicatedLastBlock; // bool telling if the last block has been replicated yet
bool cycleDetected; // connection cycle between proxies detected for current remote server
int nextReplicationIndex; // Which index in the cache map we should start looking for incomplete blocks at
// If BGR == BGR_HASHBLOCK, -1 means "currently no incomplete block"
uint64_t replicationHandle; // Handle of pending replication request
atomic_uint_fast64_t bytesReceived; // Number of bytes received by the uplink since startup.
- int queueLen; // length of queue
+ atomic_int queueLen; // length of queue
uint32_t idleTime; // How many seconds the uplink was idle (apart from keep-alives)
dnbd3_queued_request_t queue[SERVER_MAX_UPLINK_QUEUE];
+ dnbd3_alt_local_t altData[SERVER_MAX_ALTS];
};
typedef struct
{
- char comment[COMMENT_LENGTH];
- dnbd3_host_t host;
- unsigned int rtt[SERVER_RTT_PROBES];
- unsigned int rttIndex;
- bool isPrivate, isClientOnly;
- ticks lastFail;
- int numFails;
-} dnbd3_alt_server_t;
-
-typedef struct
-{
uint8_t host[16];
int bytes;
int bitMask;
@@ -106,7 +109,7 @@ struct _dnbd3_image
{
char *path; // absolute path of the image
char *name; // public name of the image (usually relative path minus revision ID)
- dnbd3_connection_t *uplink; // pointer to a server connection
+ weakref uplinkref; // pointer to a server connection
uint8_t *cache_map; // cache map telling which parts are locally cached, NULL if complete
uint64_t virtualFilesize; // virtual size of image (real size rounded up to multiple of 4k)
uint64_t realFilesize; // actual file size on disk
@@ -117,9 +120,9 @@ struct _dnbd3_image
uint32_t masterCrc32; // CRC-32 of the crc-32 list
int readFd; // used to read the image. Used from multiple threads, so use atomic operations (pread et al)
int completenessEstimate; // Completeness estimate in percent
- int users; // clients currently using this image
+ atomic_int users; // clients currently using this image. XXX Lock on imageListLock when modifying and checking whether the image should be freed. Reading it elsewhere is fine without the lock.
int id; // Unique ID of this image. Only unique in the context of this running instance of DNBD3-Server
- bool working; // true if image exists and completeness is == 100% or a working upstream proxy is connected
+ atomic_bool working; // true if image exists and completeness is == 100% or a working upstream proxy is connected
uint16_t rid; // revision of image
pthread_mutex_t lock;
};
@@ -128,13 +131,14 @@ struct _dnbd3_client
{
#define HOSTNAMELEN (48)
atomic_uint_fast64_t bytesSent; // Byte counter for this client.
- dnbd3_image_t *image; // Image in use by this client, or NULL during handshake
+ dnbd3_image_t * _Atomic image; // Image in use by this client, or NULL during handshake
int sock;
bool isServer; // true if a server in proxy mode, false if real client
dnbd3_host_t host;
char hostName[HOSTNAMELEN]; // inet_ntop version of host
pthread_mutex_t sendMutex; // Held while writing to sock if image is incomplete (since uplink uses socket too)
pthread_mutex_t lock;
+ pthread_t thread;
};
// #######################################################
diff --git a/src/server/image.c b/src/server/image.c
index bfba6cb..248c12c 100644
--- a/src/server/image.c
+++ b/src/server/image.c
@@ -8,6 +8,7 @@
#include "../shared/protocol.h"
#include "../shared/timing.h"
#include "../shared/crc32.h"
+#include "reference.h"
#include <assert.h>
#include <fcntl.h>
@@ -53,15 +54,17 @@ static bool image_ensureDiskSpace(uint64_t size, bool force);
static uint8_t* image_loadCacheMap(const char * const imagePath, const int64_t fileSize);
static uint32_t* image_loadCrcList(const char * const imagePath, const int64_t fileSize, uint32_t *masterCrc);
static bool image_checkRandomBlocks(const int count, int fdImage, const int64_t fileSize, uint32_t * const crc32list, uint8_t * const cache_map);
+static void* closeUnusedFds(void*);
// ##########################################
void image_serverStartup()
{
srand( (unsigned int)time( NULL ) );
- mutex_init( &imageListLock );
- mutex_init( &remoteCloneLock );
- mutex_init( &reloadLock );
+ mutex_init( &imageListLock, LOCK_IMAGE_LIST );
+ mutex_init( &remoteCloneLock, LOCK_REMOTE_CLONE );
+ mutex_init( &reloadLock, LOCK_RELOAD );
+ server_addJob( &closeUnusedFds, NULL, 10, 900 );
}
/**
@@ -267,14 +270,12 @@ dnbd3_image_t* image_get(char *name, uint16_t revision, bool checkIfWorking)
return NULL ;
}
- mutex_lock( &candidate->lock );
- mutex_unlock( &imageListLock );
candidate->users++;
- mutex_unlock( &candidate->lock );
+ mutex_unlock( &imageListLock );
// Found, see if it works
-// TODO: Also make sure a non-working image still has old fd open but created a new one and removed itself from the list
-// TODO: But remember size-changed images forever
+ // TODO: Also make sure a non-working image still has old fd open but created a new one and removed itself from the list
+ // TODO: But remember size-changed images forever
if ( candidate->working || checkIfWorking ) {
// Is marked working, but might not have an fd open
if ( !image_ensureOpen( candidate ) ) {
@@ -349,7 +350,7 @@ dnbd3_image_t* image_get(char *name, uint16_t revision, bool checkIfWorking)
img->rid = candidate->rid;
img->users = 1;
img->working = false;
- mutex_init( &img->lock );
+ mutex_init( &img->lock, LOCK_IMAGE );
if ( candidate->crc32 != NULL ) {
const size_t mb = IMGSIZE_TO_HASHBLOCKS( candidate->virtualFilesize ) * sizeof(uint32_t);
img->crc32 = malloc( mb );
@@ -377,9 +378,7 @@ dnbd3_image_t* image_get(char *name, uint16_t revision, bool checkIfWorking)
// Check if image is incomplete, handle
if ( candidate->cache_map != NULL ) {
- if ( candidate->uplink == NULL ) {
- uplink_init( candidate, -1, NULL, -1 );
- }
+ uplink_init( candidate, -1, NULL, -1 );
}
return candidate; // We did all we can, hopefully it's working
@@ -391,17 +390,15 @@ dnbd3_image_t* image_get(char *name, uint16_t revision, bool checkIfWorking)
* Every call to image_lock() needs to be followed by a call to image_release() at some point.
* Locks on: imageListLock, _images[].lock
*/
-dnbd3_image_t* image_lock(dnbd3_image_t *image) // TODO: get rid, fix places that do image->users--
+dnbd3_image_t* image_lock(dnbd3_image_t *image)
{
if ( image == NULL ) return NULL ;
int i;
mutex_lock( &imageListLock );
for (i = 0; i < _num_images; ++i) {
if ( _images[i] == image ) {
- mutex_lock( &image->lock );
- mutex_unlock( &imageListLock );
image->users++;
- mutex_unlock( &image->lock );
+ mutex_unlock( &imageListLock );
return image;
}
}
@@ -419,12 +416,9 @@ dnbd3_image_t* image_release(dnbd3_image_t *image)
{
if ( image == NULL ) return NULL;
mutex_lock( &imageListLock );
- mutex_lock( &image->lock );
assert( image->users > 0 );
- image->users--;
- bool inUse = image->users != 0;
- mutex_unlock( &image->lock );
- if ( inUse ) { // Still in use, do nothing
+ // Decrement and check for 0
+ if ( --image->users != 0 ) { // Still in use, do nothing
mutex_unlock( &imageListLock );
return NULL;
}
@@ -439,7 +433,7 @@ dnbd3_image_t* image_release(dnbd3_image_t *image)
}
mutex_unlock( &imageListLock );
// So it wasn't in the images list anymore either, get rid of it
- if ( !inUse ) image = image_free( image );
+ image = image_free( image );
return NULL;
}
@@ -470,7 +464,6 @@ static dnbd3_image_t* image_remove(dnbd3_image_t *image)
{
bool mustFree = false;
mutex_lock( &imageListLock );
- mutex_lock( &image->lock );
for ( int i = _num_images - 1; i >= 0; --i ) {
if ( _images[i] == image ) {
_images[i] = NULL;
@@ -478,7 +471,6 @@ static dnbd3_image_t* image_remove(dnbd3_image_t *image)
}
if ( _images[i] == NULL && i + 1 == _num_images ) _num_images--;
}
- mutex_unlock( &image->lock );
mutex_unlock( &imageListLock );
if ( mustFree ) image = image_free( image );
return image;
@@ -493,17 +485,7 @@ void image_killUplinks()
mutex_lock( &imageListLock );
for (i = 0; i < _num_images; ++i) {
if ( _images[i] == NULL ) continue;
- mutex_lock( &_images[i]->lock );
- if ( _images[i]->uplink != NULL ) {
- mutex_lock( &_images[i]->uplink->queueLock );
- if ( !_images[i]->uplink->shutdown ) {
- thread_detach( _images[i]->uplink->thread );
- _images[i]->uplink->shutdown = true;
- }
- mutex_unlock( &_images[i]->uplink->queueLock );
- signal_call( _images[i]->uplink->signal );
- }
- mutex_unlock( &_images[i]->lock );
+ uplink_shutdown( _images[i] );
}
mutex_unlock( &imageListLock );
}
@@ -542,18 +524,14 @@ bool image_loadAll(char *path)
// Lock again, see if image is still there, free if required
mutex_lock( &imageListLock );
if ( ret || i >= _num_images || _images[i] == NULL || _images[i]->id != imgId ) continue;
- // Image needs to be removed
+ // File not readable but still in list -- needs to be removed
imgHandle = _images[i];
_images[i] = NULL;
if ( i + 1 == _num_images ) _num_images--;
- mutex_lock( &imgHandle->lock );
- const bool freeImg = ( imgHandle->users == 0 );
- mutex_unlock( &imgHandle->lock );
- // We unlocked, but the image has been removed from the list already, so
- // there's no way the users-counter can increase at this point.
- if ( freeImg ) {
+ if ( imgHandle->users == 0 ) {
// Image is not in use anymore, free the dangling entry immediately
- mutex_unlock( &imageListLock ); // image_free might do several fs operations; unlock
+ mutex_unlock( &imageListLock ); // image_free locks on this, and
+ // might do several fs operations; unlock
image_free( imgHandle );
mutex_lock( &imageListLock );
}
@@ -581,12 +559,10 @@ bool image_tryFreeAll()
{
mutex_lock( &imageListLock );
for (int i = _num_images - 1; i >= 0; --i) {
- if ( _images[i] != NULL && _images[i]->users == 0 ) { // XXX Data race...
+ if ( _images[i] != NULL && _images[i]->users == 0 ) {
dnbd3_image_t *image = _images[i];
_images[i] = NULL;
- mutex_unlock( &imageListLock );
image = image_free( image );
- mutex_lock( &imageListLock );
}
if ( i + 1 == _num_images && _images[i] == NULL ) _num_images--;
}
@@ -596,16 +572,18 @@ bool image_tryFreeAll()
/**
* Free image. DOES NOT check if it's in use.
- * Indirectly locks on imageListLock, image.lock, uplink.queueLock
+ * (Indirectly) locks on image.lock, uplink.queueLock
*/
static dnbd3_image_t* image_free(dnbd3_image_t *image)
{
assert( image != NULL );
- if ( !_shutdown ) {
- logadd( LOG_INFO, "Freeing image %s:%d", image->name, (int)image->rid );
- }
- //
- uplink_shutdown( image );
+ assert( image->users == 0 );
+ logadd( ( _shutdown ? LOG_DEBUG1 : LOG_INFO ), "Freeing image %s:%d", image->name, (int)image->rid );
+ // uplink_shutdown might return false to tell us
+ // that the shutdown is in progress. Bail out since
+ // this will get called again when the uplink is done.
+ if ( !uplink_shutdown( image ) )
+ return NULL;
mutex_lock( &image->lock );
free( image->cache_map );
free( image->crc32 );
@@ -618,8 +596,6 @@ static dnbd3_image_t* image_free(dnbd3_image_t *image)
mutex_unlock( &image->lock );
if ( image->readFd != -1 ) close( image->readFd );
mutex_destroy( &image->lock );
- //
- memset( image, 0, sizeof(*image) );
free( image );
return NULL ;
}
@@ -873,7 +849,7 @@ static bool image_load(char *base, char *path, int withUplink)
image->cache_map = cache_map;
image->crc32 = crc32list;
image->masterCrc32 = masterCrc;
- image->uplink = NULL;
+ image->uplinkref = NULL;
image->realFilesize = realFilesize;
image->virtualFilesize = virtualFilesize;
image->rid = (uint16_t)revision;
@@ -882,7 +858,7 @@ static bool image_load(char *base, char *path, int withUplink)
image->working = (image->cache_map == NULL );
timing_get( &image->nextCompletenessEstimate );
image->completenessEstimate = -1;
- mutex_init( &image->lock );
+ mutex_init( &image->lock, LOCK_IMAGE );
int32_t offset;
if ( stat( path, &st ) == 0 ) {
// Negatively offset atime by file modification time
@@ -1191,7 +1167,7 @@ static dnbd3_image_t *loadImageProxy(char * const name, const uint16_t revision,
dnbd3_host_t servers[REP_NUM_SRV];
int uplinkSock = -1;
dnbd3_host_t uplinkServer;
- const int count = altservers_getListForUplink( servers, REP_NUM_SRV, false );
+ const int count = altservers_getHostListForReplication( servers, REP_NUM_SRV );
uint16_t remoteProtocolVersion;
uint16_t remoteRid = revision;
uint64_t remoteImageSize;
@@ -1504,9 +1480,9 @@ json_t* image_getListAsJson()
json_t *imagesJson = json_array();
json_t *jsonImage;
int i;
- char uplinkName[100] = { 0 };
+ char uplinkName[100];
uint64_t bytesReceived;
- int users, completeness, idleTime;
+ int completeness, idleTime;
declare_now;
mutex_lock( &imageListLock );
@@ -1514,27 +1490,26 @@ json_t* image_getListAsJson()
if ( _images[i] == NULL ) continue;
dnbd3_image_t *image = _images[i];
mutex_lock( &image->lock );
- mutex_unlock( &imageListLock );
- users = image->users;
idleTime = (int)timing_diff( &image->atime, &now );
completeness = image_getCompletenessEstimate( image );
- if ( image->uplink == NULL ) {
+ mutex_unlock( &image->lock );
+ dnbd3_uplink_t *uplink = ref_get_uplink( &image->uplinkref );
+ if ( uplink == NULL ) {
bytesReceived = 0;
uplinkName[0] = '\0';
} else {
- bytesReceived = image->uplink->bytesReceived;
- if ( image->uplink->fd == -1 || !host_to_string( &image->uplink->currentServer, uplinkName, sizeof(uplinkName) ) ) {
+ bytesReceived = uplink->bytesReceived;
+ if ( !uplink_getHostString( uplink, uplinkName, sizeof(uplinkName) ) ) {
uplinkName[0] = '\0';
}
+ ref_put( &uplink->reference );
}
- image->users++; // Prevent freeing after we unlock
- mutex_unlock( &image->lock );
jsonImage = json_pack( "{sisssisisisisI}",
"id", image->id, // id, name, rid never change, so access them without locking
"name", image->name,
"rid", (int) image->rid,
- "users", users,
+ "users", image->users,
"complete", completeness,
"idle", idleTime,
"size", (json_int_t)image->virtualFilesize );
@@ -1546,8 +1521,6 @@ json_t* image_getListAsJson()
}
json_array_append_new( imagesJson, jsonImage );
- image = image_release( image ); // Since we did image->users++;
- mutex_lock( &imageListLock );
}
mutex_unlock( &imageListLock );
return imagesJson;
@@ -1669,7 +1642,7 @@ bool image_ensureDiskSpaceLocked(uint64_t size, bool force)
* TODO: Store last access time of images. Currently the
* last access time is reset to the file modification time
* on server restart. Thus it will
- * currently only delete images if server uptime is > 10 hours.
+ * currently only delete images if server uptime is > 24 hours.
* This can be overridden by setting force to true, in case
* free space is desperately needed.
* Return true iff enough space is available. false in random other cases
@@ -1693,34 +1666,39 @@ static bool image_ensureDiskSpace(uint64_t size, bool force)
(int)(size / (1024 * 1024)) );
// Find least recently used image
dnbd3_image_t *oldest = NULL;
- int i; // XXX improve locking
+ int i;
+ mutex_lock( &imageListLock );
for (i = 0; i < _num_images; ++i) {
- if ( _images[i] == NULL ) continue;
- dnbd3_image_t *current = image_lock( _images[i] );
+ dnbd3_image_t *current = _images[i];
if ( current == NULL ) continue;
- if ( current->users == 1 ) { // Just from the lock above
+ if ( current->users == 0 ) { // Not in use :-)
if ( oldest == NULL || timing_1le2( &current->atime, &oldest->atime ) ) {
// Oldest access time so far
oldest = current;
}
}
- current = image_release( current );
+ }
+ if ( oldest != NULL ) {
+ oldest->users++;
+ }
+ mutex_unlock( &imageListLock );
+ if ( oldest == NULL ) {
+ logadd( LOG_INFO, "All images are currently in use :-(" );
+ return false;
}
declare_now;
- if ( oldest == NULL || ( !_sparseFiles && timing_diff( &oldest->atime, &now ) < 86400 ) ) {
- if ( oldest == NULL ) {
- logadd( LOG_INFO, "All images are currently in use :-(" );
- } else {
- logadd( LOG_INFO, "Won't free any image, all have been in use in the past 24 hours :-(" );
- }
+ if ( !_sparseFiles && timing_diff( &oldest->atime, &now ) < 86400 ) {
+ logadd( LOG_INFO, "Won't free any image, all have been in use in the past 24 hours :-(" );
+ image_release( oldest ); // We did users++ above; image might have to be freed entirely
return false;
}
- oldest = image_lock( oldest );
- if ( oldest == NULL ) continue; // Image freed in the meantime? Try again
logadd( LOG_INFO, "'%s:%d' has to go!", oldest->name, (int)oldest->rid );
- char *filename = strdup( oldest->path );
- oldest = image_remove( oldest );
- oldest = image_release( oldest );
+ char *filename = strdup( oldest->path ); // Copy name as we remove the image first
+ oldest = image_remove( oldest ); // Remove from list first...
+ oldest = image_release( oldest ); // Decrease users counter; if it falls to 0, image will be freed
+ // Technically the image might have been grabbed again, but chances for
+ // this should be close to zero anyways since the image went unused for more than 24 hours..
+ // Proper fix would be a "delete" flag in the image struct that will be checked in image_free
unlink( filename );
size_t len = strlen( filename ) + 10;
char buffer[len];
@@ -1735,62 +1713,32 @@ static bool image_ensureDiskSpace(uint64_t size, bool force)
return false;
}
-void image_closeUnusedFd()
+#define FDCOUNT (400)
+static void* closeUnusedFds(void* nix UNUSED)
{
- int fd, i;
+ if ( !_closeUnusedFd )
+ return NULL;
ticks deadline;
timing_gets( &deadline, -UNUSED_FD_TIMEOUT );
- char imgstr[300];
+ int fds[FDCOUNT];
+ int fdindex = 0;
mutex_lock( &imageListLock );
- for (i = 0; i < _num_images; ++i) {
+ for ( int i = 0; i < _num_images; ++i ) {
dnbd3_image_t * const image = _images[i];
if ( image == NULL )
continue;
- mutex_lock( &image->lock );
- mutex_unlock( &imageListLock );
- if ( image->users == 0 && image->uplink == NULL && timing_reached( &image->atime, &deadline ) ) {
- snprintf( imgstr, sizeof(imgstr), "%s:%d", image->name, (int)image->rid );
- fd = image->readFd;
- image->readFd = -1;
- } else {
- fd = -1;
- }
- mutex_unlock( &image->lock );
- if ( fd != -1 ) {
- close( fd );
- logadd( LOG_DEBUG1, "Inactive fd closed for %s", imgstr );
+ if ( image->users == 0 && image->uplinkref == NULL && timing_reached( &image->atime, &deadline ) ) {
+ logadd( LOG_DEBUG1, "Inactive fd closed for %s:%d", image->name, (int)image->rid );
+ fds[fdindex++] = image->readFd;
+ image->readFd = -1; // Not a race; image->users is 0 and to increase it you need imageListLock
+ if ( fdindex == FDCOUNT )
+ break;
}
- mutex_lock( &imageListLock );
}
mutex_unlock( &imageListLock );
+ // Do this after unlock since close might block
+ for ( int i = 0; i < fdindex; ++i ) {
+ close( fds[i] );
+ }
+ return NULL;
}
-
-/*
- void image_find_latest()
- {
- // Not in array or most recent rid is requested, try file system
- if (revision != 0) {
- // Easy case - specific RID
- char
- } else {
- // Determine base directory where the image in question has to reside.
- // Eg, the _basePath is "/srv/", requested image is "rz/ubuntu/default-13.04"
- // Then searchPath has to be set to "/srv/rz/ubuntu"
- char searchPath[strlen(_basePath) + len + 1];
- char *lastSlash = strrchr(name, '/');
- char *baseName; // Name of the image. In the example above, it will be "default-13.04"
- if ( lastSlash == NULL ) {
- *searchPath = '\0';
- baseName = name;
- } else {
- char *from = name, *to = searchPath;
- while (from < lastSlash) *to++ = *from++;
- *to = '\0';
- baseName = lastSlash + 1;
- }
- // Now we have the search path in our real file system and the expected image name.
- // The revision naming sceme is <IMAGENAME>.r<RID>, so if we're looking for revision 13,
- // our example image has to be named default-13.04.r13
- }
- }
- */
diff --git a/src/server/integrity.c b/src/server/integrity.c
index 8f17855..e7ebeb2 100644
--- a/src/server/integrity.c
+++ b/src/server/integrity.c
@@ -4,6 +4,7 @@
#include "locks.h"
#include "image.h"
#include "uplink.h"
+#include "reference.h"
#include <assert.h>
#include <sys/syscall.h>
@@ -29,7 +30,7 @@ static queue_entry checkQueue[CHECK_QUEUE_SIZE];
static pthread_mutex_t integrityQueueLock;
static pthread_cond_t queueSignal;
static int queueLen = -1;
-static volatile bool bRunning = false;
+static atomic_bool bRunning = false;
static void* integrity_main(void *data);
@@ -39,7 +40,7 @@ static void* integrity_main(void *data);
void integrity_init()
{
assert( queueLen == -1 );
- mutex_init( &integrityQueueLock );
+ mutex_init( &integrityQueueLock, LOCK_INTEGRITY_QUEUE );
pthread_cond_init( &queueSignal, NULL );
mutex_lock( &integrityQueueLock );
queueLen = 0;
@@ -183,13 +184,20 @@ static void* integrity_main(void * data UNUSED)
mutex_unlock( &image->lock );
}
#if defined(linux) || defined(__linux)
- if ( sync_file_range( fd, start, end - start, SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER ) == -1 ) {
+ while ( sync_file_range( fd, start, end - start, SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER ) == -1 )
#else
- if ( fsync( fd ) == -1 ) {
+ while ( fsync( fd ) == -1 )
#endif
- logadd( LOG_ERROR, "Cannot flush %s for integrity check", image->path );
+ {
+ if ( _shutdown )
+ break;
+ if ( errno == EINTR )
+ continue;
+ logadd( LOG_ERROR, "Cannot flush %s for integrity check (errno=%d)", image->path, errno );
exit( 1 );
}
+ if ( _shutdown )
+ break;
// Use direct I/O only if read length is multiple of 4096 to be on the safe side
int tfd;
if ( direct && ( end % DNBD3_BLOCK_SIZE ) == 0 ) {
@@ -238,11 +246,13 @@ static void* integrity_main(void * data UNUSED)
if ( i + 1 == queueLen ) queueLen--;
// Mark as working again if applicable
if ( !foundCorrupted ) {
- mutex_lock( &image->lock );
- if ( image->uplink != NULL ) { // TODO: image_determineWorkingState() helper?
- image->working = image->uplink->fd != -1 && image->readFd != -1;
+ dnbd3_uplink_t *uplink = ref_get_uplink( &image->uplinkref );
+ if ( uplink != NULL ) { // TODO: image_determineWorkingState() helper?
+ mutex_lock( &image->lock );
+ image->working = uplink->current.fd != -1 && image->readFd != -1;
+ mutex_unlock( &image->lock );
+ ref_put( &uplink->reference );
}
- mutex_unlock( &image->lock );
}
} else {
// Still more blocks to go...
@@ -255,19 +265,17 @@ static void* integrity_main(void * data UNUSED)
// Something was fishy, make sure uplink exists
mutex_lock( &image->lock );
image->working = false;
- bool restart = image->uplink == NULL || image->uplink->shutdown;
mutex_unlock( &image->lock );
- if ( restart ) {
- uplink_shutdown( image );
- uplink_init( image, -1, NULL, -1 );
- }
+ uplink_init( image, -1, NULL, -1 );
}
// Release :-)
image_release( image );
}
}
mutex_unlock( &integrityQueueLock );
- if ( buffer != NULL ) free( buffer );
+ if ( buffer != NULL ) {
+ free( buffer );
+ }
bRunning = false;
return NULL;
}
diff --git a/src/server/locks.c b/src/server/locks.c
index a5b7c76..b39576b 100644
--- a/src/server/locks.c
+++ b/src/server/locks.c
@@ -12,47 +12,45 @@
#ifdef _DEBUG
#define MAXLOCKS (SERVER_MAX_CLIENTS * 2 + SERVER_MAX_ALTS + 200 + SERVER_MAX_IMAGES)
#define MAXTHREADS (SERVER_MAX_CLIENTS + 100)
+#define MAXLPT 20
#define LOCKLEN 60
typedef struct
{
- void *lock;
+ void * _Atomic lock;
ticks locktime;
- char locked;
- pthread_t thread;
+ bool _Atomic locked;
+ pthread_t _Atomic thread;
int lockId;
+ int prio;
char name[LOCKLEN];
char where[LOCKLEN];
} debug_lock_t;
typedef struct
{
- pthread_t tid;
+ pthread_t _Atomic tid;
ticks time;
char name[LOCKLEN];
char where[LOCKLEN];
-
+ debug_lock_t *locks[MAXLPT];
} debug_thread_t;
int debugThreadCount = 0;
static debug_lock_t locks[MAXLOCKS];
static debug_thread_t threads[MAXTHREADS];
-static int init_done = 0;
-static pthread_mutex_t initdestory;
+static pthread_mutex_t initdestory = PTHREAD_MUTEX_INITIALIZER;
static int lockId = 0;
-static pthread_t watchdog = 0;
-static dnbd3_signal_t* watchdogSignal = NULL;
-static void *debug_thread_watchdog(void *something);
+#define ULDE(...) do { \
+ pthread_mutex_unlock( &initdestory ); \
+ logadd( LOG_ERROR, __VA_ARGS__ ); \
+ debug_dump_lock_stats(); \
+ exit( 4 ); \
+} while(0)
-int debug_mutex_init(const char *name, const char *file, int line, pthread_mutex_t *lock)
+int debug_mutex_init(const char *name, const char *file, int line, pthread_mutex_t *lock, int priority)
{
- if ( !init_done ) {
- memset( locks, 0, MAXLOCKS * sizeof(debug_lock_t) );
- memset( threads, 0, MAXTHREADS * sizeof(debug_thread_t) );
- pthread_mutex_init( &initdestory, NULL );
- init_done = 1;
- }
int first = -1;
pthread_mutex_lock( &initdestory );
for (int i = 0; i < MAXLOCKS; ++i) {
@@ -63,20 +61,18 @@ int debug_mutex_init(const char *name, const char *file, int line, pthread_mutex
if ( first == -1 && locks[i].lock == NULL ) first = i;
}
if ( first == -1 ) {
- logadd( LOG_ERROR, "No more free debug locks (%s:%d)\n", file, line );
- pthread_mutex_unlock( &initdestory );
- debug_dump_lock_stats();
- exit( 4 );
+ ULDE( "No more free debug locks (%s:%d)\n", file, line );
}
locks[first].lock = (void*)lock;
- locks[first].locked = 0;
+ locks[first].locked = false;
+ locks[first].prio = priority;
snprintf( locks[first].name, LOCKLEN, "%s", name );
snprintf( locks[first].where, LOCKLEN, "I %s:%d", file, line );
pthread_mutex_unlock( &initdestory );
return pthread_mutex_init( lock, NULL );
}
-int debug_mutex_lock(const char *name, const char *file, int line, pthread_mutex_t *lock)
+int debug_mutex_lock(const char *name, const char *file, int line, pthread_mutex_t *lock, bool try)
{
debug_lock_t *l = NULL;
pthread_mutex_lock( &initdestory );
@@ -86,163 +82,180 @@ int debug_mutex_lock(const char *name, const char *file, int line, pthread_mutex
break;
}
}
- pthread_mutex_unlock( &initdestory );
if ( l == NULL ) {
- logadd( LOG_ERROR, "Tried to lock uninitialized lock %p (%s) at %s:%d\n", (void*)lock, name, file, line );
- debug_dump_lock_stats();
- exit( 4 );
+ ULDE( "Tried to lock uninitialized lock %p (%s) at %s:%d\n", (void*)lock, name, file, line );
}
debug_thread_t *t = NULL;
- pthread_mutex_lock( &initdestory );
+ int first = -1;
+ const pthread_t self = pthread_self();
for (int i = 0; i < MAXTHREADS; ++i) {
- if ( threads[i].tid != 0 ) continue;
- threads[i].tid = pthread_self();
- timing_get( &threads[i].time );
- snprintf( threads[i].name, LOCKLEN, "%s", name );
- snprintf( threads[i].where, LOCKLEN, "%s:%d", file, line );
- t = &threads[i];
- break;
- }
- pthread_mutex_unlock( &initdestory );
- if ( t == NULL ) {
- logadd( LOG_ERROR, "Lock sanity check: Too many waiting threads for lock %p (%s) at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
- }
- const int retval = pthread_mutex_lock( lock );
- pthread_mutex_lock( &initdestory );
- t->tid = 0;
- pthread_mutex_unlock( &initdestory );
- if ( l->locked ) {
- logadd( LOG_ERROR, "Lock sanity check: lock %p (%s) already locked at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
- }
- l->locked = 1;
- timing_get( &l->locktime );
- l->thread = pthread_self();
- snprintf( l->where, LOCKLEN, "L %s:%d", file, line );
- pthread_mutex_lock( &initdestory );
- l->lockId = ++lockId;
- pthread_mutex_unlock( &initdestory );
- return retval;
-}
-
-int debug_mutex_trylock(const char *name, const char *file, int line, pthread_mutex_t *lock)
-{
- debug_lock_t *l = NULL;
- pthread_mutex_lock( &initdestory );
- for (int i = 0; i < MAXLOCKS; ++i) {
- if ( locks[i].lock == lock ) {
- l = &locks[i];
+ if ( threads[i].tid == self ) {
+ t = &threads[i];
break;
}
+ if ( first == -1 && threads[i].tid == 0 ) {
+ first = i;
+ }
}
- pthread_mutex_unlock( &initdestory );
- if ( l == NULL ) {
- logadd( LOG_ERROR, "Tried to lock uninitialized lock %p (%s) at %s:%d\n", (void*)lock, name, file, line );
- debug_dump_lock_stats();
- exit( 4 );
- }
- debug_thread_t *t = NULL;
- pthread_mutex_lock( &initdestory );
- for (int i = 0; i < MAXTHREADS; ++i) {
- if ( threads[i].tid != 0 ) continue;
- threads[i].tid = pthread_self();
- timing_get( &threads[i].time );
- snprintf( threads[i].name, LOCKLEN, "%s", name );
- snprintf( threads[i].where, LOCKLEN, "%s:%d", file, line );
- t = &threads[i];
- break;
- }
- pthread_mutex_unlock( &initdestory );
+ int idx;
if ( t == NULL ) {
- logadd( LOG_ERROR, "Lock sanity check: Too many waiting threads for %p (%s) at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
+ if ( first == -1 ) {
+ ULDE( "Lock sanity check: Too many waiting threads for lock %p (%s) at %s:%d\n", (void*)lock, name, file, line );
+ }
+ t = &threads[first];
+ timing_get( &t->time );
+ t->tid = self;
+ snprintf( t->name, LOCKLEN, "%s", name );
+ snprintf( t->where, LOCKLEN, "%s:%d", file, line );
+ memset( t->locks, 0, sizeof(t->locks) );
+ idx = 0;
+ } else {
+ // Thread already has locks, check for order violation
+ idx = -1;
+ for (int i = 0; i < MAXLPT; ++i) {
+ if ( t->locks[i] == NULL ) {
+ if ( idx == -1 ) {
+ idx = i;
+ }
+ continue;
+ }
+ if ( t->locks[i]->prio >= l->prio ) {
+ ULDE( "Lock priority violation: %s at %s:%d (%d) when already holding %s at %s (%d)",
+ name, file, line, l->prio,
+ t->locks[i]->name, t->locks[i]->where, t->locks[i]->prio );
+ }
+ if ( t->locks[i] == l ) {
+ ULDE( "Tried to recusively lock %s in the same thread. Tried at %s:%d, when already locked at %s",
+ name, file, line, t->locks[i]->name );
+ }
+ }
+ if ( idx == -1 ) {
+ ULDE( "Thread %d tried to lock more than %d locks.", (int)self, (int)MAXLPT );
+ }
}
- const int retval = pthread_mutex_trylock( lock );
- pthread_mutex_lock( &initdestory );
- t->tid = 0;
pthread_mutex_unlock( &initdestory );
+ const int retval = try ? pthread_mutex_trylock( lock ) : pthread_mutex_lock( lock );
if ( retval == 0 ) {
+ timing_get( &l->locktime );
+ l->thread = self;
+ snprintf( l->where, LOCKLEN, "L %s:%d", file, line );
+ pthread_mutex_lock( &initdestory );
if ( l->locked ) {
logadd( LOG_ERROR, "Lock sanity check: lock %p (%s) already locked at %s:%d\n", (void*)lock, name, file, line );
exit( 4 );
}
- l->locked = 1;
- timing_get( &l->locktime );
- l->thread = pthread_self();
- snprintf( l->where, LOCKLEN, "L %s:%d", file, line );
- pthread_mutex_lock( &initdestory );
+ l->locked = true;
+ t->locks[idx] = l;
l->lockId = ++lockId;
pthread_mutex_unlock( &initdestory );
+ } else if ( !try || retval != EBUSY ) {
+ logadd( LOG_ERROR, "Acquiring lock %s at %s:%d failed with error code %d", name, file, line, retval );
+ debug_dump_lock_stats();
+ exit( 4 );
}
return retval;
}
int debug_mutex_unlock(const char *name, const char *file, int line, pthread_mutex_t *lock)
{
- debug_lock_t *l = NULL;
+ debug_thread_t *t = NULL;
+ pthread_t self = pthread_self();
pthread_mutex_lock( &initdestory );
- for (int i = 0; i < MAXLOCKS; ++i) {
- if ( locks[i].lock == lock ) {
- l = &locks[i];
+ for (int i = 0; i < MAXTHREADS; ++i) {
+ if ( threads[i].tid == self ) {
+ t = &threads[i];
break;
}
}
- pthread_mutex_unlock( &initdestory );
- if ( l == NULL ) {
- logadd( LOG_ERROR, "Tried to unlock uninitialized lock %p (%s) at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
+ if ( t == NULL ) {
+ ULDE( "Unlock called from unknown thread for %s at %s:%d", name, file, line );
}
- if ( !l->locked ) {
- logadd( LOG_ERROR, "Unlock sanity check: lock %p (%s) not locked at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
+ int idx = -1;
+ int cnt = 0;
+ for (int i = 0; i < MAXLPT; ++i) {
+ if ( t->locks[i] == NULL )
+ continue;
+ cnt++;
+ if ( t->locks[i]->lock == lock ) {
+ idx = i;
+ }
+ }
+ if ( idx == -1 ) {
+ ULDE( "Unlock: Calling thread doesn't hold lock %s at %s:%d", name, file, line );
}
- l->locked = 0;
+ debug_lock_t *l = t->locks[idx];
+ if ( l->thread != self || !l->locked ) {
+ ULDE( "Unlock sanity check for lock debugger failed! Lock %s is assigned to calling thread, but lock's meta data doesn't match up at %s:%d", name, file, line );
+ }
+ l->locked = false;
l->thread = 0;
+ t->locks[idx] = NULL;
+ if ( cnt == 1 ) {
+ t->tid = 0; // No more locks held, free up slot
+ }
snprintf( l->where, LOCKLEN, "U %s:%d", file, line );
- int retval = pthread_mutex_unlock( lock );
+ pthread_mutex_unlock( &initdestory );
+ const int retval = pthread_mutex_unlock( lock );
+ if ( retval != 0 ) {
+ logadd( LOG_ERROR, "pthread_mutex_unlock returned %d for %s at %s:%d", retval, name, file, line );
+ exit( 4 );
+ }
return retval;
}
int debug_mutex_cond_wait(const char *name, const char *file, int line, pthread_cond_t *restrict cond, pthread_mutex_t *restrict lock)
{
debug_lock_t *l = NULL;
+ debug_thread_t *t = NULL;
+ pthread_t self = pthread_self();
pthread_mutex_lock( &initdestory );
- for (int i = 0; i < MAXLOCKS; ++i) {
- if ( locks[i].lock == lock ) {
- l = &locks[i];
+ for (int i = 0; i < MAXTHREADS; ++i) {
+ if ( threads[i].tid == self ) {
+ t = &threads[i];
break;
}
}
- pthread_mutex_unlock( &initdestory );
+ if ( t == NULL ) {
+ ULDE( "Unlock called from unknown thread for %s at %s:%d", name, file, line );
+ }
+ int mp = 0, mpi = -1;
+ for (int i = 0; i < MAXLPT; ++i) {
+ if ( t->locks[i] == NULL )
+ continue;
+ if ( t->locks[i]->lock == lock ) {
+ l = t->locks[i];
+ } else if ( t->locks[i]->prio > mp ) {
+ mp = t->locks[i]->prio;
+ mpi = i;
+ }
+ }
if ( l == NULL ) {
- logadd( LOG_ERROR, "Tried to cond_wait on uninitialized lock %p (%s) at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
+ ULDE( "cond_wait: Calling thread doesn't hold lock %s at %s:%d", name, file, line );
}
- if ( !l->locked ) {
- logadd( LOG_ERROR, "Cond_wait sanity check: lock %p (%s) not locked at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
+ if ( l->thread != self || !l->locked ) {
+ ULDE( "cond_wait: Sanity check for lock debugger failed! Lock %s is assigned to calling thread, but lock's meta data doesn't match up at %s:%d", name, file, line );
}
- pthread_t self = pthread_self();
- if ( l->thread != self ) {
- logadd( LOG_ERROR, "Cond_wait called from non-owning thread for %p (%s) at %s:%d\n", (void*)lock, name, file, line );
- exit( 4 );
+ if ( mp >= l->prio ) {
+ ULDE( "cond_wait: Yielding a mutex while holding another one with higher prio: %s at %s:%d (%d) while also holding %s at %s (%d)",
+ name, file, line, l->prio,
+ t->locks[mpi]->name, t->locks[mpi]->where, mp );
}
- l->locked = 0;
+ l->locked = false;
l->thread = 0;
- snprintf( l->where, LOCKLEN, "CW %s:%d", file, line );
+ snprintf( l->where, LOCKLEN, "CWU %s:%d", file, line );
+ pthread_mutex_unlock( &initdestory );
int retval = pthread_cond_wait( cond, lock );
if ( retval != 0 ) {
logadd( LOG_ERROR, "pthread_cond_wait returned %d for lock %p (%s) at %s:%d\n", retval, (void*)lock, name, file, line );
exit( 4 );
}
- if ( l->locked != 0 || l->thread != 0 ) {
+ if ( l->locked || l->thread != 0 ) {
logadd( LOG_ERROR, "Lock is not free after returning from pthread_cond_wait for %p (%s) at %s:%d\n", (void*)lock, name, file, line );
exit( 4 );
}
- l->locked = 1;
l->thread = self;
timing_get( &l->locktime );
+ l->locked = true;
pthread_mutex_lock( &initdestory );
l->lockId = ++lockId;
pthread_mutex_unlock( &initdestory );
@@ -256,6 +269,7 @@ int debug_mutex_destroy(const char *name, const char *file, int line, pthread_mu
if ( locks[i].lock == lock ) {
if ( locks[i].locked ) {
logadd( LOG_ERROR, "Tried to destroy lock %p (%s) at %s:%d when it is still locked\n", (void*)lock, name, file, line );
+ logadd( LOG_ERROR, "Currently locked by: %s", locks[i].where );
exit( 4 );
}
locks[i].lock = NULL;
@@ -289,63 +303,21 @@ void debug_dump_lock_stats()
"* Locked: %d\n", locks[i].name, locks[i].where, (int)locks[i].locked );
}
}
- printf( "\n **** WAITING THREADS ****\n\n" );
+ printf( "\n **** ACTIVE THREADS ****\n\n" );
for (int i = 0; i < MAXTHREADS; ++i) {
- if ( threads[i].tid == 0 ) continue;
+ if ( threads[i].tid == 0 )
+ continue;
printf( "* *** Thread %d ***\n"
"* Lock: %s\n"
"* Where: %s\n"
"* How long: %d secs\n", (int)threads[i].tid, threads[i].name, threads[i].where, (int)timing_diff( &threads[i].time, &now ) );
- }
- pthread_mutex_unlock( &initdestory );
-}
-
-static void *debug_thread_watchdog(void *something UNUSED)
-{
- setThreadName( "debug-watchdog" );
- while ( !_shutdown ) {
- if ( init_done ) {
- declare_now;
- pthread_mutex_lock( &initdestory );
- for (int i = 0; i < MAXTHREADS; ++i) {
- if ( threads[i].tid == 0 ) continue;
- const uint32_t diff = timing_diff( &threads[i].time, &now );
- if ( diff > 6 && diff < 100000 ) {
- printf( "\n\n +++++++++ DEADLOCK ++++++++++++\n\n" );
- pthread_mutex_unlock( &initdestory );
- debug_dump_lock_stats();
- exit( 99 );
- }
- }
- pthread_mutex_unlock( &initdestory );
+ for (int j = 0; j < MAXLPT; ++j) {
+ if ( threads[i].locks[j] == NULL )
+ continue;
+ printf( " * Lock %s @ %s\n", threads[i].locks[j]->name, threads[i].locks[j]->where );
}
- if ( watchdogSignal == NULL || signal_wait( watchdogSignal, 5000 ) == SIGNAL_ERROR ) sleep( 5 );
}
- return NULL ;
-}
-
-#endif
-
-void debug_locks_start_watchdog()
-{
-#ifdef _DEBUG
- watchdogSignal = signal_new();
- if ( 0 != thread_create( &watchdog, NULL, &debug_thread_watchdog, (void *)NULL ) ) {
- logadd( LOG_ERROR, "Could not start debug-lock watchdog." );
- return;
- }
-#endif
+ pthread_mutex_unlock( &initdestory );
}
-void debug_locks_stop_watchdog()
-{
-#ifdef _DEBUG
- _shutdown = true;
- printf( "Killing debug watchdog...\n" );
- pthread_mutex_lock( &initdestory );
- signal_call( watchdogSignal );
- pthread_mutex_unlock( &initdestory );
- thread_join( watchdog, NULL );
- signal_close( watchdogSignal );
#endif
-}
diff --git a/src/server/locks.h b/src/server/locks.h
index 7f72722..e5c9801 100644
--- a/src/server/locks.h
+++ b/src/server/locks.h
@@ -5,19 +5,38 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
+
+// Lock priority
+
+#define LOCK_RELOAD 90
+#define LOCK_LOAD_CONFIG 100
+#define LOCK_REMOTE_CLONE 110
+#define LOCK_CLIENT_LIST 120
+#define LOCK_CLIENT 130
+#define LOCK_INTEGRITY_QUEUE 140
+#define LOCK_IMAGE_LIST 150
+#define LOCK_IMAGE 160
+#define LOCK_UPLINK_QUEUE 170
+#define LOCK_ALT_SERVER_LIST 180
+#define LOCK_CLIENT_SEND 190
+#define LOCK_UPLINK_RTT 200
+#define LOCK_UPLINK_SEND 210
+#define LOCK_RPC_ACL 220
+
+//
#ifdef _DEBUG
-#define mutex_init( lock ) debug_mutex_init( #lock, __FILE__, __LINE__, lock)
-#define mutex_lock( lock ) debug_mutex_lock( #lock, __FILE__, __LINE__, lock)
-#define mutex_trylock( lock ) debug_mutex_trylock( #lock, __FILE__, __LINE__, lock)
+#define mutex_init( lock, prio ) debug_mutex_init( #lock, __FILE__, __LINE__, lock, prio)
+#define mutex_lock( lock ) debug_mutex_lock( #lock, __FILE__, __LINE__, lock, false)
+#define mutex_trylock( lock ) debug_mutex_lock( #lock, __FILE__, __LINE__, lock, true)
#define mutex_unlock( lock ) debug_mutex_unlock( #lock, __FILE__, __LINE__, lock)
#define mutex_cond_wait( cond, lock ) debug_mutex_cond_wait( #lock, __FILE__, __LINE__, cond, lock)
#define mutex_destroy( lock ) debug_mutex_destroy( #lock, __FILE__, __LINE__, lock)
-int debug_mutex_init(const char *name, const char *file, int line, pthread_mutex_t *lock);
-int debug_mutex_lock(const char *name, const char *file, int line, pthread_mutex_t *lock);
-int debug_mutex_trylock(const char *name, const char *file, int line, pthread_mutex_t *lock);
+int debug_mutex_init(const char *name, const char *file, int line, pthread_mutex_t *lock, int priority);
+int debug_mutex_lock(const char *name, const char *file, int line, pthread_mutex_t *lock, bool try);
int debug_mutex_unlock(const char *name, const char *file, int line, pthread_mutex_t *lock);
int debug_mutex_cond_wait(const char *name, const char *file, int line, pthread_cond_t *restrict cond, pthread_mutex_t *restrict lock);
int debug_mutex_destroy(const char *name, const char *file, int line, pthread_mutex_t *lock);
@@ -27,7 +46,7 @@ void debug_dump_lock_stats();
#else
-#define mutex_init( lock ) pthread_mutex_init(lock, NULL)
+#define mutex_init( lock, prio ) pthread_mutex_init(lock, NULL)
#define mutex_lock( lock ) pthread_mutex_lock(lock)
#define mutex_trylock( lock ) pthread_mutex_trylock(lock)
#define mutex_unlock( lock ) pthread_mutex_unlock(lock)
@@ -82,7 +101,4 @@ static inline int debug_thread_join(pthread_t thread, void **value_ptr)
#endif
-void debug_locks_start_watchdog();
-void debug_locks_stop_watchdog();
-
#endif /* LOCKS_H_ */
diff --git a/src/server/net.c b/src/server/net.c
index 9abe221..9c855e4 100644
--- a/src/server/net.c
+++ b/src/server/net.c
@@ -24,6 +24,7 @@
#include "locks.h"
#include "rpc.h"
#include "altservers.h"
+#include "reference.h"
#include "../shared/sockhelper.h"
#include "../shared/timing.h"
@@ -43,6 +44,7 @@
#include <jansson.h>
#include <inttypes.h>
#include <stdatomic.h>
+#include <signal.h>
static dnbd3_client_t *_clients[SERVER_MAX_CLIENTS];
static int _num_clients = 0;
@@ -145,13 +147,14 @@ static inline bool sendPadding( const int fd, uint32_t bytes )
void net_init()
{
- mutex_init( &_clients_lock );
+ mutex_init( &_clients_lock, LOCK_CLIENT_LIST );
}
void* net_handleNewConnection(void *clientPtr)
{
dnbd3_client_t * const client = (dnbd3_client_t *)clientPtr;
dnbd3_request_t request;
+ client->thread = pthread_self();
// Await data from client. Since this is a fresh connection, we expect data right away
sock_setTimeout( client->sock, _clientTimeout );
@@ -186,8 +189,8 @@ void* net_handleNewConnection(void *clientPtr)
}
} while (0);
// Fully init client struct
- mutex_init( &client->lock );
- mutex_init( &client->sendMutex );
+ mutex_init( &client->lock, LOCK_CLIENT );
+ mutex_init( &client->sendMutex, LOCK_CLIENT_SEND );
mutex_lock( &client->lock );
host_to_string( &client->host, client->hostName, HOSTNAMELEN );
@@ -229,7 +232,7 @@ void* net_handleNewConnection(void *clientPtr)
rid = serializer_get_uint16( &payload );
const uint8_t flags = serializer_get_uint8( &payload );
client->isServer = ( flags & FLAGS8_SERVER );
- if ( request.size < 3 || !image_name || client_version < MIN_SUPPORTED_CLIENT ) {
+ if ( unlikely( request.size < 3 || !image_name || client_version < MIN_SUPPORTED_CLIENT ) ) {
if ( client_version < MIN_SUPPORTED_CLIENT ) {
logadd( LOG_DEBUG1, "Client %s too old", client->hostName );
} else {
@@ -255,25 +258,27 @@ void* net_handleNewConnection(void *clientPtr)
// No BGR mismatch, but don't lookup if image is unknown locally
image = image_get( image_name, rid, true );
}
- mutex_lock( &client->lock );
client->image = image;
- mutex_unlock( &client->lock );
- if ( image == NULL ) {
+ atomic_thread_fence( memory_order_release );
+ if ( unlikely( image == NULL ) ) {
//logadd( LOG_DEBUG1, "Client requested non-existent image '%s' (rid:%d), rejected\n", image_name, (int)rid );
- } else if ( !image->working ) {
+ } else if ( unlikely( !image->working ) ) {
logadd( LOG_DEBUG1, "Client %s requested non-working image '%s' (rid:%d), rejected\n",
client->hostName, image_name, (int)rid );
} else {
- bool penalty;
// Image is fine so far, but occasionally drop a client if the uplink for the image is clogged or unavailable
bOk = true;
if ( image->cache_map != NULL ) {
- mutex_lock( &image->lock );
- if ( image->uplink == NULL || image->uplink->cacheFd == -1 || image->uplink->queueLen > SERVER_UPLINK_QUEUELEN_THRES ) {
+ dnbd3_uplink_t *uplink = ref_get_uplink( &image->uplinkref );
+ if ( uplink == NULL || uplink->cacheFd == -1 || uplink->queueLen > SERVER_UPLINK_QUEUELEN_THRES ) {
bOk = ( rand() % 4 ) == 1;
}
- penalty = bOk && image->uplink != NULL && image->uplink->cacheFd == -1;
- mutex_unlock( &image->lock );
+ bool penalty = bOk && ( uplink == NULL || uplink->cacheFd == -1 );
+ if ( uplink == NULL ) {
+ uplink_init( image, -1, NULL, 0 );
+ } else {
+ ref_put( &uplink->reference );
+ }
if ( penalty ) { // Wait 100ms if local caching is not working so this
usleep( 100000 ); // server gets a penalty and is less likely to be selected
}
@@ -301,7 +306,7 @@ void* net_handleNewConnection(void *clientPtr)
}
}
- if ( bOk ) {
+ if ( likely( bOk ) ) {
// add artificial delay if applicable
if ( client->isServer && _serverPenalty != 0 ) {
usleep( _serverPenalty );
@@ -315,7 +320,8 @@ void* net_handleNewConnection(void *clientPtr)
case CMD_GET_BLOCK:;
const uint64_t offset = request.offset_small; // Copy to full uint64 to prevent repeated masking
- if ( offset >= image->virtualFilesize ) {
+ reply.handle = request.handle;
+ if ( unlikely( offset >= image->virtualFilesize ) ) {
// Sanity check
logadd( LOG_WARNING, "Client %s requested non-existent block", client->hostName );
reply.size = 0;
@@ -323,7 +329,7 @@ void* net_handleNewConnection(void *clientPtr)
send_reply( client->sock, &reply, NULL );
break;
}
- if ( offset + request.size > image->virtualFilesize ) {
+ if ( unlikely( offset + request.size > image->virtualFilesize ) ) {
// Sanity check
logadd( LOG_WARNING, "Client %s requested data block that extends beyond image size", client->hostName );
reply.size = 0;
@@ -396,10 +402,9 @@ void* net_handleNewConnection(void *clientPtr)
reply.cmd = CMD_GET_BLOCK;
reply.size = request.size;
- reply.handle = request.handle;
fixup_reply( reply );
- const bool lock = image->uplink != NULL;
+ const bool lock = image->uplinkref != NULL;
if ( lock ) mutex_lock( &client->sendMutex );
// Send reply header
if ( send( client->sock, &reply, sizeof(dnbd3_reply_t), (request.size == 0 ? 0 : MSG_MORE) ) != sizeof(dnbd3_reply_t) ) {
@@ -533,16 +538,15 @@ exit_client_cleanup: ;
removeFromList( client );
totalBytesSent += client->bytesSent;
// Access time, but only if client didn't just probe
- if ( image != NULL ) {
+ if ( image != NULL && client->bytesSent > DNBD3_BLOCK_SIZE * 10 ) {
mutex_lock( &image->lock );
- if ( client->bytesSent > DNBD3_BLOCK_SIZE * 10 ) {
- timing_get( &image->atime );
- }
+ timing_get( &image->atime );
mutex_unlock( &image->lock );
}
freeClientStruct( client ); // This will also call image_release on client->image
return NULL ;
fail_preadd: ;
+ // This is before we even initialized any mutex
close( client->sock );
free( client );
return NULL;
@@ -609,6 +613,12 @@ void net_getStats(int *clientCount, int *serverCount, uint64_t *bytesSent)
}
bs += client->bytesSent;
}
+ // Do this before unlocking the list, otherwise we might
+ // account for a client twice if it would disconnect after
+ // unlocking but before we add the count here.
+ if ( bytesSent != NULL ) {
+ *bytesSent = totalBytesSent + bs;
+ }
mutex_unlock( &_clients_lock );
if ( clientCount != NULL ) {
*clientCount = cc;
@@ -616,9 +626,6 @@ void net_getStats(int *clientCount, int *serverCount, uint64_t *bytesSent)
if ( serverCount != NULL ) {
*serverCount = sc;
}
- if ( bytesSent != NULL ) {
- *bytesSent = totalBytesSent + bs;
- }
}
void net_disconnectAll()
@@ -626,11 +633,10 @@ void net_disconnectAll()
int i;
mutex_lock( &_clients_lock );
for (i = 0; i < _num_clients; ++i) {
- if ( _clients[i] == NULL ) continue;
- dnbd3_client_t * const client = _clients[i];
- mutex_lock( &client->lock );
- if ( client->sock >= 0 ) shutdown( client->sock, SHUT_RDWR );
- mutex_unlock( &client->lock );
+ if ( _clients[i] == NULL )
+ continue;
+ shutdown( _clients[i]->sock, SHUT_RDWR );
+ pthread_kill( _clients[i]->thread, SIGINT );
}
mutex_unlock( &_clients_lock );
}
@@ -668,11 +674,19 @@ static void removeFromList(dnbd3_client_t *client)
{
int i;
mutex_lock( &_clients_lock );
- for ( i = _num_clients - 1; i >= 0; --i ) {
- if ( _clients[i] == client ) {
- _clients[i] = NULL;
+ if ( _num_clients != 0 ) {
+ for ( i = _num_clients - 1; i >= 0; --i ) {
+ if ( _clients[i] == client ) {
+ _clients[i] = NULL;
+ break;
+ }
+ }
+ if ( i != 0 && i + 1 == _num_clients ) {
+ do {
+ i--;
+ } while ( _clients[i] == NULL && i > 0 );
+ _num_clients = i + 1;
}
- if ( _clients[i] == NULL && i + 1 == _num_clients ) --_num_clients;
}
mutex_unlock( &_clients_lock );
}
@@ -686,17 +700,21 @@ static void removeFromList(dnbd3_client_t *client)
static dnbd3_client_t* freeClientStruct(dnbd3_client_t *client)
{
mutex_lock( &client->lock );
+ if ( client->image != NULL ) {
+ dnbd3_uplink_t *uplink = ref_get_uplink( &client->image->uplinkref );
+ if ( uplink != NULL ) {
+ uplink_removeClient( uplink, client );
+ ref_put( &uplink->reference );
+ }
+ }
mutex_lock( &client->sendMutex );
- if ( client->sock != -1 ) close( client->sock );
+ if ( client->sock != -1 ) {
+ close( client->sock );
+ }
client->sock = -1;
mutex_unlock( &client->sendMutex );
- if ( client->image != NULL ) {
- mutex_lock( &client->image->lock );
- if ( client->image->uplink != NULL ) uplink_removeClient( client->image->uplink, client );
- mutex_unlock( &client->image->lock );
- client->image = image_release( client->image );
- }
mutex_unlock( &client->lock );
+ client->image = image_release( client->image );
mutex_destroy( &client->lock );
mutex_destroy( &client->sendMutex );
free( client );
@@ -729,3 +747,15 @@ static bool addToList(dnbd3_client_t *client)
return true;
}
+/**
+ * Send a payload-less reply with the given command and handle to
+ * the client, serialized through the client's sendMutex.
+ * The return value of send_reply is deliberately ignored here;
+ * this is best-effort (e.g. used to cancel pending requests).
+ */
+void net_sendReply(dnbd3_client_t *client, uint16_t cmd, uint64_t handle)
+{
+	dnbd3_reply_t reply;
+	reply.magic = dnbd3_packet_magic;
+	reply.cmd = cmd;
+	reply.handle = handle;
+	reply.size = 0;
+	mutex_lock( &client->sendMutex );
+	send_reply( client->sock, &reply, NULL );
+	mutex_unlock( &client->sendMutex );
+}
+
diff --git a/src/server/net.h b/src/server/net.h
index 6813b49..7719aef 100644
--- a/src/server/net.h
+++ b/src/server/net.h
@@ -37,4 +37,6 @@ void net_disconnectAll();
void net_waitForAllDisconnected();
+void net_sendReply(dnbd3_client_t *client, uint16_t cmd, uint64_t handle);
+
#endif /* NET_H_ */
diff --git a/src/server/reference.c b/src/server/reference.c
new file mode 100644
index 0000000..468e00b
--- /dev/null
+++ b/src/server/reference.c
@@ -0,0 +1,33 @@
+#ifndef unlikely
+#define unlikely(x) (x)
+#endif
+#include "reference.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+void ref_init( ref *reference, void ( *freefun )( ref * ), long count )
+{
+ reference->count = count;
+ reference->free = freefun;
+}
+
+/**
+ * Fatal error handler for the reference counting machinery:
+ * print the given message to stderr and abort, since a corrupted
+ * reference counter cannot be recovered from.
+ */
+_Noreturn void _ref_error( const char *message )
+{
+	// Print the actual message instead of a hard-coded string;
+	// previously the 'message' parameter was silently unused.
+	fprintf( stderr, "%s", message );
+	abort();
+}
+
+/**
+ * Atomically replace the reference stored in *weakref with ref.
+ * A weakref encodes in-flight ref_get() calls as a byte offset from
+ * an aligned base pointer, so a newly installed ref has its counter
+ * pre-biased by sizeof(union _aligned_ref_) + 1 to back that scheme.
+ * The previously stored ref (if any) gets the unconsumed part of its
+ * bias credited back (the adjustment can be negative) and is then
+ * released via ref_put().
+ */
+void ref_setref( weakref *weakref, ref *ref )
+{
+	union _aligned_ref_ *new_weakref = 0;
+	if ( ref ) {
+		// Bias the counter before publishing the pointer, so concurrent
+		// ref_get() offset bumps are always covered by the count
+		( new_weakref = aligned_ref( ref->_aligned_ref ) )->ref = ref;
+		ref->count += sizeof( union _aligned_ref_ ) + 1;
+	}
+	char *old_weakref = (char *)atomic_exchange( weakref, new_weakref );
+	if ( !old_weakref )
+		return;
+	struct _ref_ *old_ref = aligned_ref( old_weakref )->ref;
+	// Settle the old ref: add the offset taken by in-flight getters,
+	// minus the bias that was granted when it was installed
+	old_ref->count += old_weakref - (char *)aligned_ref( old_weakref ) - sizeof( union _aligned_ref_ );
+	ref_put( old_ref );
+}
diff --git a/src/server/reference.h b/src/server/reference.h
new file mode 100644
index 0000000..0bc081a
--- /dev/null
+++ b/src/server/reference.h
@@ -0,0 +1,54 @@
+#ifndef _REFERENCE_H_
+#define _REFERENCE_H_
+
+#include "reftypes.h"
+#include <stddef.h>
+#include <stdint.h>
+
+#define container_of(ptr, type, member) \
+ ((type *)((char *)(ptr) - (char *)&(((type *)NULL)->member)))
+
+void ref_init( ref *reference, void ( *freefun )( ref * ), long count );
+
+void ref_setref( weakref *weakref, ref *ref );
+
+_Noreturn void _ref_error( const char *message );
+
+/**
+ * Acquire a strong reference from a weakref, lock-free.
+ * The weakref's offset within its aligned block acts as a temporary
+ * in-flight marker: first bump the offset via CAS, then increment the
+ * real counter, then try to give the offset back. If ref_setref()
+ * swapped the weakref out in the meantime, our offset bump was already
+ * converted into a count adjustment there, so we revert our own
+ * direct increment to avoid double counting.
+ * Returns NULL if no reference is currently set.
+ */
+static inline ref *ref_get( weakref *weakref )
+{
+	char *old_weakref = (char *)*weakref;
+	do {
+		if ( old_weakref == NULL )
+			return NULL;
+		// Offset would overflow into the next aligned block;
+		// re-read and retry until there is room again
+		if ( aligned_ref( old_weakref ) != aligned_ref( old_weakref + 1 ) ) {
+			old_weakref = (char *)*weakref;
+			continue;
+		}
+	} while ( !atomic_compare_exchange_weak( weakref, (void **)&old_weakref, old_weakref + 1 ) );
+	struct _ref_ *ref = aligned_ref( old_weakref )->ref;
+	if ( unlikely( ++ref->count == -1 ) ) {
+		_ref_error( "Reference counter overflow. Aborting.\n" );
+	}
+	char *cur_weakref = ( char * )*weakref;
+	do {
+		// Weakref now points to a different block: ref_setref() settles
+		// the offset we took, so take back our direct increment
+		if ( aligned_ref( cur_weakref ) != aligned_ref( old_weakref ) ) {
+			ref->count--;
+			break;
+		}
+	} while ( !atomic_compare_exchange_weak( weakref, (void **)&cur_weakref, cur_weakref - 1 ) );
+	return ref;
+}
+
+/**
+ * Drop a strong reference; invokes the ref's free callback once
+ * the counter reaches zero.
+ */
+static inline void ref_put( ref *ref )
+{
+	if ( --ref->count == 0 ) {
+		ref->free( ref );
+	}
+}
+
+#define ref_get_uplink(wr) ({ \
+ ref* ref = ref_get( wr ); \
+ ref == NULL ? NULL : container_of(ref, dnbd3_uplink_t, reference); \
+})
+
+#endif
diff --git a/src/server/reftypes.h b/src/server/reftypes.h
new file mode 100644
index 0000000..45c0c20
--- /dev/null
+++ b/src/server/reftypes.h
@@ -0,0 +1,25 @@
+#ifndef _REFTYPES_H_
+#define _REFTYPES_H_
+
+#include <stdatomic.h>
+
+_Static_assert( sizeof( void * ) == sizeof( _Atomic( void * ) ), "Atomic pointer bad" );
+
+typedef _Atomic( void * ) weakref;
+
+#define aligned_ref(ptr) \
+ ((union _aligned_ref_ *)((ptr) - (uintptr_t)(ptr) % sizeof(union _aligned_ref_)))
+
+union _aligned_ref_ {
+ struct _ref_ *ref;
+ void *_padding[( 32 - 1 ) / sizeof( void * ) + 1];
+};
+
+typedef struct _ref_ {
+ _Atomic long count;
+ void ( *free )( struct _ref_ * );
+ char _padding[sizeof( union _aligned_ref_ )];
+ char _aligned_ref[sizeof( union _aligned_ref_ )];
+} ref;
+
+#endif
diff --git a/src/server/rpc.c b/src/server/rpc.c
index 5dbcafe..662263e 100644
--- a/src/server/rpc.c
+++ b/src/server/rpc.c
@@ -75,10 +75,9 @@ static json_int_t randomRunId;
static pthread_mutex_t aclLock;
#define MAX_CLIENTS 50
#define CUTOFF_START 40
-static pthread_mutex_t statusLock;
static struct {
- int count;
- bool overloaded;
+ atomic_int count;
+ atomic_bool overloaded;
} status;
static bool handleStatus(int sock, int permissions, struct field *fields, size_t fields_num, int keepAlive);
@@ -91,8 +90,7 @@ static void loadAcl();
void rpc_init()
{
- mutex_init( &aclLock );
- mutex_init( &statusLock );
+ mutex_init( &aclLock, LOCK_RPC_ACL );
randomRunId = (((json_int_t)getpid()) << 16) | (json_int_t)time(NULL);
// </guard>
if ( sizeof(randomRunId) > 4 ) {
@@ -123,10 +121,8 @@ void rpc_sendStatsJson(int sock, dnbd3_host_t* host, const void* data, const int
return;
}
do {
- mutex_lock( &statusLock );
const int curCount = ++status.count;
UPDATE_LOADSTATE( curCount );
- mutex_unlock( &statusLock );
if ( curCount > MAX_CLIENTS ) {
sendReply( sock, "503 Service Temporarily Unavailable", "text/plain", "Too many HTTP clients", -1, HTTP_CLOSE );
goto func_return;
@@ -141,13 +137,13 @@ void rpc_sendStatsJson(int sock, dnbd3_host_t* host, const void* data, const int
bool hasName = false;
bool ok;
int keepAlive = HTTP_KEEPALIVE;
- do {
+ while ( !_shutdown ) {
// Read request from client
struct phr_header headers[100];
size_t numHeaders, prevLen = 0, consumed;
struct string method, path;
int minorVersion;
- do {
+ while ( !_shutdown ) {
// Parse before calling recv, there might be a complete pipelined request in the buffer already
// If the request is incomplete, we allow exactly one additional recv() to complete it.
// This should suffice for real world scenarios as I don't know of any
@@ -192,15 +188,15 @@ void rpc_sendStatsJson(int sock, dnbd3_host_t* host, const void* data, const int
sendReply( sock, "400 Bad Request", "text/plain", "Server cannot understand what you're trying to say", -1, HTTP_CLOSE );
goto func_return;
}
- } while ( true );
+ } // Loop while request header incomplete
+ if ( _shutdown )
+ break;
if ( keepAlive == HTTP_KEEPALIVE ) {
// Only keep the connection alive (and indicate so) if the client seems to support this
if ( minorVersion == 0 || hasHeaderValue( headers, numHeaders, &STR_CONNECTION, &STR_CLOSE ) ) {
keepAlive = HTTP_CLOSE;
} else { // And if there aren't too many active HTTP sessions
- mutex_lock( &statusLock );
if ( status.overloaded ) keepAlive = HTTP_CLOSE;
- mutex_unlock( &statusLock );
}
}
if ( method.s != NULL && path.s != NULL ) {
@@ -219,7 +215,8 @@ void rpc_sendStatsJson(int sock, dnbd3_host_t* host, const void* data, const int
} else {
ok = sendReply( sock, "404 Not found", "text/plain", "Nothing", -1, keepAlive );
}
- if ( !ok ) break;
+ if ( !ok )
+ break;
}
// hoff might be beyond end if the client sent another request (burst)
const ssize_t extra = hoff - consumed;
@@ -231,13 +228,11 @@ void rpc_sendStatsJson(int sock, dnbd3_host_t* host, const void* data, const int
hasName = true;
setThreadName( "HTTP" );
}
- } while (true);
+ } // Loop while more requests
func_return:;
do {
- mutex_lock( &statusLock );
const int curCount = --status.count;
UPDATE_LOADSTATE( curCount );
- mutex_unlock( &statusLock );
} while (0);
}
diff --git a/src/server/server.c b/src/server/server.c
index 10ab208..0dddea7 100644
--- a/src/server/server.c
+++ b/src/server/server.c
@@ -37,6 +37,8 @@
#include <signal.h>
#include <getopt.h>
#include <assert.h>
+#include <sys/types.h>
+#include <unistd.h>
#define LONGOPT_CRC4 1000
#define LONGOPT_ASSERT 1001
@@ -45,6 +47,26 @@
#define LONGOPT_SIZE 1004
#define LONGOPT_ERRORMSG 1005
+typedef struct _job job_t;
+
+struct _job {
+ job_t *next;
+ void *(*startRoutine)(void *);
+ void *arg;
+ ticks dueDate;
+ int intervalSecs;
+};
+
+static job_t *jobHead;
+static _Atomic(job_t *) newJob;
+static bool hasTimerThread = false;
+static pthread_t timerThread;
+
+static pid_t mainPid;
+static pthread_t mainThread;
+
+#define DEFAULT_TIMER_TIMEOUT (60)
+
static poll_list_t *listeners = NULL;
/**
@@ -71,6 +93,12 @@ static void dnbd3_handleSignal2(int signum, siginfo_t *info, void *data);
static void* server_asyncImageListLoad(void *data);
+static void* timerMainloop(void*);
+
+static int handlePendingJobs(void);
+
+static void queueJobInternal(job_t *job);
+
/**
* Print help text for usage instructions
*/
@@ -105,14 +133,21 @@ void dnbd3_printVersion()
/**
* Clean up structs, connections, write out data, then exit
*/
-void dnbd3_cleanup()
+_Noreturn static void dnbd3_cleanup()
{
int retries;
_shutdown = true;
logadd( LOG_INFO, "Cleanup..." );
- if ( listeners != NULL ) sock_destroyPollList( listeners );
+ if ( hasTimerThread ) {
+ pthread_kill( timerThread, SIGINT );
+ thread_join( timerThread, NULL );
+ }
+
+ if ( listeners != NULL ) {
+ sock_destroyPollList( listeners );
+ }
listeners = NULL;
// Kill connection to all clients
@@ -121,9 +156,6 @@ void dnbd3_cleanup()
// Disable threadpool
threadpool_close();
- // Terminate the altserver checking thread
- altservers_shutdown();
-
// Terminate all uplinks
image_killUplinks();
@@ -133,8 +165,7 @@ void dnbd3_cleanup()
// Wait for clients to disconnect
net_waitForAllDisconnected();
- // Watchdog not needed anymore
- debug_locks_stop_watchdog();
+ threadpool_waitEmpty();
// Clean up images
retries = 5;
@@ -178,6 +209,8 @@ int main(int argc, char *argv[])
{ 0, 0, 0, 0 }
};
+ mainPid = getpid();
+ mainThread = pthread_self();
opt = getopt_long( argc, argv, optString, longOpts, &longIndex );
while ( opt != -1 ) {
@@ -201,6 +234,15 @@ int main(int argc, char *argv[])
case LONGOPT_CRC4:
return image_generateCrcFile( optarg ) ? 0 : EXIT_FAILURE;
case LONGOPT_ASSERT:
+ printf( "Now leaking memory:\n" );
+ char *bla = malloc( 10 );
+ bla[2] = 3;
+ bla = NULL;
+ printf( "Testing use after free:\n" );
+ char *test = malloc( 10 );
+ test[0] = 1;
+ free( (void*)test );
+ test[1] = 2;
printf( "Testing a failing assertion:\n" );
assert( 4 == 5 );
printf( "Assertion 4 == 5 seems to hold. ;-)\n" );
@@ -303,16 +345,11 @@ int main(int argc, char *argv[])
logadd( LOG_WARNING, "Could not load alt-servers. Does the file exist in %s?", _configDir );
}
-#ifdef _DEBUG
- debug_locks_start_watchdog();
-#endif
-
// setup signal handler
- struct sigaction sa;
- memset( &sa, 0, sizeof(sa) );
- sa.sa_sigaction = dnbd3_handleSignal2;
- sa.sa_flags = SA_SIGINFO;
- //sa.sa_mask = ;
+ struct sigaction sa = {
+ .sa_sigaction = dnbd3_handleSignal2,
+ .sa_flags = SA_SIGINFO,
+ };
sigaction( SIGTERM, &sa, NULL );
sigaction( SIGINT, &sa, NULL );
sigaction( SIGUSR1, &sa, NULL );
@@ -347,6 +384,10 @@ int main(int argc, char *argv[])
logadd( LOG_INFO, "Server is ready. (%s)", VERSION_STRING );
+ if ( thread_create( &timerThread, NULL, &timerMainloop, NULL ) == 0 ) {
+ hasTimerThread = true;
+ }
+
// +++++++++++++++++++++++++++++++++++++++++++++++++++ main loop
struct sockaddr_storage client;
socklen_t len;
@@ -370,7 +411,7 @@ int main(int argc, char *argv[])
//
len = sizeof(client);
fd = sock_accept( listeners, &client, &len );
- if ( fd < 0 ) {
+ if ( fd == -1 ) {
const int err = errno;
if ( err == EINTR || err == EAGAIN ) continue;
logadd( LOG_ERROR, "Client accept failure (err=%d)", err );
@@ -474,8 +515,16 @@ static void dnbd3_handleSignal(int signum)
static void dnbd3_handleSignal2(int signum, siginfo_t *info, void *data UNUSED)
{
- memcpy( &lastSignal, info, sizeof(siginfo_t) );
- dnbd3_handleSignal( signum );
+ if ( info->si_pid != mainPid ) { // Source is not this process
+ memcpy( &lastSignal, info, sizeof(siginfo_t) ); // Copy signal info
+ if ( info->si_pid != 0 && !pthread_equal( pthread_self(), mainThread ) ) {
+ pthread_kill( mainThread, info->si_signo ); // And relay signal if we're not the main thread
+ }
+ }
+ if ( pthread_equal( pthread_self(), mainThread ) ) {
+ // Signal received by main thread -- handle
+ dnbd3_handleSignal( signum );
+ }
}
uint32_t dnbd3_serverUptime()
@@ -493,3 +542,85 @@ static void* server_asyncImageListLoad(void *data UNUSED)
return NULL;
}
+static void* timerMainloop(void* stuff UNUSED)
+{
+ setThreadName( "timer" );
+ while ( !_shutdown ) {
+ // Handle jobs/timer events; returns timeout until next event
+ int to = handlePendingJobs();
+ sleep( MIN( MAX( 1, to ), DEFAULT_TIMER_TIMEOUT ) );
+ }
+ logadd( LOG_DEBUG1, "Timer thread done" );
+ return NULL;
+}
+
+static int handlePendingJobs(void)
+{
+ declare_now;
+ job_t *todo, **temp, *old;
+ int diff;
+ todo = jobHead;
+ for ( temp = &todo; *temp != NULL; temp = &(*temp)->next ) {
+ diff = (int)timing_diff( &now, &(*temp)->dueDate );
+ if ( diff > 0 ) // Found one that's in the future
+ break;
+ }
+ jobHead = *temp; // Make it list head
+ *temp = NULL; // Split off part before that
+ while ( todo != NULL ) {
+ threadpool_run( todo->startRoutine, todo->arg );
+ old = todo;
+ todo = todo->next;
+ if ( old->intervalSecs == 0 ) {
+ free( old ); // oneshot
+ } else {
+ timing_set( &old->dueDate, &now, old->intervalSecs );
+ queueJobInternal( old ); // repeated
+ }
+ }
+ // See if any new jobs have been queued
+ while ( newJob != NULL ) {
+ todo = newJob;
+ // NULL should never happen since we're the only consumer
+ assert( todo != NULL );
+ if ( !atomic_compare_exchange_weak( &newJob, &todo, NULL ) )
+ continue;
+ do {
+ old = todo;
+ todo = todo->next;
+ queueJobInternal( old );
+ } while ( todo != NULL );
+ }
+ // Return new timeout
+ if ( jobHead == NULL )
+ return DEFAULT_TIMER_TIMEOUT;
+ return (int)timing_diff( &now, &jobHead->dueDate );
+}
+
+static void queueJobInternal(job_t *job)
+{
+ assert( job != NULL );
+ job_t **it;
+ for ( it = &jobHead; *it != NULL; it = &(*it)->next ) {
+ if ( timing_1le2( &job->dueDate, &(*it)->dueDate ) )
+ break;
+ }
+ job->next = *it;
+ *it = job;
+}
+
+/**
+ * Queue a job for execution by the timer thread.
+ * @param startRoutine function to run (dispatched via the thread pool)
+ * @param arg opaque argument passed to startRoutine
+ * @param delaySecs seconds from now until (first) execution
+ * @param intervalSecs if != 0, re-run the job every intervalSecs seconds
+ */
+void server_addJob(void *(*startRoutine)(void *), void *arg, int delaySecs, int intervalSecs)
+{
+	declare_now;
+	job_t *new = malloc( sizeof(*new) );
+	if ( new == NULL ) {
+		// Don't dereference a failed allocation; log and drop the job
+		logadd( LOG_ERROR, "Could not allocate job struct" );
+		return;
+	}
+	new->startRoutine = startRoutine;
+	new->arg = arg;
+	new->intervalSecs = intervalSecs;
+	timing_set( &new->dueDate, &now, delaySecs );
+	// Lock-free push onto the 'newJob' stack; the timer thread is the
+	// single consumer and drains it in handlePendingJobs()
+	for ( ;; ) {
+		new->next = newJob;
+		if ( atomic_compare_exchange_weak( &newJob, &new->next, new ) )
+			break;
+	}
+}
+
diff --git a/src/server/server.h b/src/server/server.h
index bab8421..a026eb6 100644
--- a/src/server/server.h
+++ b/src/server/server.h
@@ -24,8 +24,8 @@
#include "globals.h"
#include "../types.h"
-void dnbd3_cleanup();
uint32_t dnbd3_serverUptime();
+void server_addJob(void *(*startRoutine)(void *), void *arg, int delaySecs, int intervalSecs);
#if !defined(_FILE_OFFSET_BITS) || _FILE_OFFSET_BITS != 64
#error Please set _FILE_OFFSET_BITS to 64 in your makefile/configuration
diff --git a/src/server/threadpool.c b/src/server/threadpool.c
index dac0980..0b46fd6 100644
--- a/src/server/threadpool.c
+++ b/src/server/threadpool.c
@@ -4,7 +4,6 @@
#include "locks.h"
typedef struct _entry_t {
- struct _entry_t *next;
pthread_t thread;
dnbd3_signal_t* signal;
void *(*startRoutine)(void *);
@@ -14,16 +13,21 @@ typedef struct _entry_t {
static void *threadpool_worker(void *entryPtr);
static pthread_attr_t threadAttrs;
-
-static int maxIdleThreads = -1;
-static entry_t *pool = NULL;
-static pthread_mutex_t poolLock;
+static atomic_int maxIdleThreads = -1;
+static _Atomic(entry_t *) *pool = NULL;
+static atomic_int activeThreads = 0;
bool threadpool_init(int maxIdle)
{
- if ( maxIdle < 0 || maxIdleThreads >= 0 ) return false;
- mutex_init( &poolLock );
- maxIdleThreads = maxIdle;
+ if ( maxIdle < 0 )
+ return false;
+ int exp = -1;
+ if ( !atomic_compare_exchange_strong( &maxIdleThreads, &exp, maxIdle ) )
+ return false;
+ pool = malloc( maxIdle * sizeof(*pool) );
+ for ( int i = 0; i < maxIdle; ++i ) {
+ atomic_init( &pool[i], NULL );
+ }
pthread_attr_init( &threadAttrs );
pthread_attr_setdetachstate( &threadAttrs, PTHREAD_CREATE_DETACHED );
return true;
@@ -31,28 +35,47 @@ bool threadpool_init(int maxIdle)
void threadpool_close()
{
- _shutdown = true;
- if ( maxIdleThreads < 0 ) return;
- mutex_lock( &poolLock );
- maxIdleThreads = -1;
- entry_t *ptr = pool;
- while ( ptr != NULL ) {
- entry_t *current = ptr;
- ptr = ptr->next;
- signal_call( current->signal );
+ int max = atomic_exchange( &maxIdleThreads, -1 );
+ if ( max <= 0 )
+ return;
+ for ( int i = 0; i < max; ++i ) {
+ entry_t *cur = pool[i];
+ if ( cur != NULL && atomic_compare_exchange_strong( &pool[i], &cur, NULL ) ) {
+ signal_call( cur->signal );
+ }
}
- mutex_unlock( &poolLock );
- mutex_destroy( &poolLock );
+}
+
+/**
+ * Block until all threads spawned by the pool have exited.
+ * Polls activeThreads once per second and logs progress;
+ * intended to be called only during shutdown.
+ */
+void threadpool_waitEmpty()
+{
+	if ( activeThreads == 0 )
+		return;
+	do {
+		sleep( 1 );
+		logadd( LOG_INFO, "Threadpool: %d threads still active", (int)activeThreads );
+	} while ( activeThreads != 0 );
+}
bool threadpool_run(void *(*startRoutine)(void *), void *arg)
{
- mutex_lock( &poolLock );
- entry_t *entry = pool;
- if ( entry != NULL ) pool = entry->next;
- mutex_unlock( &poolLock );
- if ( entry == NULL ) {
- entry = (entry_t*)malloc( sizeof(entry_t) );
+ if ( unlikely( _shutdown ) ) {
+ logadd( LOG_MINOR, "Cannot submit work to threadpool while shutting down!" );
+ return false;
+ }
+ if ( unlikely( startRoutine == NULL ) ) {
+ logadd( LOG_ERROR, "Trying to queue work for thread pool with NULL startRoutine" );
+ return false; // Or bail out!?
+ }
+ entry_t *entry = NULL;
+ for ( int i = 0; i < maxIdleThreads; ++i ) {
+ entry_t *cur = pool[i];
+ if ( cur != NULL && atomic_compare_exchange_weak( &pool[i], &cur, NULL ) ) {
+ entry = cur;
+ break;
+ }
+ }
+ if ( unlikely( entry == NULL ) ) {
+ entry = malloc( sizeof(entry_t) );
if ( entry == NULL ) {
logadd( LOG_WARNING, "Could not alloc entry_t for new thread\n" );
return false;
@@ -69,10 +92,11 @@ bool threadpool_run(void *(*startRoutine)(void *), void *arg)
free( entry );
return false;
}
+ activeThreads++;
}
- entry->next = NULL;
entry->startRoutine = startRoutine;
entry->arg = arg;
+ atomic_thread_fence( memory_order_release );
signal_call( entry->signal );
return true;
}
@@ -84,43 +108,44 @@ static void *threadpool_worker(void *entryPtr)
{
blockNoncriticalSignals();
entry_t *entry = (entry_t*)entryPtr;
+ int ret;
for ( ;; ) {
+keep_going:;
// Wait for signal from outside that we have work to do
- int ret = signal_clear( entry->signal );
- if ( _shutdown ) break;
- if ( ret > 0 ) {
- if ( entry->startRoutine == NULL ) {
- logadd( LOG_DEBUG1, "Worker woke up but has no work to do!" );
- continue;
- }
- // Start assigned work
- (*entry->startRoutine)( entry->arg );
- // Reset vars for safety
- entry->startRoutine = NULL;
- entry->arg = NULL;
- if ( _shutdown ) break;
- // Put thread back into pool if there are less than maxIdleThreds threads, just die otherwise
- int threadCount = 0;
- mutex_lock( &poolLock );
- entry_t *ptr = pool;
- while ( ptr != NULL ) {
- threadCount++;
- ptr = ptr->next;
- }
- if ( threadCount >= maxIdleThreads ) {
- mutex_unlock( &poolLock );
- break;
- }
- entry->next = pool;
- pool = entry;
- mutex_unlock( &poolLock );
- setThreadName( "[pool]" );
- } else {
+ ret = signal_clear( entry->signal );
+ atomic_thread_fence( memory_order_acquire );
+ if ( _shutdown )
+ break;
+ if ( ret <= 0 ) {
logadd( LOG_DEBUG1, "Unexpected return value %d for signal_wait in threadpool worker!", ret );
+ continue;
+ }
+ if ( entry->startRoutine == NULL ) {
+ logadd( LOG_ERROR, "Worker woke up but has no work to do!" );
+ exit( 1 );
+ }
+ // Start assigned work
+ (*entry->startRoutine)( entry->arg );
+ // Reset vars for safety
+ entry->startRoutine = NULL;
+ entry->arg = NULL;
+ atomic_thread_fence( memory_order_release );
+ if ( _shutdown )
+ break;
+ // Put thread back into pool
+ setThreadName( "[pool]" );
+ for ( int i = 0; i < maxIdleThreads; ++i ) {
+ entry_t *exp = NULL;
+ if ( atomic_compare_exchange_weak( &pool[i], &exp, entry ) ) {
+ goto keep_going;
+ }
}
+ // Reaching here means pool is full; just let the thread exit
+ break;
}
signal_close( entry->signal );
free( entry );
+ activeThreads--;
return NULL;
}
diff --git a/src/server/threadpool.h b/src/server/threadpool.h
index 15dd151..ee0b3aa 100644
--- a/src/server/threadpool.h
+++ b/src/server/threadpool.h
@@ -18,6 +18,11 @@ bool threadpool_init(int maxIdleThreadCount);
void threadpool_close();
/**
+ * Block until all threads spawned have exited
+ */
+void threadpool_waitEmpty();
+
+/**
* Run a thread using the thread pool.
* @param startRoutine function to run in new thread
* @param arg argument to pass to thead
diff --git a/src/server/uplink.c b/src/server/uplink.c
index 682b986..d77be9c 100644
--- a/src/server/uplink.c
+++ b/src/server/uplink.c
@@ -3,10 +3,12 @@
#include "locks.h"
#include "image.h"
#include "altservers.h"
+#include "net.h"
#include "../shared/sockhelper.h"
#include "../shared/protocol.h"
#include "../shared/timing.h"
#include "../shared/crc32.h"
+#include "reference.h"
#include <assert.h>
#include <inttypes.h>
@@ -21,19 +23,43 @@
#define REP_NONE ( (uint64_t)0xffffffffffffffff )
+// Status of request in queue
+
+// Slot is free, can be used.
+// Must only be set in uplink_handle_receive() or uplink_remove_client()
+#define ULR_FREE 0
+// Slot has been filled with a request that hasn't been sent to the upstream server yet, matching request can safely rely on reuse.
+// Must only be set in uplink_request()
+#define ULR_NEW 1
+// Slot is occupied, reply has not yet been received, matching request can safely rely on reuse.
+// Must only be set in uplink_mainloop() or uplink_request()
+#define ULR_PENDING 2
+// Slot is being processed, do not consider for hop on.
+// Must only be set in uplink_handle_receive()
+#define ULR_PROCESSING 3
+
+static const char *const NAMES_ULR[4] = {
+ [ULR_FREE] = "ULR_FREE",
+ [ULR_NEW] = "ULR_NEW",
+ [ULR_PENDING] = "ULR_PENDING",
+ [ULR_PROCESSING] = "ULR_PROCESSING",
+};
+
static atomic_uint_fast64_t totalBytesReceived = 0;
+static void cancelAllRequests(dnbd3_uplink_t *uplink);
+static void uplink_free(ref *ref);
static void* uplink_mainloop(void *data);
-static void uplink_sendRequests(dnbd3_connection_t *link, bool newOnly);
-static int uplink_findNextIncompleteHashBlock(dnbd3_connection_t *link, const int lastBlockIndex);
-static void uplink_handleReceive(dnbd3_connection_t *link);
+static void uplink_sendRequests(dnbd3_uplink_t *uplink, bool newOnly);
+static int uplink_findNextIncompleteHashBlock(dnbd3_uplink_t *uplink, const int lastBlockIndex);
+static void uplink_handleReceive(dnbd3_uplink_t *uplink);
static int uplink_sendKeepalive(const int fd);
-static void uplink_addCrc32(dnbd3_connection_t *uplink);
-static void uplink_sendReplicationRequest(dnbd3_connection_t *link);
-static bool uplink_reopenCacheFd(dnbd3_connection_t *link, const bool force);
-static bool uplink_saveCacheMap(dnbd3_connection_t *link);
-static bool uplink_connectionShouldShutdown(dnbd3_connection_t *link);
-static void uplink_connectionFailed(dnbd3_connection_t *link, bool findNew);
+static void uplink_addCrc32(dnbd3_uplink_t *uplink);
+static void uplink_sendReplicationRequest(dnbd3_uplink_t *uplink);
+static bool uplink_reopenCacheFd(dnbd3_uplink_t *uplink, const bool force);
+static bool uplink_saveCacheMap(dnbd3_uplink_t *uplink);
+static bool uplink_connectionShouldShutdown(dnbd3_uplink_t *uplink);
+static void uplink_connectionFailed(dnbd3_uplink_t *uplink, bool findNew);
// ############ uplink connection handling
@@ -54,56 +80,63 @@ uint64_t uplink_getTotalBytesReceived()
bool uplink_init(dnbd3_image_t *image, int sock, dnbd3_host_t *host, int version)
{
if ( !_isProxy || _shutdown ) return false;
- dnbd3_connection_t *link = NULL;
assert( image != NULL );
mutex_lock( &image->lock );
- if ( image->uplink != NULL && !image->uplink->shutdown ) {
+ dnbd3_uplink_t *uplink = ref_get_uplink( &image->uplinkref );
+ if ( uplink != NULL ) {
mutex_unlock( &image->lock );
- if ( sock >= 0 ) close( sock );
+ if ( sock != -1 ) {
+ close( sock );
+ }
+ ref_put( &uplink->reference );
return true; // There's already an uplink, so should we consider this success or failure?
}
if ( image->cache_map == NULL ) {
logadd( LOG_WARNING, "Uplink was requested for image %s, but it is already complete", image->name );
goto failure;
}
- link = image->uplink = calloc( 1, sizeof(dnbd3_connection_t) );
- mutex_init( &link->queueLock );
- mutex_init( &link->rttLock );
- mutex_init( &link->sendMutex );
- link->image = image;
- link->bytesReceived = 0;
- link->idleTime = 0;
- link->queueLen = 0;
- mutex_lock( &link->sendMutex );
- link->fd = -1;
- mutex_unlock( &link->sendMutex );
- link->cacheFd = -1;
- link->signal = NULL;
- link->replicationHandle = REP_NONE;
- mutex_lock( &link->rttLock );
- link->cycleDetected = false;
- if ( sock >= 0 ) {
- link->betterFd = sock;
- link->betterServer = *host;
- link->rttTestResult = RTT_DOCHANGE;
- link->betterVersion = version;
+ uplink = calloc( 1, sizeof(dnbd3_uplink_t) );
+ // Start with one reference for the uplink thread. We'll return it when the thread finishes
+ ref_init( &uplink->reference, uplink_free, 1 );
+ mutex_init( &uplink->queueLock, LOCK_UPLINK_QUEUE );
+ mutex_init( &uplink->rttLock, LOCK_UPLINK_RTT );
+ mutex_init( &uplink->sendMutex, LOCK_UPLINK_SEND );
+ uplink->image = image;
+ uplink->bytesReceived = 0;
+ uplink->idleTime = 0;
+ uplink->queueLen = 0;
+ uplink->cacheFd = -1;
+ uplink->signal = NULL;
+ uplink->replicationHandle = REP_NONE;
+ mutex_lock( &uplink->rttLock );
+ mutex_lock( &uplink->sendMutex );
+ uplink->current.fd = -1;
+ mutex_unlock( &uplink->sendMutex );
+ uplink->cycleDetected = false;
+ if ( sock != -1 ) {
+ uplink->better.fd = sock;
+ int index = altservers_hostToIndex( host );
+ uplink->better.index = index == -1 ? 0 : index; // Prevent invalid array access
+ uplink->rttTestResult = RTT_DOCHANGE;
+ uplink->better.version = version;
} else {
- link->betterFd = -1;
- link->rttTestResult = RTT_IDLE;
+ uplink->better.fd = -1;
+ uplink->rttTestResult = RTT_IDLE;
}
- mutex_unlock( &link->rttLock );
- link->recvBufferLen = 0;
- link->shutdown = false;
- if ( 0 != thread_create( &(link->thread), NULL, &uplink_mainloop, (void *)link ) ) {
+ mutex_unlock( &uplink->rttLock );
+ uplink->recvBufferLen = 0;
+ uplink->shutdown = false;
+ if ( 0 != thread_create( &(uplink->thread), NULL, &uplink_mainloop, (void *)uplink ) ) {
logadd( LOG_ERROR, "Could not start thread for new uplink." );
goto failure;
}
+ ref_setref( &image->uplinkref, &uplink->reference );
mutex_unlock( &image->lock );
return true;
failure: ;
- if ( link != NULL ) {
- free( link );
- link = image->uplink = NULL;
+ if ( uplink != NULL ) {
+ free( uplink );
+ uplink = NULL;
}
mutex_unlock( &image->lock );
return false;
@@ -114,45 +147,97 @@ failure: ;
* Calling it multiple times, even concurrently, will
* not break anything.
*/
-void uplink_shutdown(dnbd3_image_t *image)
+bool uplink_shutdown(dnbd3_image_t *image)
{
- bool join = false;
- pthread_t thread;
assert( image != NULL );
mutex_lock( &image->lock );
- if ( image->uplink == NULL ) {
+ dnbd3_uplink_t *uplink = ref_get_uplink( &image->uplinkref );
+ if ( uplink == NULL ) {
mutex_unlock( &image->lock );
- return;
+ return true;
}
- dnbd3_connection_t * const uplink = image->uplink;
mutex_lock( &uplink->queueLock );
- if ( !uplink->shutdown ) {
- uplink->shutdown = true;
+ bool exp = false;
+ if ( atomic_compare_exchange_strong( &uplink->shutdown, &exp, true ) ) {
+ image->users++; // Prevent free while uplink shuts down
signal_call( uplink->signal );
- thread = uplink->thread;
- join = true;
+ } else {
+ logadd( LOG_ERROR, "This will never happen. '%s:%d'", image->name, (int)image->rid );
}
+ cancelAllRequests( uplink );
+ ref_setref( &image->uplinkref, NULL );
+ ref_put( &uplink->reference );
mutex_unlock( &uplink->queueLock );
- bool wait = image->uplink != NULL;
+ bool retval = ( exp && image->users == 0 );
mutex_unlock( &image->lock );
- if ( join ) thread_join( thread, NULL );
- while ( wait ) {
- usleep( 5000 );
- mutex_lock( &image->lock );
- wait = image->uplink != NULL && image->uplink->shutdown;
- mutex_unlock( &image->lock );
+ return exp;
+}
+
+/**
+ * Cancel all requests of this uplink.
+ * HOLD QUEUE LOCK WHILE CALLING
+ */
+static void cancelAllRequests(dnbd3_uplink_t *uplink)
+{
+ for ( int i = 0; i < uplink->queueLen; ++i ) {
+ if ( uplink->queue[i].status != ULR_FREE ) {
+ net_sendReply( uplink->queue[i].client, CMD_ERROR, uplink->queue[i].handle );
+ uplink->queue[i].status = ULR_FREE;
+ }
}
+ uplink->queueLen = 0;
+}
+
+static void uplink_free(ref *ref)
+{
+ dnbd3_uplink_t *uplink = container_of(ref, dnbd3_uplink_t, reference);
+ logadd( LOG_DEBUG1, "Freeing uplink for '%s:%d'", uplink->image->name, (int)uplink->image->rid );
+ assert( uplink->queueLen == 0 );
+ signal_close( uplink->signal );
+ if ( uplink->current.fd != -1 ) {
+ close( uplink->current.fd );
+ uplink->current.fd = -1;
+ }
+ if ( uplink->better.fd != -1 ) {
+ close( uplink->better.fd );
+ uplink->better.fd = -1;
+ }
+ mutex_destroy( &uplink->queueLock );
+ mutex_destroy( &uplink->rttLock );
+ mutex_destroy( &uplink->sendMutex );
+ free( uplink->recvBuffer );
+ uplink->recvBuffer = NULL;
+ if ( uplink->cacheFd != -1 ) {
+ close( uplink->cacheFd );
+ }
+ // TODO Requeue any requests
+ dnbd3_image_t *image = image_lock( uplink->image );
+ if ( image != NULL ) {
+ // != NULL means image is still in list...
+ if ( !_shutdown && image->cache_map != NULL ) {
+ // Ingegrity checker must have found something in the meantime
+ uplink_init( image, -1, NULL, 0 );
+ }
+ image_release( image );
+ }
+ // Finally let go of image. It was acquired either in uplink_shutdown or in the cleanup code
+ // of the uplink thread, depending on who set the uplink->shutdown flag.
+ image_release( image );
+ free( uplink ); // !!!
}
/**
* Remove given client from uplink request queue
* Locks on: uplink.queueLock
*/
-void uplink_removeClient(dnbd3_connection_t *uplink, dnbd3_client_t *client)
+void uplink_removeClient(dnbd3_uplink_t *uplink, dnbd3_client_t *client)
{
mutex_lock( &uplink->queueLock );
for (int i = uplink->queueLen - 1; i >= 0; --i) {
if ( uplink->queue[i].client == client ) {
+ // Make sure client doesn't get destroyed while we're sending it data
+ mutex_lock( &client->sendMutex );
+ mutex_unlock( &client->sendMutex );
uplink->queue[i].client = NULL;
uplink->queue[i].status = ULR_FREE;
}
@@ -167,89 +252,94 @@ void uplink_removeClient(dnbd3_connection_t *uplink, dnbd3_client_t *client)
*/
bool uplink_request(dnbd3_client_t *client, uint64_t handle, uint64_t start, uint32_t length, uint8_t hops)
{
- if ( client == NULL || client->image == NULL ) return false;
+ if ( client == NULL || client->image == NULL )
+ return false;
if ( length > (uint32_t)_maxPayload ) {
logadd( LOG_WARNING, "Cannot relay request by client; length of %" PRIu32 " exceeds maximum payload", length );
return false;
}
- mutex_lock( &client->image->lock );
- if ( client->image->uplink == NULL ) {
- mutex_unlock( &client->image->lock );
+ dnbd3_uplink_t * const uplink = ref_get_uplink( &client->image->uplinkref );
+ if ( uplink == NULL ) {
logadd( LOG_DEBUG1, "Uplink request for image with no uplink" );
return false;
}
- dnbd3_connection_t * const uplink = client->image->uplink;
if ( uplink->shutdown ) {
- mutex_unlock( &client->image->lock );
logadd( LOG_DEBUG1, "Uplink request for image with uplink shutting down" );
- return false;
+ goto fail_ref;
}
// Check if the client is the same host as the uplink. If so assume this is a circular proxy chain
// This might be a false positive if there are multiple instances running on the same host (IP)
- if ( hops != 0 && isSameAddress( &uplink->currentServer, &client->host ) ) {
- mutex_unlock( &client->image->lock );
- logadd( LOG_WARNING, "Proxy cycle detected (same host)." );
- mutex_lock( &uplink->rttLock );
+ if ( hops != 0 && isSameAddress( altservers_indexToHost( uplink->current.index ), &client->host ) ) {
uplink->cycleDetected = true;
- mutex_unlock( &uplink->rttLock );
signal_call( uplink->signal );
- return false;
+ logadd( LOG_WARNING, "Proxy cycle detected (same host)." );
+ goto fail_ref;
}
int foundExisting = -1; // Index of a pending request that is a superset of our range, -1 otherwise
int existingType = -1; // ULR_* type of existing request
int i;
int freeSlot = -1;
+ int firstUsedSlot = -1;
bool requestLoop = false;
const uint64_t end = start + length;
mutex_lock( &uplink->queueLock );
- mutex_unlock( &client->image->lock );
+ if ( uplink->shutdown ) { // Check again after locking to prevent lost requests
+ goto fail_lock;
+ }
for (i = 0; i < uplink->queueLen; ++i) {
- if ( freeSlot == -1 && uplink->queue[i].status == ULR_FREE ) {
- freeSlot = i;
+ // find free slot to place this request into
+ if ( uplink->queue[i].status == ULR_FREE ) {
+ if ( freeSlot == -1 || existingType != ULR_PROCESSING ) {
+ freeSlot = i;
+ }
continue;
}
- if ( uplink->queue[i].status != ULR_PENDING && uplink->queue[i].status != ULR_NEW ) continue;
- if ( uplink->queue[i].from <= start && uplink->queue[i].to >= end ) {
- if ( hops > uplink->queue[i].hopCount && uplink->queue[i].from == start && uplink->queue[i].to == end ) {
- requestLoop = true;
- break;
- }
- if ( foundExisting == -1 || existingType == ULR_PENDING ) {
- foundExisting = i;
- existingType = uplink->queue[i].status;
- if ( freeSlot != -1 ) break;
- }
+ if ( firstUsedSlot == -1 ) {
+ firstUsedSlot = i;
+ }
+ // find existing request to attach to
+ if ( uplink->queue[i].from > start || uplink->queue[i].to < end )
+ continue; // Range not suitable
+ // Detect potential proxy cycle. New request hopcount is greater, range is same, old request has already been sent -> suspicious
+ if ( hops > uplink->queue[i].hopCount && uplink->queue[i].from == start && uplink->queue[i].to == end && uplink->queue[i].status == ULR_PENDING ) {
+ requestLoop = true;
+ break;
+ }
+ if ( foundExisting == -1 || existingType == ULR_PROCESSING ) {
+ foundExisting = i;
+ existingType = uplink->queue[i].status;
}
}
- if ( requestLoop ) {
- mutex_unlock( &uplink->queueLock );
- logadd( LOG_WARNING, "Rejecting relay of request to upstream proxy because of possible cyclic proxy chain. Incoming hop-count is %" PRIu8 ".", hops );
- mutex_lock( &uplink->rttLock );
+ if ( unlikely( requestLoop ) ) {
uplink->cycleDetected = true;
- mutex_unlock( &uplink->rttLock );
signal_call( uplink->signal );
- return false;
+ logadd( LOG_WARNING, "Rejecting relay of request to upstream proxy because of possible cyclic proxy chain. Incoming hop-count is %" PRIu8 ".", hops );
+ goto fail_lock;
+ }
+ if ( freeSlot < firstUsedSlot && firstUsedSlot < 10 && existingType != ULR_PROCESSING ) {
+ freeSlot = -1; // Not attaching to existing request, make it use a higher slot
}
if ( freeSlot == -1 ) {
if ( uplink->queueLen >= SERVER_MAX_UPLINK_QUEUE ) {
- mutex_unlock( &uplink->queueLock );
logadd( LOG_WARNING, "Uplink queue is full, consider increasing SERVER_MAX_UPLINK_QUEUE. Dropping client..." );
- return false;
+ goto fail_lock;
}
freeSlot = uplink->queueLen++;
}
// Do not send request to uplink server if we have a matching pending request AND the request either has the
- // status ULR_NEW OR we found a free slot with LOWER index than the one we attach to. Otherwise
+ // status ULR_NEW/PENDING OR we found a free slot with LOWER index than the one we attach to. Otherwise
// explicitly send this request to the uplink server. The second condition mentioned here is to prevent
// a race condition where the reply for the outstanding request already arrived and the uplink thread
// is currently traversing the request queue. As it is processing the queue from highest to lowest index, it might
// already have passed the index of the free slot we determined, but not reached the existing request we just found above.
- if ( foundExisting != -1 && existingType != ULR_NEW && freeSlot > foundExisting ) foundExisting = -1; // -1 means "send request"
+ if ( foundExisting != -1 && existingType == ULR_PROCESSING && freeSlot > foundExisting ) {
+ foundExisting = -1; // -1 means "send request"
+ }
#ifdef _DEBUG
if ( foundExisting != -1 ) {
- logadd( LOG_DEBUG2, "%p (%s) Found existing request of type %s at slot %d, attaching in slot %d.\n", (void*)uplink, uplink->image->name, existingType == ULR_NEW ? "ULR_NEW" : "ULR_PENDING", foundExisting, freeSlot );
+ logadd( LOG_DEBUG2, "%p (%s) Found existing request of type %s at slot %d, attaching in slot %d.\n", (void*)uplink, uplink->image->name, NAMES_ULR[existingType], foundExisting, freeSlot );
logadd( LOG_DEBUG2, "Original %" PRIu64 "-%" PRIu64 " (%p)\n"
"New %" PRIu64 "-%" PRIu64 " (%p)\n",
uplink->queue[foundExisting].from, uplink->queue[foundExisting].to, (void*)uplink->queue[foundExisting].client,
@@ -262,7 +352,8 @@ bool uplink_request(dnbd3_client_t *client, uint64_t handle, uint64_t start, uin
uplink->queue[freeSlot].handle = handle;
uplink->queue[freeSlot].client = client;
//int old = uplink->queue[freeSlot].status;
- uplink->queue[freeSlot].status = (foundExisting == -1 ? ULR_NEW : ULR_PENDING);
+ uplink->queue[freeSlot].status = ( foundExisting == -1 ? ULR_NEW :
+ ( existingType == ULR_NEW ? ULR_PENDING : existingType ) );
uplink->queue[freeSlot].hopCount = hops;
#ifdef _DEBUG
timing_get( &uplink->queue[freeSlot].entered );
@@ -270,45 +361,63 @@ bool uplink_request(dnbd3_client_t *client, uint64_t handle, uint64_t start, uin
#endif
mutex_unlock( &uplink->queueLock );
- if ( foundExisting != -1 )
+ if ( foundExisting != -1 ) {
+ ref_put( &uplink->reference );
return true; // Attached to pending request, do nothing
+ }
// See if we can fire away the request
- if ( mutex_trylock( &uplink->sendMutex ) != 0 ) {
+ if ( unlikely( mutex_trylock( &uplink->sendMutex ) != 0 ) ) {
logadd( LOG_DEBUG2, "Could not trylock send mutex, queueing uplink request" );
} else {
- if ( uplink->fd == -1 ) {
+ if ( unlikely( uplink->current.fd == -1 ) ) {
mutex_unlock( &uplink->sendMutex );
logadd( LOG_DEBUG2, "Cannot do direct uplink request: No socket open" );
} else {
const uint64_t reqStart = uplink->queue[freeSlot].from & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
const uint32_t reqSize = (uint32_t)(((uplink->queue[freeSlot].to + DNBD3_BLOCK_SIZE - 1) & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1)) - reqStart);
if ( hops < 200 ) ++hops;
- const bool ret = dnbd3_get_block( uplink->fd, reqStart, reqSize, reqStart, COND_HOPCOUNT( uplink->version, hops ) );
+ const bool ret = dnbd3_get_block( uplink->current.fd, reqStart, reqSize, reqStart, COND_HOPCOUNT( uplink->current.version, hops ) );
mutex_unlock( &uplink->sendMutex );
- if ( !ret ) {
+ if ( unlikely( !ret ) ) {
logadd( LOG_DEBUG2, "Could not send out direct uplink request, queueing" );
} else {
+ // Direct send succeeded, update queue entry from NEW to PENDING, so the request won't be sent again
+ int state;
mutex_lock( &uplink->queueLock );
- if ( uplink->queue[freeSlot].handle == handle && uplink->queue[freeSlot].client == client && uplink->queue[freeSlot].status == ULR_NEW ) {
- uplink->queue[freeSlot].status = ULR_PENDING;
- logadd( LOG_DEBUG2, "Succesful direct uplink request" );
+ if ( !uplink->shutdown && uplink->queue[freeSlot].handle == handle && uplink->queue[freeSlot].client == client ) {
+ state = uplink->queue[freeSlot].status;
+ if ( uplink->queue[freeSlot].status == ULR_NEW ) {
+ uplink->queue[freeSlot].status = ULR_PENDING;
+ }
} else {
- logadd( LOG_DEBUG2, "Weird queue update fail for direct uplink request" );
+ state = -1;
}
mutex_unlock( &uplink->queueLock );
+ if ( state == -1 ) {
+ logadd( LOG_DEBUG2, "Direct uplink request queue entry gone after sending and re-locking queue. *shrug*" );
+ } else if ( state == ULR_NEW ) {
+ //logadd( LOG_DEBUG2, "Direct uplink request" );
+ } else {
+ logadd( LOG_DEBUG2, "Direct uplink request queue entry changed to %s afte sending (expected ULR_NEW).", NAMES_ULR[uplink->queue[freeSlot].status] );
+ }
+ ref_put( &uplink->reference );
return true;
}
// Fall through to waking up sender thread
}
}
- if ( foundExisting == -1 ) { // Only wake up uplink thread if the request needs to be relayed
- if ( signal_call( uplink->signal ) == SIGNAL_ERROR ) {
- logadd( LOG_WARNING, "Cannot wake up uplink thread; errno=%d", (int)errno );
- }
+ if ( signal_call( uplink->signal ) == SIGNAL_ERROR ) {
+ logadd( LOG_WARNING, "Cannot wake up uplink thread; errno=%d", (int)errno );
}
+ ref_put( &uplink->reference );
return true;
+fail_lock:
+ mutex_unlock( &uplink->queueLock );
+fail_ref:
+ ref_put( &uplink->reference );
+ return false;
}
/**
@@ -321,9 +430,10 @@ static void* uplink_mainloop(void *data)
#define EV_SOCKET (1)
#define EV_COUNT (2)
struct pollfd events[EV_COUNT];
- dnbd3_connection_t * const link = (dnbd3_connection_t*)data;
+ dnbd3_uplink_t * const uplink = (dnbd3_uplink_t*)data;
int numSocks, i, waitTime;
int altCheckInterval = SERVER_RTT_INTERVAL_INIT;
+ int rttTestResult;
uint32_t discoverFailCount = 0;
uint32_t unsavedSeconds = 0;
ticks nextAltCheck, lastKeepalive;
@@ -332,31 +442,30 @@ static void* uplink_mainloop(void *data)
timing_get( &nextAltCheck );
lastKeepalive = nextAltCheck;
//
- assert( link != NULL );
+ assert( uplink != NULL );
setThreadName( "idle-uplink" );
+ thread_detach( uplink->thread );
blockNoncriticalSignals();
// Make sure file is open for writing
- if ( !uplink_reopenCacheFd( link, false ) ) {
+ if ( !uplink_reopenCacheFd( uplink, false ) ) {
// It might have failed - still offer proxy mode, we just can't cache
- logadd( LOG_WARNING, "Cannot open cache file %s for writing (errno=%d); will just proxy traffic without caching!", link->image->path, errno );
+ logadd( LOG_WARNING, "Cannot open cache file %s for writing (errno=%d); will just proxy traffic without caching!", uplink->image->path, errno );
}
//
- link->signal = signal_new();
- if ( link->signal == NULL ) {
+ uplink->signal = signal_new();
+ if ( uplink->signal == NULL ) {
logadd( LOG_WARNING, "error creating signal. Uplink unavailable." );
goto cleanup;
}
events[EV_SIGNAL].events = POLLIN;
- events[EV_SIGNAL].fd = signal_getWaitFd( link->signal );
+ events[EV_SIGNAL].fd = signal_getWaitFd( uplink->signal );
events[EV_SOCKET].fd = -1;
- while ( !_shutdown && !link->shutdown ) {
+ while ( !_shutdown && !uplink->shutdown ) {
// poll()
- mutex_lock( &link->rttLock );
- waitTime = link->rttTestResult == RTT_DOCHANGE ? 0 : -1;
- mutex_unlock( &link->rttLock );
+ waitTime = uplink->rttTestResult == RTT_DOCHANGE ? 0 : -1;
if ( waitTime == 0 ) {
- // Nothing
- } else if ( link->fd == -1 && !uplink_connectionShouldShutdown( link ) ) {
+ // 0 means poll, since we're about to change the server
+ } else if ( uplink->current.fd == -1 && !uplink_connectionShouldShutdown( uplink ) ) {
waitTime = 1000;
} else {
declare_now;
@@ -364,9 +473,9 @@ static void* uplink_mainloop(void *data)
if ( waitTime < 100 ) waitTime = 100;
if ( waitTime > 5000 ) waitTime = 5000;
}
- events[EV_SOCKET].fd = link->fd;
+ events[EV_SOCKET].fd = uplink->current.fd;
numSocks = poll( events, EV_COUNT, waitTime );
- if ( _shutdown || link->shutdown ) goto cleanup;
+ if ( _shutdown || uplink->shutdown ) goto cleanup;
if ( numSocks == -1 ) { // Error?
if ( errno == EINTR ) continue;
logadd( LOG_DEBUG1, "poll() error %d", (int)errno );
@@ -374,39 +483,36 @@ static void* uplink_mainloop(void *data)
continue;
}
// Check if server switch is in order
- mutex_lock( &link->rttLock );
- if ( link->rttTestResult != RTT_DOCHANGE ) {
- mutex_unlock( &link->rttLock );
- } else {
- link->rttTestResult = RTT_IDLE;
+ if ( unlikely( uplink->rttTestResult == RTT_DOCHANGE ) ) {
+ mutex_lock( &uplink->rttLock );
+ assert( uplink->rttTestResult == RTT_DOCHANGE );
+ uplink->rttTestResult = RTT_IDLE;
// The rttTest worker thread has finished our request.
// And says it's better to switch to another server
- const int fd = link->fd;
- mutex_lock( &link->sendMutex );
- link->fd = link->betterFd;
- mutex_unlock( &link->sendMutex );
- link->betterFd = -1;
- link->currentServer = link->betterServer;
- link->version = link->betterVersion;
- link->cycleDetected = false;
- mutex_unlock( &link->rttLock );
+ const int fd = uplink->current.fd;
+ mutex_lock( &uplink->sendMutex );
+ uplink->current = uplink->better;
+ mutex_unlock( &uplink->sendMutex );
+ uplink->better.fd = -1;
+ uplink->cycleDetected = false;
+ mutex_unlock( &uplink->rttLock );
discoverFailCount = 0;
if ( fd != -1 ) close( fd );
- link->replicationHandle = REP_NONE;
- link->image->working = true;
- link->replicatedLastBlock = false; // Reset this to be safe - request could've been sent but reply was never received
+ uplink->replicationHandle = REP_NONE;
+ uplink->image->working = true;
+ uplink->replicatedLastBlock = false; // Reset this to be safe - request could've been sent but reply was never received
buffer[0] = '@';
- if ( host_to_string( &link->currentServer, buffer + 1, sizeof(buffer) - 1 ) ) {
- logadd( LOG_DEBUG1, "(Uplink %s) Now connected to %s\n", link->image->name, buffer + 1 );
+ if ( altservers_toString( uplink->current.index, buffer + 1, sizeof(buffer) - 1 ) ) {
+ logadd( LOG_DEBUG1, "(Uplink %s) Now connected to %s\n", uplink->image->name, buffer + 1 );
setThreadName( buffer );
}
// If we don't have a crc32 list yet, see if the new server has one
- if ( link->image->crc32 == NULL ) {
- uplink_addCrc32( link );
+ if ( uplink->image->crc32 == NULL ) {
+ uplink_addCrc32( uplink );
}
// Re-send all pending requests
- uplink_sendRequests( link, false );
- uplink_sendReplicationRequest( link );
+ uplink_sendRequests( uplink, false );
+ uplink_sendReplicationRequest( uplink );
events[EV_SOCKET].events = POLLIN | POLLRDHUP;
timing_gets( &nextAltCheck, altCheckInterval );
// The rtt worker already did the handshake for our image, so there's nothing
@@ -419,202 +525,171 @@ static void* uplink_mainloop(void *data)
goto cleanup;
} else if ( (events[EV_SIGNAL].revents & POLLIN) ) {
// signal triggered -> pending requests
- if ( signal_clear( link->signal ) == SIGNAL_ERROR ) {
- logadd( LOG_WARNING, "Errno on signal on uplink for %s! Things will break!", link->image->name );
+ if ( signal_clear( uplink->signal ) == SIGNAL_ERROR ) {
+ logadd( LOG_WARNING, "Errno on signal on uplink for %s! Things will break!", uplink->image->name );
}
- if ( link->fd != -1 ) {
+ if ( uplink->current.fd != -1 ) {
// Uplink seems fine, relay requests to it...
- uplink_sendRequests( link, true );
+ uplink_sendRequests( uplink, true );
} else { // No uplink; maybe it was shutdown since it was idle for too long
- link->idleTime = 0;
+ uplink->idleTime = 0;
}
}
// Uplink socket
if ( (events[EV_SOCKET].revents & (POLLERR | POLLHUP | POLLRDHUP | POLLNVAL)) ) {
- uplink_connectionFailed( link, true );
- logadd( LOG_DEBUG1, "Uplink gone away, panic!\n" );
+ uplink_connectionFailed( uplink, true );
+ logadd( LOG_DEBUG1, "Uplink gone away, panic! (revents=%d)\n", (int)events[EV_SOCKET].revents );
setThreadName( "panic-uplink" );
} else if ( (events[EV_SOCKET].revents & POLLIN) ) {
- uplink_handleReceive( link );
- if ( _shutdown || link->shutdown ) goto cleanup;
+ uplink_handleReceive( uplink );
+ if ( _shutdown || uplink->shutdown ) goto cleanup;
}
declare_now;
uint32_t timepassed = timing_diff( &lastKeepalive, &now );
if ( timepassed >= SERVER_UPLINK_KEEPALIVE_INTERVAL ) {
lastKeepalive = now;
- link->idleTime += timepassed;
+ uplink->idleTime += timepassed;
unsavedSeconds += timepassed;
- if ( unsavedSeconds > 240 || ( unsavedSeconds > 60 && link->idleTime >= 20 && link->idleTime <= 70 ) ) {
- // fsync/save every 4 minutes, or every 60 seconds if link is idle
+ if ( unsavedSeconds > 240 || ( unsavedSeconds > 60 && uplink->idleTime >= 20 && uplink->idleTime <= 70 ) ) {
+ // fsync/save every 4 minutes, or every 60 seconds if uplink is idle
unsavedSeconds = 0;
- uplink_saveCacheMap( link );
+ uplink_saveCacheMap( uplink );
}
// Keep-alive
- if ( link->fd != -1 && link->replicationHandle == REP_NONE ) {
+ if ( uplink->current.fd != -1 && uplink->replicationHandle == REP_NONE ) {
// Send keep-alive if nothing is happening
- if ( uplink_sendKeepalive( link->fd ) ) {
+ if ( uplink_sendKeepalive( uplink->current.fd ) ) {
// Re-trigger periodically, in case it requires a minimum user count
- uplink_sendReplicationRequest( link );
+ uplink_sendReplicationRequest( uplink );
} else {
- uplink_connectionFailed( link, true );
+ uplink_connectionFailed( uplink, true );
logadd( LOG_DEBUG1, "Error sending keep-alive, panic!\n" );
setThreadName( "panic-uplink" );
}
}
- // Don't keep link established if we're idle for too much
- if ( link->fd != -1 && uplink_connectionShouldShutdown( link ) ) {
- mutex_lock( &link->sendMutex );
- close( link->fd );
- link->fd = events[EV_SOCKET].fd = -1;
- mutex_unlock( &link->sendMutex );
- link->cycleDetected = false;
- if ( link->recvBufferLen != 0 ) {
- link->recvBufferLen = 0;
- free( link->recvBuffer );
- link->recvBuffer = NULL;
+ // Don't keep uplink established if we're idle for too much
+ if ( uplink->current.fd != -1 && uplink_connectionShouldShutdown( uplink ) ) {
+ mutex_lock( &uplink->sendMutex );
+ close( uplink->current.fd );
+ uplink->current.fd = -1;
+ mutex_unlock( &uplink->sendMutex );
+ uplink->cycleDetected = false;
+ if ( uplink->recvBufferLen != 0 ) {
+ uplink->recvBufferLen = 0;
+ free( uplink->recvBuffer );
+ uplink->recvBuffer = NULL;
}
- logadd( LOG_DEBUG1, "Closing idle uplink for image %s:%d", link->image->name, (int)link->image->rid );
+ logadd( LOG_DEBUG1, "Closing idle uplink for image %s:%d", uplink->image->name, (int)uplink->image->rid );
setThreadName( "idle-uplink" );
}
}
// See if we should trigger an RTT measurement
- mutex_lock( &link->rttLock );
- const int rttTestResult = link->rttTestResult;
- mutex_unlock( &link->rttLock );
+ rttTestResult = uplink->rttTestResult;
if ( rttTestResult == RTT_IDLE || rttTestResult == RTT_DONTCHANGE ) {
- if ( timing_reached( &nextAltCheck, &now ) || ( link->fd == -1 && !uplink_connectionShouldShutdown( link ) ) || link->cycleDetected ) {
+ if ( timing_reached( &nextAltCheck, &now ) || ( uplink->current.fd == -1 && !uplink_connectionShouldShutdown( uplink ) ) || uplink->cycleDetected ) {
// It seems it's time for a check
- if ( image_isComplete( link->image ) ) {
+ if ( image_isComplete( uplink->image ) ) {
// Quit work if image is complete
- logadd( LOG_INFO, "Replication of %s complete.", link->image->name );
+ logadd( LOG_INFO, "Replication of %s complete.", uplink->image->name );
setThreadName( "finished-uplink" );
goto cleanup;
- } else if ( !uplink_connectionShouldShutdown( link ) ) {
+ } else if ( !uplink_connectionShouldShutdown( uplink ) ) {
// Not complete - do measurement
- altservers_findUplink( link ); // This will set RTT_INPROGRESS (synchronous)
- if ( _backgroundReplication == BGR_FULL && link->nextReplicationIndex == -1 ) {
- link->nextReplicationIndex = 0;
+ altservers_findUplinkAsync( uplink ); // This will set RTT_INPROGRESS (synchronous)
+ if ( _backgroundReplication == BGR_FULL && uplink->nextReplicationIndex == -1 ) {
+ uplink->nextReplicationIndex = 0;
}
}
altCheckInterval = MIN(altCheckInterval + 1, SERVER_RTT_INTERVAL_MAX);
timing_set( &nextAltCheck, &now, altCheckInterval );
}
} else if ( rttTestResult == RTT_NOT_REACHABLE ) {
- mutex_lock( &link->rttLock );
- link->rttTestResult = RTT_IDLE;
- mutex_unlock( &link->rttLock );
+ atomic_compare_exchange_strong( &uplink->rttTestResult, &rttTestResult, RTT_IDLE );
discoverFailCount++;
- timing_set( &nextAltCheck, &now, (discoverFailCount < SERVER_RTT_BACKOFF_COUNT ? altCheckInterval : SERVER_RTT_INTERVAL_FAILED) );
+ timing_set( &nextAltCheck, &now, (discoverFailCount < SERVER_RTT_MAX_UNREACH ? altCheckInterval : SERVER_RTT_INTERVAL_FAILED) );
}
#ifdef _DEBUG
- if ( link->fd != -1 && !link->shutdown ) {
+ if ( uplink->current.fd != -1 && !uplink->shutdown ) {
bool resend = false;
ticks deadline;
timing_set( &deadline, &now, -10 );
- mutex_lock( &link->queueLock );
- for (i = 0; i < link->queueLen; ++i) {
- if ( link->queue[i].status != ULR_FREE && timing_reached( &link->queue[i].entered, &deadline ) ) {
+ mutex_lock( &uplink->queueLock );
+ for (i = 0; i < uplink->queueLen; ++i) {
+ if ( uplink->queue[i].status != ULR_FREE && timing_reached( &uplink->queue[i].entered, &deadline ) ) {
snprintf( buffer, sizeof(buffer), "[DEBUG %p] Starving request slot %d detected:\n"
- "%s\n(from %" PRIu64 " to %" PRIu64 ", status: %d)\n", (void*)link, i, link->queue[i].client->image->name,
- link->queue[i].from, link->queue[i].to, link->queue[i].status );
- link->queue[i].entered = now;
+ "%s\n(from %" PRIu64 " to %" PRIu64 ", status: %d)\n", (void*)uplink, i, uplink->queue[i].client->image->name,
+ uplink->queue[i].from, uplink->queue[i].to, uplink->queue[i].status );
+ uplink->queue[i].entered = now;
#ifdef _DEBUG_RESEND_STARVING
- link->queue[i].status = ULR_NEW;
+ uplink->queue[i].status = ULR_NEW;
resend = true;
#endif
- mutex_unlock( &link->queueLock );
+ mutex_unlock( &uplink->queueLock );
logadd( LOG_WARNING, "%s", buffer );
- mutex_lock( &link->queueLock );
+ mutex_lock( &uplink->queueLock );
}
}
- mutex_unlock( &link->queueLock );
+ mutex_unlock( &uplink->queueLock );
if ( resend )
- uplink_sendRequests( link, true );
+ uplink_sendRequests( uplink, true );
}
#endif
}
cleanup: ;
- altservers_removeUplink( link );
- uplink_saveCacheMap( link );
- mutex_lock( &link->image->lock );
- if ( link->image->uplink == link ) {
- link->image->uplink = NULL;
- }
- mutex_lock( &link->queueLock );
- const int fd = link->fd;
- const dnbd3_signal_t* signal = link->signal;
- mutex_lock( &link->sendMutex );
- link->fd = -1;
- mutex_unlock( &link->sendMutex );
- link->signal = NULL;
- if ( !link->shutdown ) {
- link->shutdown = true;
- thread_detach( link->thread );
- }
- // Do not access link->image after unlocking, since we set
- // image->uplink to NULL. Acquire with image_lock first,
- // like done below when checking whether to re-init uplink
- mutex_unlock( &link->image->lock );
- mutex_unlock( &link->queueLock );
- if ( fd != -1 ) close( fd );
- if ( signal != NULL ) signal_close( signal );
- // Wait for the RTT check to finish/fail if it's in progress
- while ( link->rttTestResult == RTT_INPROGRESS )
- usleep( 10000 );
- if ( link->betterFd != -1 ) {
- close( link->betterFd );
+ uplink_saveCacheMap( uplink );
+ dnbd3_image_t *image = uplink->image;
+ mutex_lock( &image->lock );
+ bool exp = false;
+ if ( atomic_compare_exchange_strong( &uplink->shutdown, &exp, true ) ) {
+ image->users++; // We set the flag - hold onto image
}
- mutex_destroy( &link->queueLock );
- mutex_destroy( &link->rttLock );
- mutex_destroy( &link->sendMutex );
- free( link->recvBuffer );
- link->recvBuffer = NULL;
- if ( link->cacheFd != -1 ) {
- close( link->cacheFd );
+ dnbd3_uplink_t *current = ref_get_uplink( &image->uplinkref );
+ if ( current == uplink ) { // Set NULL if it's still us...
+ mutex_lock( &uplink->queueLock );
+ cancelAllRequests( uplink );
+ mutex_unlock( &uplink->queueLock );
+ ref_setref( &image->uplinkref, NULL );
}
- dnbd3_image_t *image = image_lock( link->image );
- free( link ); // !!!
- if ( image != NULL ) {
- if ( !_shutdown && image->cache_map != NULL ) {
- // Ingegrity checker must have found something in the meantime
- uplink_init( image, -1, NULL, 0 );
- }
- image_release( image );
+ if ( current != NULL ) { // Decrease ref in any case
+ ref_put( &current->reference );
}
+ mutex_unlock( &image->lock );
+ // Finally as the thread is done, decrease our own ref that we initialized with
+ ref_put( &uplink->reference );
return NULL ;
}
-static void uplink_sendRequests(dnbd3_connection_t *link, bool newOnly)
+static void uplink_sendRequests(dnbd3_uplink_t *uplink, bool newOnly)
{
// Scan for new requests
int j;
- mutex_lock( &link->queueLock );
- for (j = 0; j < link->queueLen; ++j) {
- if ( link->queue[j].status != ULR_NEW && (newOnly || link->queue[j].status != ULR_PENDING) ) continue;
- link->queue[j].status = ULR_PENDING;
- uint8_t hops = link->queue[j].hopCount;
- const uint64_t reqStart = link->queue[j].from & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
- const uint32_t reqSize = (uint32_t)(((link->queue[j].to + DNBD3_BLOCK_SIZE - 1) & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1)) - reqStart);
+ mutex_lock( &uplink->queueLock );
+ for (j = 0; j < uplink->queueLen; ++j) {
+ if ( uplink->queue[j].status != ULR_NEW && (newOnly || uplink->queue[j].status != ULR_PENDING) ) continue;
+ uplink->queue[j].status = ULR_PENDING;
+ uint8_t hops = uplink->queue[j].hopCount;
+ const uint64_t reqStart = uplink->queue[j].from & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1);
+ const uint32_t reqSize = (uint32_t)(((uplink->queue[j].to + DNBD3_BLOCK_SIZE - 1) & ~(uint64_t)(DNBD3_BLOCK_SIZE - 1)) - reqStart);
/*
logadd( LOG_DEBUG2, "[%p] Sending slot %d, now %d, handle %" PRIu64 ", Range: %" PRIu64 "-%" PRIu64 " (%" PRIu64 "-%" PRIu64 ")",
- (void*)link, j, link->queue[j].status, link->queue[j].handle, link->queue[j].from, link->queue[j].to, reqStart, reqStart+reqSize );
+ (void*)uplink, j, uplink->queue[j].status, uplink->queue[j].handle, uplink->queue[j].from, uplink->queue[j].to, reqStart, reqStart+reqSize );
*/
- mutex_unlock( &link->queueLock );
+ mutex_unlock( &uplink->queueLock );
if ( hops < 200 ) ++hops;
- mutex_lock( &link->sendMutex );
- const bool ret = dnbd3_get_block( link->fd, reqStart, reqSize, reqStart, COND_HOPCOUNT( link->version, hops ) );
- mutex_unlock( &link->sendMutex );
+ mutex_lock( &uplink->sendMutex );
+ const bool ret = dnbd3_get_block( uplink->current.fd, reqStart, reqSize, reqStart, COND_HOPCOUNT( uplink->current.version, hops ) );
+ mutex_unlock( &uplink->sendMutex );
if ( !ret ) {
// Non-critical - if the connection dropped or the server was changed
// the thread will re-send this request as soon as the connection
// is reestablished.
logadd( LOG_DEBUG1, "Error forwarding request to uplink server!\n" );
- altservers_serverFailed( &link->currentServer );
+ altservers_serverFailed( uplink->current.index );
return;
}
- mutex_lock( &link->queueLock );
+ mutex_lock( &uplink->queueLock );
}
- mutex_unlock( &link->queueLock );
+ mutex_unlock( &uplink->queueLock );
}
/**
@@ -627,13 +702,13 @@ static void uplink_sendRequests(dnbd3_connection_t *link, bool newOnly)
* the code simpler. Worst case would be only one bit is zero, which means
* 4kb are missing, but we will request 32kb.
*/
-static void uplink_sendReplicationRequest(dnbd3_connection_t *link)
+static void uplink_sendReplicationRequest(dnbd3_uplink_t *uplink)
{
- if ( link == NULL || link->fd == -1 ) return;
- if ( _backgroundReplication == BGR_DISABLED || link->cacheFd == -1 ) return; // Don't do background replication
- if ( link->nextReplicationIndex == -1 || link->replicationHandle != REP_NONE )
+ if ( uplink == NULL || uplink->current.fd == -1 ) return;
+ if ( _backgroundReplication == BGR_DISABLED || uplink->cacheFd == -1 ) return; // Don't do background replication
+ if ( uplink->nextReplicationIndex == -1 || uplink->replicationHandle != REP_NONE )
return;
- dnbd3_image_t * const image = link->image;
+ dnbd3_image_t * const image = uplink->image;
if ( image->virtualFilesize < DNBD3_BLOCK_SIZE ) return;
mutex_lock( &image->lock );
if ( image == NULL || image->cache_map == NULL || image->users < _bgrMinClients ) {
@@ -645,17 +720,17 @@ static void uplink_sendReplicationRequest(dnbd3_connection_t *link)
const int lastBlockIndex = mapBytes - 1;
int endByte;
if ( _backgroundReplication == BGR_FULL ) { // Full mode: consider all blocks
- endByte = link->nextReplicationIndex + mapBytes;
+ endByte = uplink->nextReplicationIndex + mapBytes;
} else { // Hashblock based: Only look for match in current hash block
- endByte = ( link->nextReplicationIndex + MAP_BYTES_PER_HASH_BLOCK ) & MAP_INDEX_HASH_START_MASK;
+ endByte = ( uplink->nextReplicationIndex + MAP_BYTES_PER_HASH_BLOCK ) & MAP_INDEX_HASH_START_MASK;
if ( endByte > mapBytes ) {
endByte = mapBytes;
}
}
int replicationIndex = -1;
- for ( int j = link->nextReplicationIndex; j < endByte; ++j ) {
+ for ( int j = uplink->nextReplicationIndex; j < endByte; ++j ) {
const int i = j % ( mapBytes ); // Wrap around for BGR_FULL
- if ( image->cache_map[i] != 0xff && ( i != lastBlockIndex || !link->replicatedLastBlock ) ) {
+ if ( image->cache_map[i] != 0xff && ( i != lastBlockIndex || !uplink->replicatedLastBlock ) ) {
// Found incomplete one
replicationIndex = i;
break;
@@ -664,31 +739,31 @@ static void uplink_sendReplicationRequest(dnbd3_connection_t *link)
mutex_unlock( &image->lock );
if ( replicationIndex == -1 && _backgroundReplication == BGR_HASHBLOCK ) {
// Nothing left in current block, find next one
- replicationIndex = uplink_findNextIncompleteHashBlock( link, endByte );
+ replicationIndex = uplink_findNextIncompleteHashBlock( uplink, endByte );
}
if ( replicationIndex == -1 ) {
// Replication might be complete, uplink_mainloop should take care....
- link->nextReplicationIndex = -1;
+ uplink->nextReplicationIndex = -1;
return;
}
const uint64_t offset = (uint64_t)replicationIndex * FILE_BYTES_PER_MAP_BYTE;
- link->replicationHandle = offset;
+ uplink->replicationHandle = offset;
const uint32_t size = (uint32_t)MIN( image->virtualFilesize - offset, FILE_BYTES_PER_MAP_BYTE );
- mutex_lock( &link->sendMutex );
- bool sendOk = dnbd3_get_block( link->fd, offset, size, link->replicationHandle, COND_HOPCOUNT( link->version, 1 ) );
- mutex_unlock( &link->sendMutex );
+ mutex_lock( &uplink->sendMutex );
+ bool sendOk = dnbd3_get_block( uplink->current.fd, offset, size, uplink->replicationHandle, COND_HOPCOUNT( uplink->current.version, 1 ) );
+ mutex_unlock( &uplink->sendMutex );
if ( !sendOk ) {
logadd( LOG_DEBUG1, "Error sending background replication request to uplink server!\n" );
return;
}
if ( replicationIndex == lastBlockIndex ) {
- link->replicatedLastBlock = true; // Special treatment, last byte in map could represent less than 8 blocks
+ uplink->replicatedLastBlock = true; // Special treatment, last byte in map could represent less than 8 blocks
}
- link->nextReplicationIndex = replicationIndex + 1; // Remember last incomplete offset for next time so we don't play Schlemiel the painter
+ uplink->nextReplicationIndex = replicationIndex + 1; // Remember last incomplete offset for next time so we don't play Schlemiel the painter
if ( _backgroundReplication == BGR_HASHBLOCK
- && link->nextReplicationIndex % MAP_BYTES_PER_HASH_BLOCK == 0 ) {
+ && uplink->nextReplicationIndex % MAP_BYTES_PER_HASH_BLOCK == 0 ) {
// Just crossed a hash block boundary, look for new candidate starting at this very index
- link->nextReplicationIndex = uplink_findNextIncompleteHashBlock( link, link->nextReplicationIndex );
+ uplink->nextReplicationIndex = uplink_findNextIncompleteHashBlock( uplink, uplink->nextReplicationIndex );
}
}
@@ -697,18 +772,18 @@ static void uplink_sendReplicationRequest(dnbd3_connection_t *link)
* of a hash block which is neither completely empty nor completely
* replicated yet. Returns -1 if no match.
*/
-static int uplink_findNextIncompleteHashBlock(dnbd3_connection_t *link, const int startMapIndex)
+static int uplink_findNextIncompleteHashBlock(dnbd3_uplink_t *uplink, const int startMapIndex)
{
int retval = -1;
- mutex_lock( &link->image->lock );
- const int mapBytes = IMGSIZE_TO_MAPBYTES( link->image->virtualFilesize );
- const uint8_t *cache_map = link->image->cache_map;
+ mutex_lock( &uplink->image->lock );
+ const int mapBytes = IMGSIZE_TO_MAPBYTES( uplink->image->virtualFilesize );
+ const uint8_t *cache_map = uplink->image->cache_map;
if ( cache_map != NULL ) {
int j;
const int start = ( startMapIndex & MAP_INDEX_HASH_START_MASK );
for (j = 0; j < mapBytes; ++j) {
const int i = ( start + j ) % mapBytes;
- const bool isFull = cache_map[i] == 0xff || ( i + 1 == mapBytes && link->replicatedLastBlock );
+ const bool isFull = cache_map[i] == 0xff || ( i + 1 == mapBytes && uplink->replicatedLastBlock );
const bool isEmpty = cache_map[i] == 0;
if ( !isEmpty && !isFull ) {
// Neither full nor empty, replicate
@@ -736,49 +811,49 @@ static int uplink_findNextIncompleteHashBlock(dnbd3_connection_t *link, const in
retval = -1;
}
}
- mutex_unlock( &link->image->lock );
+ mutex_unlock( &uplink->image->lock );
return retval;
}
/**
* Receive data from uplink server and process/dispatch
- * Locks on: link.lock, images[].lock
+ * Locks on: uplink.lock, images[].lock
*/
-static void uplink_handleReceive(dnbd3_connection_t *link)
+static void uplink_handleReceive(dnbd3_uplink_t *uplink)
{
dnbd3_reply_t inReply, outReply;
int ret, i;
for (;;) {
- ret = dnbd3_read_reply( link->fd, &inReply, false );
- if ( unlikely( ret == REPLY_INTR ) && likely( !_shutdown && !link->shutdown ) ) continue;
+ ret = dnbd3_read_reply( uplink->current.fd, &inReply, false );
+ if ( unlikely( ret == REPLY_INTR ) && likely( !_shutdown && !uplink->shutdown ) ) continue;
if ( ret == REPLY_AGAIN ) break;
if ( unlikely( ret == REPLY_CLOSED ) ) {
- logadd( LOG_INFO, "Uplink: Remote host hung up (%s)", link->image->path );
+ logadd( LOG_INFO, "Uplink: Remote host hung up (%s)", uplink->image->path );
goto error_cleanup;
}
if ( unlikely( ret == REPLY_WRONGMAGIC ) ) {
- logadd( LOG_WARNING, "Uplink server's packet did not start with dnbd3_packet_magic (%s)", link->image->path );
+ logadd( LOG_WARNING, "Uplink server's packet did not start with dnbd3_packet_magic (%s)", uplink->image->path );
goto error_cleanup;
}
if ( unlikely( ret != REPLY_OK ) ) {
- logadd( LOG_INFO, "Uplink: Connection error %d (%s)", ret, link->image->path );
+ logadd( LOG_INFO, "Uplink: Connection error %d (%s)", ret, uplink->image->path );
goto error_cleanup;
}
if ( unlikely( inReply.size > (uint32_t)_maxPayload ) ) {
- logadd( LOG_WARNING, "Pure evil: Uplink server sent too much payload (%" PRIu32 ") for %s", inReply.size, link->image->path );
+ logadd( LOG_WARNING, "Pure evil: Uplink server sent too much payload (%" PRIu32 ") for %s", inReply.size, uplink->image->path );
goto error_cleanup;
}
- if ( unlikely( link->recvBufferLen < inReply.size ) ) {
- link->recvBufferLen = MIN((uint32_t)_maxPayload, inReply.size + 65536);
- link->recvBuffer = realloc( link->recvBuffer, link->recvBufferLen );
- if ( link->recvBuffer == NULL ) {
+ if ( unlikely( uplink->recvBufferLen < inReply.size ) ) {
+ uplink->recvBufferLen = MIN((uint32_t)_maxPayload, inReply.size + 65536);
+ uplink->recvBuffer = realloc( uplink->recvBuffer, uplink->recvBufferLen );
+ if ( uplink->recvBuffer == NULL ) {
logadd( LOG_ERROR, "Out of memory when trying to allocate receive buffer for uplink" );
exit( 1 );
}
}
- if ( unlikely( (uint32_t)sock_recv( link->fd, link->recvBuffer, inReply.size ) != inReply.size ) ) {
- logadd( LOG_INFO, "Lost connection to uplink server of %s (payload)", link->image->path );
+ if ( unlikely( (uint32_t)sock_recv( uplink->current.fd, uplink->recvBuffer, inReply.size ) != inReply.size ) ) {
+ logadd( LOG_INFO, "Lost connection to uplink server of %s (payload)", uplink->image->path );
goto error_cleanup;
}
// Payload read completely
@@ -789,18 +864,18 @@ static void uplink_handleReceive(dnbd3_connection_t *link)
const uint64_t start = inReply.handle;
const uint64_t end = inReply.handle + inReply.size;
totalBytesReceived += inReply.size;
- link->bytesReceived += inReply.size;
+ uplink->bytesReceived += inReply.size;
// 1) Write to cache file
- if ( unlikely( link->cacheFd == -1 ) ) {
- uplink_reopenCacheFd( link, false );
+ if ( unlikely( uplink->cacheFd == -1 ) ) {
+ uplink_reopenCacheFd( uplink, false );
}
- if ( likely( link->cacheFd != -1 ) ) {
+ if ( likely( uplink->cacheFd != -1 ) ) {
int err = 0;
bool tryAgain = true; // Allow one retry in case we run out of space or the write fd became invalid
uint32_t done = 0;
ret = 0;
while ( done < inReply.size ) {
- ret = (int)pwrite( link->cacheFd, link->recvBuffer + done, inReply.size - done, start + done );
+ ret = (int)pwrite( uplink->cacheFd, uplink->recvBuffer + done, inReply.size - done, start + done );
if ( unlikely( ret == -1 ) ) {
err = errno;
if ( err == EINTR ) continue;
@@ -811,32 +886,37 @@ static void uplink_handleReceive(dnbd3_connection_t *link)
continue; // Success, retry write
}
if ( err == EBADF || err == EINVAL || err == EIO ) {
- if ( !tryAgain || !uplink_reopenCacheFd( link, true ) )
+ if ( !tryAgain || !uplink_reopenCacheFd( uplink, true ) )
break;
tryAgain = false;
continue; // Write handle to image successfully re-opened, try again
}
- logadd( LOG_DEBUG1, "Error trying to cache data for %s:%d -- errno=%d", link->image->name, (int)link->image->rid, err );
+ logadd( LOG_DEBUG1, "Error trying to cache data for %s:%d -- errno=%d", uplink->image->name, (int)uplink->image->rid, err );
break;
}
if ( unlikely( ret <= 0 || (uint32_t)ret > inReply.size - done ) ) {
- logadd( LOG_WARNING, "Unexpected return value %d from pwrite to %s:%d", ret, link->image->name, (int)link->image->rid );
+ logadd( LOG_WARNING, "Unexpected return value %d from pwrite to %s:%d", ret, uplink->image->name, (int)uplink->image->rid );
break;
}
done += (uint32_t)ret;
}
if ( likely( done > 0 ) ) {
- image_updateCachemap( link->image, start, start + done, true );
+ image_updateCachemap( uplink->image, start, start + done, true );
}
if ( unlikely( ret == -1 && ( err == EBADF || err == EINVAL || err == EIO ) ) ) {
logadd( LOG_WARNING, "Error writing received data for %s:%d (errno=%d); disabling caching.",
- link->image->name, (int)link->image->rid, err );
+ uplink->image->name, (int)uplink->image->rid, err );
}
}
// 2) Figure out which clients are interested in it
- mutex_lock( &link->queueLock );
- for (i = 0; i < link->queueLen; ++i) {
- dnbd3_queued_request_t * const req = &link->queue[i];
+ // Mark as ULR_PROCESSING, since we unlock repeatedly in the second loop
+ // below; this prevents uplink_request() from attaching to this request
+ // by populating a slot with index greater than the highest matching
+ // request with ULR_PROCESSING (assuming there is no ULR_PENDING or ULR_NEW
+ // where it's fine if the index is greater)
+ mutex_lock( &uplink->queueLock );
+ for (i = 0; i < uplink->queueLen; ++i) {
+ dnbd3_queued_request_t * const req = &uplink->queue[i];
assert( req->status != ULR_PROCESSING );
if ( req->status != ULR_PENDING && req->status != ULR_NEW ) continue;
assert( req->client != NULL );
@@ -849,8 +929,8 @@ static void uplink_handleReceive(dnbd3_connection_t *link)
// from 0, you also need to change the "attach to existing request"-logic in uplink_request()
outReply.magic = dnbd3_packet_magic;
bool served = false;
- for ( i = link->queueLen - 1; i >= 0; --i ) {
- dnbd3_queued_request_t * const req = &link->queue[i];
+ for ( i = uplink->queueLen - 1; i >= 0; --i ) {
+ dnbd3_queued_request_t * const req = &uplink->queue[i];
if ( req->status == ULR_PROCESSING ) {
size_t bytesSent = 0;
assert( req->from >= start && req->to <= end );
@@ -860,84 +940,90 @@ static void uplink_handleReceive(dnbd3_connection_t *link)
outReply.size = (uint32_t)( req->to - req->from );
iov[0].iov_base = &outReply;
iov[0].iov_len = sizeof outReply;
- iov[1].iov_base = link->recvBuffer + (req->from - start);
+ iov[1].iov_base = uplink->recvBuffer + (req->from - start);
iov[1].iov_len = outReply.size;
fixup_reply( outReply );
req->status = ULR_FREE;
req->client = NULL;
served = true;
mutex_lock( &client->sendMutex );
- mutex_unlock( &link->queueLock );
+ mutex_unlock( &uplink->queueLock );
if ( client->sock != -1 ) {
ssize_t sent = writev( client->sock, iov, 2 );
if ( sent > (ssize_t)sizeof outReply ) {
bytesSent = (size_t)sent - sizeof outReply;
}
}
- mutex_unlock( &client->sendMutex );
if ( bytesSent != 0 ) {
client->bytesSent += bytesSent;
}
- mutex_lock( &link->queueLock );
+ mutex_unlock( &client->sendMutex );
+ mutex_lock( &uplink->queueLock );
+ if ( i > uplink->queueLen ) {
+ i = uplink->queueLen; // Might have been set to 0 by cancelAllRequests
+ }
}
- if ( req->status == ULR_FREE && i == link->queueLen - 1 ) link->queueLen--;
+ if ( req->status == ULR_FREE && i == uplink->queueLen - 1 ) uplink->queueLen--;
}
- mutex_unlock( &link->queueLock );
+ mutex_unlock( &uplink->queueLock );
#ifdef _DEBUG
- if ( !served && start != link->replicationHandle ) {
- logadd( LOG_DEBUG2, "%p, %s -- Unmatched reply: %" PRIu64 " to %" PRIu64, (void*)link, link->image->name, start, end );
+ if ( !served && start != uplink->replicationHandle ) {
+ logadd( LOG_DEBUG2, "%p, %s -- Unmatched reply: %" PRIu64 " to %" PRIu64, (void*)uplink, uplink->image->name, start, end );
}
#endif
- if ( start == link->replicationHandle ) {
+ if ( start == uplink->replicationHandle ) {
// Was our background replication
- link->replicationHandle = REP_NONE;
+ uplink->replicationHandle = REP_NONE;
// Try to remove from fs cache if no client was interested in this data
- if ( !served && link->cacheFd != -1 ) {
- posix_fadvise( link->cacheFd, start, inReply.size, POSIX_FADV_DONTNEED );
+ if ( !served && uplink->cacheFd != -1 ) {
+ posix_fadvise( uplink->cacheFd, start, inReply.size, POSIX_FADV_DONTNEED );
}
}
if ( served ) {
// Was some client -- reset idle counter
- link->idleTime = 0;
+ uplink->idleTime = 0;
// Re-enable replication if disabled
- if ( link->nextReplicationIndex == -1 ) {
- link->nextReplicationIndex = (int)( start / FILE_BYTES_PER_MAP_BYTE ) & MAP_INDEX_HASH_START_MASK;
+ if ( uplink->nextReplicationIndex == -1 ) {
+ uplink->nextReplicationIndex = (int)( start / FILE_BYTES_PER_MAP_BYTE ) & MAP_INDEX_HASH_START_MASK;
}
}
}
- if ( link->replicationHandle == REP_NONE ) {
- mutex_lock( &link->queueLock );
- const bool rep = ( link->queueLen == 0 );
- mutex_unlock( &link->queueLock );
- if ( rep ) uplink_sendReplicationRequest( link );
+ if ( uplink->replicationHandle == REP_NONE ) {
+ mutex_lock( &uplink->queueLock );
+ const bool rep = ( uplink->queueLen == 0 );
+ mutex_unlock( &uplink->queueLock );
+ if ( rep ) uplink_sendReplicationRequest( uplink );
}
return;
// Error handling from failed receive or message parsing
error_cleanup: ;
- uplink_connectionFailed( link, true );
+ uplink_connectionFailed( uplink, true );
}
-static void uplink_connectionFailed(dnbd3_connection_t *link, bool findNew)
+/**
+ * Only call from uplink thread
+ */
+static void uplink_connectionFailed(dnbd3_uplink_t *uplink, bool findNew)
{
- if ( link->fd == -1 )
+ if ( uplink->current.fd == -1 )
return;
- altservers_serverFailed( &link->currentServer );
- mutex_lock( &link->sendMutex );
- close( link->fd );
- link->fd = -1;
- mutex_unlock( &link->sendMutex );
- link->replicationHandle = REP_NONE;
- if ( _backgroundReplication == BGR_FULL && link->nextReplicationIndex == -1 ) {
- link->nextReplicationIndex = 0;
+ altservers_serverFailed( uplink->current.index );
+ mutex_lock( &uplink->sendMutex );
+ close( uplink->current.fd );
+ uplink->current.fd = -1;
+ mutex_unlock( &uplink->sendMutex );
+ uplink->replicationHandle = REP_NONE;
+ if ( _backgroundReplication == BGR_FULL && uplink->nextReplicationIndex == -1 ) {
+ uplink->nextReplicationIndex = 0;
}
if ( !findNew )
return;
- mutex_lock( &link->rttLock );
- bool bail = link->rttTestResult == RTT_INPROGRESS || link->betterFd != -1;
- mutex_unlock( &link->rttLock );
+ mutex_lock( &uplink->rttLock );
+ bool bail = uplink->rttTestResult == RTT_INPROGRESS || uplink->better.fd != -1;
+ mutex_unlock( &uplink->rttLock );
if ( bail )
return;
- altservers_findUplink( link );
+ altservers_findUplinkAsync( uplink );
}
/**
@@ -954,7 +1040,7 @@ static int uplink_sendKeepalive(const int fd)
return send( fd, &request, sizeof(request), MSG_NOSIGNAL ) == sizeof(request);
}
-static void uplink_addCrc32(dnbd3_connection_t *uplink)
+static void uplink_addCrc32(dnbd3_uplink_t *uplink)
{
dnbd3_image_t *image = uplink->image;
if ( image == NULL || image->virtualFilesize == 0 ) return;
@@ -962,7 +1048,7 @@ static void uplink_addCrc32(dnbd3_connection_t *uplink)
uint32_t masterCrc;
uint32_t *buffer = malloc( bytes );
mutex_lock( &uplink->sendMutex );
- bool sendOk = dnbd3_get_crc32( uplink->fd, &masterCrc, buffer, &bytes );
+ bool sendOk = dnbd3_get_crc32( uplink->current.fd, &masterCrc, buffer, &bytes );
mutex_unlock( &uplink->sendMutex );
if ( !sendOk || bytes == 0 ) {
free( buffer );
@@ -997,14 +1083,14 @@ static void uplink_addCrc32(dnbd3_connection_t *uplink)
* it will be closed first. Otherwise, nothing will happen and true will be returned
* immediately.
*/
-static bool uplink_reopenCacheFd(dnbd3_connection_t *link, const bool force)
+static bool uplink_reopenCacheFd(dnbd3_uplink_t *uplink, const bool force)
{
- if ( link->cacheFd != -1 ) {
+ if ( uplink->cacheFd != -1 ) {
if ( !force ) return true;
- close( link->cacheFd );
+ close( uplink->cacheFd );
}
- link->cacheFd = open( link->image->path, O_WRONLY | O_CREAT, 0644 );
- return link->cacheFd != -1;
+ uplink->cacheFd = open( uplink->image->path, O_WRONLY | O_CREAT, 0644 );
+ return uplink->cacheFd != -1;
}
/**
@@ -1012,13 +1098,13 @@ static bool uplink_reopenCacheFd(dnbd3_connection_t *link, const bool force)
* Return true on success.
* Locks on: imageListLock, image.lock
*/
-static bool uplink_saveCacheMap(dnbd3_connection_t *link)
+static bool uplink_saveCacheMap(dnbd3_uplink_t *uplink)
{
- dnbd3_image_t *image = link->image;
+ dnbd3_image_t *image = uplink->image;
assert( image != NULL );
- if ( link->cacheFd != -1 ) {
- if ( fsync( link->cacheFd ) == -1 ) {
+ if ( uplink->cacheFd != -1 ) {
+ if ( fsync( uplink->cacheFd ) == -1 ) {
// A failing fsync means we have no guarantee that any data
// since the last fsync (or open if none) has been saved. Apart
// from keeping the cache_map from the last successful fsync
@@ -1080,8 +1166,19 @@ static bool uplink_saveCacheMap(dnbd3_connection_t *link)
return true;
}
-static bool uplink_connectionShouldShutdown(dnbd3_connection_t *link)
+static bool uplink_connectionShouldShutdown(dnbd3_uplink_t *uplink)
{
- return ( link->idleTime > SERVER_UPLINK_IDLE_TIMEOUT && _backgroundReplication != BGR_FULL );
+ return ( uplink->idleTime > SERVER_UPLINK_IDLE_TIMEOUT
+ && ( _backgroundReplication != BGR_FULL || _bgrMinClients > uplink->image->users ) );
}
+bool uplink_getHostString(dnbd3_uplink_t *uplink, char *buffer, size_t len)
+{
+ int current;
+ mutex_lock( &uplink->rttLock );
+ current = uplink->current.fd == -1 ? -1 : uplink->current.index;
+ mutex_unlock( &uplink->rttLock );
+ if ( current == -1 )
+ return false;
+ return altservers_toString( current, buffer, len );
+}
diff --git a/src/server/uplink.h b/src/server/uplink.h
index 2b41dfc..49ff0b4 100644
--- a/src/server/uplink.h
+++ b/src/server/uplink.h
@@ -10,10 +10,12 @@ uint64_t uplink_getTotalBytesReceived();
bool uplink_init(dnbd3_image_t *image, int sock, dnbd3_host_t *host, int version);
-void uplink_removeClient(dnbd3_connection_t *uplink, dnbd3_client_t *client);
+void uplink_removeClient(dnbd3_uplink_t *uplink, dnbd3_client_t *client);
bool uplink_request(dnbd3_client_t *client, uint64_t handle, uint64_t start, uint32_t length, uint8_t hopCount);
-void uplink_shutdown(dnbd3_image_t *image);
+bool uplink_shutdown(dnbd3_image_t *image);
+
+bool uplink_getHostString(dnbd3_uplink_t *uplink, char *buffer, size_t len);
#endif /* UPLINK_H_ */
diff --git a/src/serverconfig.h b/src/serverconfig.h
index 0cbb320..239f0a2 100644
--- a/src/serverconfig.h
+++ b/src/serverconfig.h
@@ -6,10 +6,12 @@
// +++++ Performance/memory related
#define SERVER_MAX_CLIENTS 4000
#define SERVER_MAX_IMAGES 5000
-#define SERVER_MAX_ALTS 100
+#define SERVER_MAX_ALTS 50
// +++++ Uplink handling (proxy mode)
-#define SERVER_UPLINK_FAIL_INCREASE 5 // On server failure, increase numFails by this value
-#define SERVER_BAD_UPLINK_THRES 40 // Thresold for numFails at which we ignore a server for the time span below
+#define SERVER_GLOBAL_DUP_TIME 6 // How many seconds to wait before changing global fail counter again
+#define SERVER_BAD_UPLINK_MIN 10 // Threshold for fails at which we start ignoring the server occasionally
+#define SERVER_BAD_UPLINK_MAX 20 // Hard block server if it failed this many times
+#define SERVER_BAD_UPLINK_LOCAL_BLOCK 10 // If a server didn't supply the requested image this many times, block it for some time
#define SERVER_BAD_UPLINK_IGNORE 180 // How many seconds is a server ignored
#define SERVER_MAX_UPLINK_QUEUE 1500 // Maximum number of queued requests per uplink
#define SERVER_UPLINK_QUEUELEN_THRES 900 // Threshold where we start dropping incoming clients
@@ -33,7 +35,7 @@
#define SERVER_RTT_PROBES 5 // How many probes to average over
#define SERVER_RTT_INTERVAL_INIT 5 // Initial interval between probes
#define SERVER_RTT_INTERVAL_MAX 45 // Maximum interval between probes
-#define SERVER_RTT_BACKOFF_COUNT 5 // If we can't reach any uplink server this many times, consider the uplink bad
+#define SERVER_RTT_MAX_UNREACH 10 // If no server was reachable this many times, stop RTT measurements for a while
#define SERVER_RTT_INTERVAL_FAILED 180 // Interval to use if no uplink server is reachable for above many times
#define SERVER_REMOTE_IMAGE_CHECK_CACHETIME 120 // 2 minutes
diff --git a/src/shared/sockhelper.c b/src/shared/sockhelper.c
index ab34aa1..ec80659 100644
--- a/src/shared/sockhelper.c
+++ b/src/shared/sockhelper.c
@@ -46,6 +46,7 @@ int sock_connect(const dnbd3_host_t * const addr, const int connect_ms, const in
#endif
else {
logadd( LOG_DEBUG1, "Unsupported address type: %d\n", (int)addr->type );
+ errno = EAFNOSUPPORT;
return -1;
}
int client_sock = socket( proto, SOCK_STREAM, IPPROTO_TCP );
@@ -56,8 +57,10 @@ int sock_connect(const dnbd3_host_t * const addr, const int connect_ms, const in
} else {
sock_setTimeout( client_sock, connect_ms );
}
+ int e2;
for ( int i = 0; i < 5; ++i ) {
int ret = connect( client_sock, (struct sockaddr *)&ss, addrlen );
+ e2 = errno;
if ( ret != -1 || errno == EINPROGRESS || errno == EISCONN ) break;
if ( errno == EINTR ) {
// http://www.madore.org/~david/computers/connect-intr.html
@@ -67,21 +70,26 @@ int sock_connect(const dnbd3_host_t * const addr, const int connect_ms, const in
struct pollfd unix_really_sucks = { .fd = client_sock, .events = POLLOUT | POLLIN };
while ( i-- > 0 ) {
int pr = poll( &unix_really_sucks, 1, connect_ms == 0 ? -1 : connect_ms );
+ e2 = errno;
if ( pr == 1 && ( unix_really_sucks.revents & POLLOUT ) ) break;
if ( pr == -1 && errno == EINTR ) continue;
close( client_sock );
+ errno = e2;
return -1;
}
sockaddr_storage junk;
socklen_t more_junk = sizeof(junk);
if ( getpeername( client_sock, (struct sockaddr*)&junk, &more_junk ) == -1 ) {
+ e2 = errno;
close( client_sock );
+ errno = e2;
return -1;
}
break;
#endif
} // EINTR
close( client_sock );
+ errno = e2;
return -1;
}
if ( connect_ms != -1 && connect_ms != rw_ms ) {