path: root/src/server/image.h
author	Simon Rettberg	2020-03-13 16:03:29 +0100
committer	Simon Rettberg	2020-03-13 16:03:29 +0100
commit	290d3478f245bb7d2112bb781286a9fbae42b983 (patch)
tree	3cc825ae2249126d1f97f4e06592358ab9cfd81a	/src/server/image.h
parent	[SERVER] Fix data type (diff)
download	dnbd3-290d3478f245bb7d2112bb781286a9fbae42b983.tar.gz
dnbd3-290d3478f245bb7d2112bb781286a9fbae42b983.tar.xz
dnbd3-290d3478f245bb7d2112bb781286a9fbae42b983.zip
[SERVER] Rewrite uplink queue handling
- Now uses linked lists instead of huge array
- Does prefetch data on client requests
- Can have multiple replication requests in-flight
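As a rough illustration of the queue structure described above (not the actual dnbd3 types; all names here are hypothetical), a linked-list request queue could look like this:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	typedef struct uplink_request {
		uint64_t start;                // first byte of the requested range
		uint32_t length;               // length of the range in bytes
		bool     isPrefetch;           // speculative read issued on behalf of a client
		struct uplink_request *next;   // next entry in the singly-linked queue
	} uplink_request_t;

	// Appending only relinks pointers, so the queue has no fixed capacity the
	// way a preallocated array does, and several replication requests can be
	// pending ("in-flight") at the same time.
	static void queue_append(uplink_request_t **head, uplink_request_t *req)
	{
		req->next = NULL;
		while ( *head != NULL ) head = &(*head)->next;
		*head = req;
	}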
Diffstat (limited to 'src/server/image.h')
-rw-r--r--	src/server/image.h	44
1 file changed, 44 insertions, 0 deletions
diff --git a/src/server/image.h b/src/server/image.h
index 4614c74..b23711b 100644
--- a/src/server/image.h
+++ b/src/server/image.h
@@ -51,6 +51,50 @@ bool image_ensureDiskSpaceLocked(uint64_t size, bool force);
bool image_saveCacheMap(dnbd3_image_t *image);
+/**
+ * Check if given range is cached. Be careful when using this function because:
+ * 1) you need to hold a reference to the cache map
+ * 2) start and end are assumed to be 4k aligned
+ * 3) start and end are not checked to be in bounds (we don't know the image in this context)
+ */
+static inline bool image_isRangeCachedUnsafe(dnbd3_cache_map_t *cache, uint64_t start, uint64_t end)
+{
+ const uint64_t firstByteInMap = start >> 15;
+ const uint64_t lastByteInMap = (end - 1) >> 15;
+ const uint8_t fb = (uint8_t)(0xff << ((start >> 12) & 7));
+ const uint8_t lb = (uint8_t)(~(0xff << ((((end - 1) >> 12) & 7) + 1)));
+ uint64_t pos;
+ uint8_t b;
+ bool isCached;
+ if ( firstByteInMap == lastByteInMap ) { // Single byte to check, much simpler
+ b = cache->map[firstByteInMap];
+ isCached = ( b & ( fb & lb ) ) == ( fb & lb );
+ } else {
+ isCached = true;
+ atomic_thread_fence( memory_order_acquire );
+ // First byte
+ if ( isCached ) {
+ b = atomic_load_explicit( &cache->map[firstByteInMap], memory_order_relaxed );
+ isCached = ( ( b & fb ) == fb );
+ }
+ // Last byte
+ if ( isCached ) {
+ b = atomic_load_explicit( &cache->map[lastByteInMap], memory_order_relaxed );
+ isCached = ( ( b & lb ) == lb );
+ }
+ // Middle, must be all bits set (0xff)
+ if ( isCached ) {
+ for ( pos = firstByteInMap + 1; pos < lastByteInMap; ++pos ) {
+ if ( atomic_load_explicit( &cache->map[pos], memory_order_relaxed ) != 0xff ) {
+ isCached = false;
+ break;
+ }
+ }
+ }
+ }
+ return isCached;
+}
+
// one byte in the map covers 8 4kib blocks, so 32kib per byte
// "+ (1 << 15) - 1" is required to account for the last bit of
// the image that is smaller than 32kib
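As a worked illustration of this arithmetic (not part of the diff; the image size and byte range below are made up), the following standalone snippet computes the map size and the first/last byte masks the same way image_isRangeCachedUnsafe does:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		// 100 MiB plus one extra 4 KiB block; the "+ (1 << 15) - 1" rounds up,
		// so the trailing partial 32 KiB still gets its own map byte (3201 total)
		const uint64_t imageSize = 100 * 1024 * 1024 + 4096;
		const uint64_t mapBytes = ( imageSize + ( 1 << 15 ) - 1 ) >> 15;
		// For a range [start, end) of 32 KiB .. 80 KiB:
		const uint64_t start = 0x8000, end = 0x14000;
		const uint64_t firstByte = start >> 15;        // = 1
		const uint64_t lastByte = ( end - 1 ) >> 15;   // = 2
		// fb = 0xff: the range starts on a map-byte boundary, all 8 bits needed
		const uint8_t fb = (uint8_t)( 0xff << ( ( start >> 12 ) & 7 ) );
		// lb = 0x0f: only the first 4 blocks of the last map byte are covered
		const uint8_t lb = (uint8_t)( ~( 0xff << ( ( ( ( end - 1 ) >> 12 ) & 7 ) + 1 ) ) );
		printf( "map bytes: %" PRIu64 ", bytes %" PRIu64 "..%" PRIu64 ", masks 0x%02x/0x%02x\n",
				mapBytes, firstByte, lastByte, fb, lb );
		return 0;
	}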