summaryrefslogtreecommitdiffstats
path: root/dozentenmodulserver/src/main/java/fileserv/FileChunk.java
diff options
context:
space:
mode:
author:    Simon Rettberg  2015-06-11 18:40:49 +0200
committer: Simon Rettberg  2015-06-11 18:40:49 +0200
commit:    e0005ceecfd9281230c4add7575b18ee88307774 (patch)
treea73bbcfc213df478c701aac120ae2b7c6e52bb1b /dozentenmodulserver/src/main/java/fileserv/FileChunk.java
parent[server] db stuff, new interface, ... (diff)
download:  tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.tar.gz
tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.tar.xz
tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.zip
[server] On mah way (lots of restructuring, some early db classes, sql dump of current schema)
Diffstat (limited to 'dozentenmodulserver/src/main/java/fileserv/FileChunk.java')
-rw-r--r--  dozentenmodulserver/src/main/java/fileserv/FileChunk.java  66
1 file changed, 0 insertions, 66 deletions
diff --git a/dozentenmodulserver/src/main/java/fileserv/FileChunk.java b/dozentenmodulserver/src/main/java/fileserv/FileChunk.java
deleted file mode 100644
index 1a95d27c..00000000
--- a/dozentenmodulserver/src/main/java/fileserv/FileChunk.java
+++ /dev/null
@@ -1,66 +0,0 @@
-package fileserv;
-
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import org.openslx.filetransfer.FileRange;
-
-public class FileChunk {
-
- public static final int CHUNK_SIZE_MIB = 16;
- public static final int CHUNK_SIZE = CHUNK_SIZE_MIB * (1024 * 1024);
-
- public final FileRange range;
- public final byte[] sha1sum;
- private int failCount = 0;
-
- public FileChunk(long startOffset, long endOffset, byte[] sha1sum) {
- this.range = new FileRange(startOffset, endOffset);
- this.sha1sum = sha1sum;
- }
-
- /**
- * Signal that transferring this chunk seems to have failed (checksum
- * mismatch).
- *
- * @return Number of times the transfer failed now
- */
- public synchronized int incFailed() {
- return ++failCount;
- }
-
- //
-
- public static int fileSizeToChunkCount(long fileSize) {
- return (int) ((fileSize + CHUNK_SIZE - 1) / CHUNK_SIZE);
- }
-
- public static void createChunkList(Collection<FileChunk> list, long fileSize, List<ByteBuffer> sha1Sums) {
- if (fileSize < 0)
- throw new IllegalArgumentException("fileSize cannot be negative");
- long chunkCount = fileSizeToChunkCount(fileSize);
- if (sha1Sums != null) {
- if (sha1Sums.size() != chunkCount)
- throw new IllegalArgumentException(
- "Passed a sha1sum list, but hash count in list doesn't match expected chunk count");
- long offset = 0;
- for (ByteBuffer sha1sum : sha1Sums) { // Do this as we don't know how efficient List.get(index) is...
- long end = offset + CHUNK_SIZE;
- if (end > fileSize)
- end = fileSize;
- list.add(new FileChunk(offset, end, sha1sum.array()));
- offset = end;
- }
- return;
- }
- long offset = 0;
- while (offset < fileSize) { // ...otherwise we could share this code
- long end = offset + CHUNK_SIZE;
- if (end > fileSize)
- end = fileSize;
- list.add(new FileChunk(offset, end, null));
- offset = end;
- }
- }
-}