diff options
| author | Simon Rettberg | 2015-06-11 18:40:49 +0200 |
|---|---|---|
| committer | Simon Rettberg | 2015-06-11 18:40:49 +0200 |
| commit | e0005ceecfd9281230c4add7575b18ee88307774 (patch) | |
| tree | a73bbcfc213df478c701aac120ae2b7c6e52bb1b /dozentenmodulserver/src/main/java/fileserv | |
| parent | [server] db stuff, new interface, ... (diff) | |
| download | tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.tar.gz tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.tar.xz tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.zip | |
[server] On mah way (lots of restructuring, some early db classes, sql dump of current schema)
Diffstat (limited to 'dozentenmodulserver/src/main/java/fileserv')
4 files changed, 0 insertions, 460 deletions
diff --git a/dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java b/dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java deleted file mode 100644 index 334345f3..00000000 --- a/dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java +++ /dev/null @@ -1,209 +0,0 @@ -package fileserv; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ThreadPoolExecutor; - -import models.Configuration; - -import org.apache.log4j.Logger; -import org.openslx.bwlp.thrift.iface.ImageDetailsRead; -import org.openslx.bwlp.thrift.iface.UserInfo; -import org.openslx.filetransfer.DataReceivedCallback; -import org.openslx.filetransfer.Downloader; -import org.openslx.filetransfer.FileRange; -import org.openslx.filetransfer.WantRangeCallback; - -import util.FileSystem; -import util.Formatter; - -public class ActiveUpload { - private static final Logger LOGGER = Logger.getLogger(ActiveUpload.class); - - /** - * This is an active upload, so on our end, we have a Downloader. - */ - private Downloader download = null; - - private final File destinationFile; - - private final RandomAccessFile outFile; - - private final ChunkList chunks; - - private final long fileSize; - - /** - * User owning this uploaded file. - */ - private final UserInfo owner; - - /** - * Base image this upload is a new version for. 
- */ - private final ImageDetailsRead image; - - // TODO: Use HashList for verification - - public ActiveUpload(UserInfo owner, ImageDetailsRead image, File destinationFile, long fileSize, - List<ByteBuffer> sha1Sums) throws FileNotFoundException { - this.destinationFile = destinationFile; - this.outFile = new RandomAccessFile(destinationFile, "rw"); - this.chunks = new ChunkList(fileSize, sha1Sums); - this.owner = owner; - this.image = image; - this.fileSize = fileSize; - } - - /** - * Add another connection for this file transfer. Currently only one - * connection is allowed, but this might change in the future. - * - * @param connection - * @return true if the connection is accepted, false if it should be - * discarded - */ - public synchronized boolean addConnection(Downloader connection, ThreadPoolExecutor pool) { - if (download != null || chunks.isComplete()) - return false; - download = connection; - pool.execute(new Runnable() { - @Override - public void run() { - CbHandler cbh = new CbHandler(); - if (!download.download(cbh, cbh) && cbh.currentChunk != null) { - // If the download failed and we have a current chunk, put it back into - // the queue, so it will be handled again later... - chunks.markFailed(cbh.currentChunk); - } - } - }); - return true; - } - - /** - * Write some data to the local file. Thread safe so we could - * have multiple concurrent connections later. - * - * @param fileOffset - * @param dataLength - * @param data - * @return - */ - private boolean writeFileData(long fileOffset, int dataLength, byte[] data) { - synchronized (outFile) { - try { - outFile.seek(fileOffset); - outFile.write(data, 0, dataLength); - } catch (IOException e) { - LOGGER.error("Cannot write to '" + destinationFile - + "'. Disk full, network storage error, bad permissions, ...?", e); - return false; - } - } - return true; - } - - private void finishUpload() { - File file = destinationFile; - // Ready to go. 
First step: Rename temp file to something usable - File destination = new File(file.getParent(), Formatter.vmName(owner, image.imageName)); - // Sanity check: destination should be a sub directory of the vmStorePath - String relPath = FileSystem.getRelativePath(destination, Configuration.getVmStoreBasePath()); - if (relPath == null) { - LOGGER.warn(destination.getAbsolutePath() + " is not a subdir of " - + Configuration.getVmStoreBasePath().getAbsolutePath()); - // TODO: Update state to failed... - } - - // Execute rename - boolean ret = false; - Exception renameException = null; - try { - ret = file.renameTo(destination); - } catch (Exception e) { - ret = false; - renameException = e; - } - if (!ret) { - // Rename failed :-( - LOGGER.warn( - "Could not rename '" + file.getAbsolutePath() + "' to '" + destination.getAbsolutePath() - + "'", renameException); - // TODO: Update state.... - } - - // Now insert meta data into DB - - final String imageVersionId = UUID.randomUUID().toString(); - - // TODO: SQL magic, update state - } - - public void cancel() { - // TODO Auto-generated method stub - - } - - /** - * Get user owning this upload. Can be null in special cases. - * - * @return instance of UserInfo for the according user. - */ - public UserInfo getOwner() { - return this.owner; - } - - public boolean isComplete() { - return chunks.isComplete() && destinationFile.length() == this.fileSize; - } - - public File getDestinationFile() { - return this.destinationFile; - } - - public long getSize() { - return this.fileSize; - } - - /** - * Callback class for an instance of the Downloader, which supplies - * the Downloader with wanted file ranges, and handles incoming data. - */ - private class CbHandler implements WantRangeCallback, DataReceivedCallback { - /** - * The current chunk being transfered. 
- */ - public FileChunk currentChunk = null; - - @Override - public boolean dataReceived(long fileOffset, int dataLength, byte[] data) { - // TODO: Maybe cache in RAM and write full CHUNK_SIZE blocks at a time? - // Would probably help with slower storage, especially if it's using - // rotating disks and we're running multiple uploads. - // Also we wouldn't have to re-read a block form disk for sha1 checking. - return writeFileData(fileOffset, dataLength, data); - } - - @Override - public FileRange get() { - if (currentChunk != null) { - // TODO: A chunk was requested before, check hash and requeue if not matching - // This needs to be async (own thread) so will be a little complicated - } - // Get next missing chunk - currentChunk = chunks.getMissing(); - if (currentChunk == null) - return null; // No more chunks, returning null tells the Downloader we're done. - return currentChunk.range; - } - } - - // TODO: Clean up old stale uploads - -} diff --git a/dozentenmodulserver/src/main/java/fileserv/ChunkList.java b/dozentenmodulserver/src/main/java/fileserv/ChunkList.java deleted file mode 100644 index 95b3e1fa..00000000 --- a/dozentenmodulserver/src/main/java/fileserv/ChunkList.java +++ /dev/null @@ -1,78 +0,0 @@ -package fileserv; - -import java.nio.ByteBuffer; -import java.util.LinkedList; -import java.util.List; - -import org.apache.log4j.Logger; - -public class ChunkList { - - private static final Logger LOGGER = Logger.getLogger(ChunkList.class); - - /** - * Chunks that are missing from the file - */ - private final List<FileChunk> missingChunks = new LinkedList<>(); - - /** - * Chunks that are currently being uploaded or hash-checked - */ - private final List<FileChunk> pendingChunks = new LinkedList<>(); - - // Do we need to keep valid chunks, or chunks that failed too many times? 
- - public ChunkList(long fileSize, List<ByteBuffer> sha1Sums) { - FileChunk.createChunkList(missingChunks, fileSize, sha1Sums); - } - - /** - * Get a missing chunk, marking it pending. - * - * @return chunk marked as missing - */ - public synchronized FileChunk getMissing() { - if (missingChunks.isEmpty()) - return null; - FileChunk c = missingChunks.remove(0); - pendingChunks.add(c); - return c; - } - - /** - * Mark a chunk currently transferring as successfully transfered. - * - * @param c The chunk in question - */ - public synchronized void markSuccessful(FileChunk c) { - if (!pendingChunks.remove(c)) { - LOGGER.warn("Inconsistent state: markTransferred called for Chunk " + c.toString() - + ", but chunk is not marked as currently transferring!"); - return; - } - } - - /** - * Mark a chunk currently transferring or being hash checked as failed - * transfer. This increases its fail count and re-adds it to the list of - * missing chunks. - * - * @param c The chunk in question - * @return Number of times transfer of this chunk failed - */ - public synchronized int markFailed(FileChunk c) { - if (!pendingChunks.remove(c)) { - LOGGER.warn("Inconsistent state: markTransferred called for Chunk " + c.toString() - + ", but chunk is not marked as currently transferring!"); - return -1; - } - // Add as first element so it will be re-transmitted immediately - missingChunks.add(0, c); - return c.incFailed(); - } - - public synchronized boolean isComplete() { - return missingChunks.isEmpty() && pendingChunks.isEmpty(); - } - -} diff --git a/dozentenmodulserver/src/main/java/fileserv/FileChunk.java b/dozentenmodulserver/src/main/java/fileserv/FileChunk.java deleted file mode 100644 index 1a95d27c..00000000 --- a/dozentenmodulserver/src/main/java/fileserv/FileChunk.java +++ /dev/null @@ -1,66 +0,0 @@ -package fileserv; - -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.List; - -import org.openslx.filetransfer.FileRange; - -public class FileChunk { - 
- public static final int CHUNK_SIZE_MIB = 16; - public static final int CHUNK_SIZE = CHUNK_SIZE_MIB * (1024 * 1024); - - public final FileRange range; - public final byte[] sha1sum; - private int failCount = 0; - - public FileChunk(long startOffset, long endOffset, byte[] sha1sum) { - this.range = new FileRange(startOffset, endOffset); - this.sha1sum = sha1sum; - } - - /** - * Signal that transferring this chunk seems to have failed (checksum - * mismatch). - * - * @return Number of times the transfer failed now - */ - public synchronized int incFailed() { - return ++failCount; - } - - // - - public static int fileSizeToChunkCount(long fileSize) { - return (int) ((fileSize + CHUNK_SIZE - 1) / CHUNK_SIZE); - } - - public static void createChunkList(Collection<FileChunk> list, long fileSize, List<ByteBuffer> sha1Sums) { - if (fileSize < 0) - throw new IllegalArgumentException("fileSize cannot be negative"); - long chunkCount = fileSizeToChunkCount(fileSize); - if (sha1Sums != null) { - if (sha1Sums.size() != chunkCount) - throw new IllegalArgumentException( - "Passed a sha1sum list, but hash count in list doesn't match expected chunk count"); - long offset = 0; - for (ByteBuffer sha1sum : sha1Sums) { // Do this as we don't know how efficient List.get(index) is... 
- long end = offset + CHUNK_SIZE; - if (end > fileSize) - end = fileSize; - list.add(new FileChunk(offset, end, sha1sum.array())); - offset = end; - } - return; - } - long offset = 0; - while (offset < fileSize) { // ...otherwise we could share this code - long end = offset + CHUNK_SIZE; - if (end > fileSize) - end = fileSize; - list.add(new FileChunk(offset, end, null)); - offset = end; - } - } -} diff --git a/dozentenmodulserver/src/main/java/fileserv/FileServer.java b/dozentenmodulserver/src/main/java/fileserv/FileServer.java deleted file mode 100644 index e82fa39c..00000000 --- a/dozentenmodulserver/src/main/java/fileserv/FileServer.java +++ /dev/null @@ -1,107 +0,0 @@ -package fileserv; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -import org.openslx.bwlp.thrift.iface.TTransferRejectedException; -import org.openslx.bwlp.thrift.iface.UserInfo; -import org.openslx.filetransfer.Downloader; -import org.openslx.filetransfer.IncomingEvent; -import org.openslx.filetransfer.Listener; -import org.openslx.filetransfer.Uploader; - -import util.Constants; -import util.Formatter; - -public class FileServer implements IncomingEvent { - - /** - * Listener for incoming unencrypted connections - */ - private Listener plainListener = new Listener(this, null, 9092); // TODO: Config - - /** - * All currently running uploads, indexed by token - */ - private Map<String, ActiveUpload> uploads = new ConcurrentHashMap<>(); - - private static final FileServer globalInstance = new FileServer(); - - private FileServer() { - } - - public static FileServer instance() { - return globalInstance; - } - - public boolean start() { - boolean ret = plainListener.start(); - // TODO: Start SSL listener too - return ret; - } - - @Override - public void 
incomingDownloadRequest(Uploader uploader) throws IOException { - // TODO Auto-generated method stub - - } - - @Override - public void incomingUploadRequest(Downloader downloader) throws IOException { - // TODO Auto-generated method stub - - } - - /** - * Get an upload instance by given token. - * - * @param uploadToken - * @return - */ - public ActiveUpload getUploadByToken(String uploadToken) { - return uploads.get(uploadToken); - } - - public String createNewUserUpload(UserInfo owner, long fileSize, List<ByteBuffer> sha1Sums) - throws TTransferRejectedException, FileNotFoundException { - Iterator<ActiveUpload> it = uploads.values().iterator(); - int activeUploads = 0; - while (it.hasNext()) { - ActiveUpload upload = it.next(); - if (upload.isComplete()) { - // TODO: Check age (short timeout) and remove - continue; - } else { - // Check age (long timeout) and remove - } - activeUploads++; - } - if (activeUploads > Constants.MAX_UPLOADS) - throw new TTransferRejectedException("Server busy. Too many running uploads."); - File destinationFile = null; - do { - destinationFile = Formatter.getTempImageName(); - } while (destinationFile.exists()); - // TODO: Pass image - ActiveUpload upload = new ActiveUpload(owner, null, destinationFile, fileSize, sha1Sums); - String key = UUID.randomUUID().toString(); - uploads.put(key, upload); - return key; - } - - public int getPlainPort() { - return plainListener.getPort(); - } - - public int getSslPort() { - return 0; // TODO - } - -} |