summary refs log tree commit diff stats
path: root/dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java
diff options
context:
space:
mode:
author	Simon Rettberg	2015-06-11 18:40:49 +0200
committer	Simon Rettberg	2015-06-11 18:40:49 +0200
commit	e0005ceecfd9281230c4add7575b18ee88307774 (patch)
treea73bbcfc213df478c701aac120ae2b7c6e52bb1b /dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java
parent[server] db stuff, new interface, ... (diff)
download	tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.tar.gz
tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.tar.xz
tutor-module-e0005ceecfd9281230c4add7575b18ee88307774.zip
[server] On mah way (lots of restructuring, some early db classes, sql dump of current schema)
Diffstat (limited to 'dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java')
-rw-r--r--	dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java	209
1 file changed, 0 insertions, 209 deletions
diff --git a/dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java b/dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java
deleted file mode 100644
index 334345f3..00000000
--- a/dozentenmodulserver/src/main/java/fileserv/ActiveUpload.java
+++ /dev/null
@@ -1,209 +0,0 @@
-package fileserv;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.ThreadPoolExecutor;
-
-import models.Configuration;
-
-import org.apache.log4j.Logger;
-import org.openslx.bwlp.thrift.iface.ImageDetailsRead;
-import org.openslx.bwlp.thrift.iface.UserInfo;
-import org.openslx.filetransfer.DataReceivedCallback;
-import org.openslx.filetransfer.Downloader;
-import org.openslx.filetransfer.FileRange;
-import org.openslx.filetransfer.WantRangeCallback;
-
-import util.FileSystem;
-import util.Formatter;
-
-/**
- * Server-side state for a single client-initiated image upload. Note the
- * inverted naming: because the client uploads, the server end holds a
- * {@link Downloader} that pulls the file data chunk by chunk into a
- * temporary destination file, tracking progress via a ChunkList.
- * Thread-safety: connection attachment is synchronized; file writes are
- * guarded by the outFile monitor.
- */
-public class ActiveUpload {
- private static final Logger LOGGER = Logger.getLogger(ActiveUpload.class);
-
- /**
- * This is an active upload, so on our end, we have a Downloader.
- */
- private Downloader download = null;
-
- // Temporary file the incoming data is written to; renamed on completion.
- private final File destinationFile;
-
- // Random-access handle to destinationFile, so chunks can arrive out of order.
- private final RandomAccessFile outFile;
-
- // Tracks which file chunks are still missing / failed / complete.
- private final ChunkList chunks;
-
- // Expected total size of the uploaded file in bytes.
- private final long fileSize;
-
- /**
- * User owning this uploaded file.
- */
- private final UserInfo owner;
-
- /**
- * Base image this upload is a new version for.
- */
- private final ImageDetailsRead image;
-
- // TODO: Use HashList for verification
-
- /**
- * @param owner user owning the uploaded file; may be null in special cases
- * (see getOwner())
- * @param image base image this upload is a new version for
- * @param destinationFile temporary file to write the incoming data to
- * @param fileSize expected total size of the upload in bytes
- * @param sha1Sums per-chunk SHA-1 sums used to build the ChunkList
- * @throws FileNotFoundException if destinationFile cannot be opened "rw"
- */
- public ActiveUpload(UserInfo owner, ImageDetailsRead image, File destinationFile, long fileSize,
- List<ByteBuffer> sha1Sums) throws FileNotFoundException {
- this.destinationFile = destinationFile;
- this.outFile = new RandomAccessFile(destinationFile, "rw");
- this.chunks = new ChunkList(fileSize, sha1Sums);
- this.owner = owner;
- this.image = image;
- this.fileSize = fileSize;
- }
-
- /**
- * Add another connection for this file transfer. Currently only one
- * connection is allowed, but this might change in the future.
- *
- * @param connection
- * @param pool executor the blocking transfer loop is submitted to
- * @return true if the connection is accepted, false if it should be
- * discarded
- */
- public synchronized boolean addConnection(Downloader connection, ThreadPoolExecutor pool) {
- if (download != null || chunks.isComplete())
- return false;
- download = connection;
- pool.execute(new Runnable() {
- @Override
- public void run() {
- CbHandler cbh = new CbHandler();
- if (!download.download(cbh, cbh) && cbh.currentChunk != null) {
- // If the download failed and we have a current chunk, put it back into
- // the queue, so it will be handled again later...
- chunks.markFailed(cbh.currentChunk);
- }
- }
- });
- return true;
- }
-
- /**
- * Write some data to the local file. Thread safe so we could
- * have multiple concurrent connections later.
- *
- * @param fileOffset
- * @param dataLength
- * @param data
- * @return true if all bytes were written, false on any I/O error
- */
- private boolean writeFileData(long fileOffset, int dataLength, byte[] data) {
- synchronized (outFile) {
- try {
- outFile.seek(fileOffset);
- outFile.write(data, 0, dataLength);
- } catch (IOException e) {
- LOGGER.error("Cannot write to '" + destinationFile
- + "'. Disk full, network storage error, bad permissions, ...?", e);
- return false;
- }
- }
- return true;
- }
-
- /**
- * Finalize a completed upload: move the temp file to its proper name
- * below the VM store and register the new image version in the database.
- * NOTE(review): error paths and the DB insert are still TODO stubs, and
- * nothing visible here calls this method yet — presumably wired up once
- * chunk verification lands.
- */
- private void finishUpload() {
- File file = destinationFile;
- // Ready to go. First step: Rename temp file to something usable
- File destination = new File(file.getParent(), Formatter.vmName(owner, image.imageName));
- // Sanity check: destination should be a sub directory of the vmStorePath
- String relPath = FileSystem.getRelativePath(destination, Configuration.getVmStoreBasePath());
- if (relPath == null) {
- LOGGER.warn(destination.getAbsolutePath() + " is not a subdir of "
- + Configuration.getVmStoreBasePath().getAbsolutePath());
- // TODO: Update state to failed...
- }
-
- // Execute rename
- boolean ret = false;
- Exception renameException = null;
- try {
- ret = file.renameTo(destination);
- } catch (Exception e) {
- ret = false;
- renameException = e;
- }
- if (!ret) {
- // Rename failed :-(
- LOGGER.warn(
- "Could not rename '" + file.getAbsolutePath() + "' to '" + destination.getAbsolutePath()
- + "'", renameException);
- // TODO: Update state....
- }
-
- // Now insert meta data into DB
-
- final String imageVersionId = UUID.randomUUID().toString();
-
- // TODO: SQL magic, update state
- }
-
- /**
- * Abort this upload. Currently a no-op stub; presumably should close
- * the connection and outFile and delete the temp file — TODO upstream.
- */
- public void cancel() {
- // TODO Auto-generated method stub
-
- }
-
- /**
- * Get user owning this upload. Can be null in special cases.
- *
- * @return instance of UserInfo for the according user.
- */
- public UserInfo getOwner() {
- return this.owner;
- }
-
- /**
- * @return true if every chunk has been received AND the file on disk has
- * reached the expected size.
- */
- public boolean isComplete() {
- return chunks.isComplete() && destinationFile.length() == this.fileSize;
- }
-
- /** @return temporary file this upload is being written to. */
- public File getDestinationFile() {
- return this.destinationFile;
- }
-
- /** @return expected total upload size in bytes (not bytes received so far). */
- public long getSize() {
- return this.fileSize;
- }
-
- /**
- * Callback class for an instance of the Downloader, which supplies
- * the Downloader with wanted file ranges, and handles incoming data.
- */
- private class CbHandler implements WantRangeCallback, DataReceivedCallback {
- /**
- * The current chunk being transfered.
- */
- public FileChunk currentChunk = null;
-
- // Called by the Downloader for each received block; offset is absolute
- // within the destination file.
- @Override
- public boolean dataReceived(long fileOffset, int dataLength, byte[] data) {
- // TODO: Maybe cache in RAM and write full CHUNK_SIZE blocks at a time?
- // Would probably help with slower storage, especially if it's using
- // rotating disks and we're running multiple uploads.
- // Also we wouldn't have to re-read a block form disk for sha1 checking.
- return writeFileData(fileOffset, dataLength, data);
- }
-
- // Called by the Downloader to ask which byte range to fetch next;
- // returning null signals end of transfer.
- @Override
- public FileRange get() {
- if (currentChunk != null) {
- // TODO: A chunk was requested before, check hash and requeue if not matching
- // This needs to be async (own thread) so will be a little complicated
- }
- // Get next missing chunk
- currentChunk = chunks.getMissing();
- if (currentChunk == null)
- return null; // No more chunks, returning null tells the Downloader we're done.
- return currentChunk.range;
- }
- }
-
- // TODO: Clean up old stale uploads
-
-}