author     Simon Rettberg  2016-04-13 18:38:47 +0200
committer  Simon Rettberg  2016-04-13 18:38:47 +0200
commit     34ca2905c38d17bbded01cf7497eca790e760a39 (patch)
tree       4dbaff08d7f88d48e685bd514b907c8d77571f16
parent     remove ruleId from NetRule struct (diff)
download   master-sync-shared-34ca2905c38d17bbded01cf7497eca790e760a39.tar.gz / .tar.xz / .zip
Preparations/changes for global image sync
-rw-r--r--  src/main/java/org/openslx/bwlp/thrift/iface/MasterServer.java          986
-rw-r--r--  src/main/java/org/openslx/filetransfer/Transfer.java                    10
-rw-r--r--  src/main/java/org/openslx/filetransfer/util/AbstractTransfer.java      133
-rw-r--r--  src/main/java/org/openslx/filetransfer/util/ChunkList.java               6
-rw-r--r--  src/main/java/org/openslx/filetransfer/util/FileChunk.java               3
-rw-r--r--  src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java  475
-rw-r--r--  src/main/java/org/openslx/util/FsUtil.java                              36
-rw-r--r--  src/main/java/org/openslx/util/GrowingThreadPoolExecutor.java           85
-rw-r--r--  src/main/java/org/openslx/util/ThriftUtil.java                          34
-rw-r--r--  src/main/thrift/bwlp.thrift                                              5
10 files changed, 1697 insertions, 76 deletions
diff --git a/src/main/java/org/openslx/bwlp/thrift/iface/MasterServer.java b/src/main/java/org/openslx/bwlp/thrift/iface/MasterServer.java
index 1635f58..e7f7b06 100644
--- a/src/main/java/org/openslx/bwlp/thrift/iface/MasterServer.java
+++ b/src/main/java/org/openslx/bwlp/thrift/iface/MasterServer.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-01-04")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-04-07")
public class MasterServer {
public interface Iface {
@@ -45,6 +45,8 @@ public class MasterServer {
public ClientSessionData localAccountLogin(String login, String password) throws TAuthorizationException, TInvocationException, org.apache.thrift.TException;
+ public void setUsedSatellite(String sessionId, String satelliteName) throws org.apache.thrift.TException;
+
public List<UserInfo> findUser(String sessionId, String organizationId, String searchTerm) throws TAuthorizationException, TInvocationException, org.apache.thrift.TException;
public List<ImagePublishData> getPublicImages(String sessionId, int page) throws TAuthorizationException, TInvocationException, org.apache.thrift.TException;
@@ -61,7 +63,7 @@ public class MasterServer {
public ImagePublishData getImageData(String serverSessionId, String imageVersionId) throws TAuthorizationException, TInvocationException, TNotFoundException, org.apache.thrift.TException;
- public TransferInformation submitImage(String serverSessionId, ImagePublishData imageDescription, List<ByteBuffer> blockHashes) throws TAuthorizationException, TInvocationException, TTransferRejectedException, org.apache.thrift.TException;
+ public TransferInformation submitImage(String userToken, ImagePublishData imageDescription, List<ByteBuffer> blockHashes) throws TAuthorizationException, TInvocationException, TTransferRejectedException, org.apache.thrift.TException;
public int registerSatellite(String userToken, String displayName, List<String> addresses, String modulus, String exponent, ByteBuffer certsha256) throws TInvocationException, org.apache.thrift.TException;
@@ -89,6 +91,8 @@ public class MasterServer {
public void localAccountLogin(String login, String password, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void setUsedSatellite(String sessionId, String satelliteName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
public void findUser(String sessionId, String organizationId, String searchTerm, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void getPublicImages(String sessionId, int page, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -105,7 +109,7 @@ public class MasterServer {
public void getImageData(String serverSessionId, String imageVersionId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
- public void submitImage(String serverSessionId, ImagePublishData imageDescription, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void submitImage(String userToken, ImagePublishData imageDescription, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void registerSatellite(String userToken, String displayName, List<String> addresses, String modulus, String exponent, ByteBuffer certsha256, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -227,6 +231,27 @@ public class MasterServer {
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "localAccountLogin failed: unknown result");
}
+ public void setUsedSatellite(String sessionId, String satelliteName) throws org.apache.thrift.TException
+ {
+ send_setUsedSatellite(sessionId, satelliteName);
+ recv_setUsedSatellite();
+ }
+
+ public void send_setUsedSatellite(String sessionId, String satelliteName) throws org.apache.thrift.TException
+ {
+ setUsedSatellite_args args = new setUsedSatellite_args();
+ args.setSessionId(sessionId);
+ args.setSatelliteName(satelliteName);
+ sendBase("setUsedSatellite", args);
+ }
+
+ public void recv_setUsedSatellite() throws org.apache.thrift.TException
+ {
+ setUsedSatellite_result result = new setUsedSatellite_result();
+ receiveBase(result, "setUsedSatellite");
+ return;
+ }
+
public List<UserInfo> findUser(String sessionId, String organizationId, String searchTerm) throws TAuthorizationException, TInvocationException, org.apache.thrift.TException
{
send_findUser(sessionId, organizationId, searchTerm);
@@ -452,16 +477,16 @@ public class MasterServer {
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getImageData failed: unknown result");
}
- public TransferInformation submitImage(String serverSessionId, ImagePublishData imageDescription, List<ByteBuffer> blockHashes) throws TAuthorizationException, TInvocationException, TTransferRejectedException, org.apache.thrift.TException
+ public TransferInformation submitImage(String userToken, ImagePublishData imageDescription, List<ByteBuffer> blockHashes) throws TAuthorizationException, TInvocationException, TTransferRejectedException, org.apache.thrift.TException
{
- send_submitImage(serverSessionId, imageDescription, blockHashes);
+ send_submitImage(userToken, imageDescription, blockHashes);
return recv_submitImage();
}
- public void send_submitImage(String serverSessionId, ImagePublishData imageDescription, List<ByteBuffer> blockHashes) throws org.apache.thrift.TException
+ public void send_submitImage(String userToken, ImagePublishData imageDescription, List<ByteBuffer> blockHashes) throws org.apache.thrift.TException
{
submitImage_args args = new submitImage_args();
- args.setServerSessionId(serverSessionId);
+ args.setUserToken(userToken);
args.setImageDescription(imageDescription);
args.setBlockHashes(blockHashes);
sendBase("submitImage", args);
@@ -825,6 +850,41 @@ public class MasterServer {
}
}
+ public void setUsedSatellite(String sessionId, String satelliteName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ setUsedSatellite_call method_call = new setUsedSatellite_call(sessionId, satelliteName, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class setUsedSatellite_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private String sessionId;
+ private String satelliteName;
+ public setUsedSatellite_call(String sessionId, String satelliteName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.sessionId = sessionId;
+ this.satelliteName = satelliteName;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("setUsedSatellite", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ setUsedSatellite_args args = new setUsedSatellite_args();
+ args.setSessionId(sessionId);
+ args.setSatelliteName(satelliteName);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public void getResult() throws org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ (new Client(prot)).recv_setUsedSatellite();
+ }
+ }
+
public void findUser(String sessionId, String organizationId, String searchTerm, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
findUser_call method_call = new findUser_call(sessionId, organizationId, searchTerm, resultHandler, this, ___protocolFactory, ___transport);
@@ -1096,20 +1156,20 @@ public class MasterServer {
}
}
- public void submitImage(String serverSessionId, ImagePublishData imageDescription, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ public void submitImage(String userToken, ImagePublishData imageDescription, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
- submitImage_call method_call = new submitImage_call(serverSessionId, imageDescription, blockHashes, resultHandler, this, ___protocolFactory, ___transport);
+ submitImage_call method_call = new submitImage_call(userToken, imageDescription, blockHashes, resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
public static class submitImage_call extends org.apache.thrift.async.TAsyncMethodCall {
- private String serverSessionId;
+ private String userToken;
private ImagePublishData imageDescription;
private List<ByteBuffer> blockHashes;
- public submitImage_call(String serverSessionId, ImagePublishData imageDescription, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ public submitImage_call(String userToken, ImagePublishData imageDescription, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
super(client, protocolFactory, transport, resultHandler, false);
- this.serverSessionId = serverSessionId;
+ this.userToken = userToken;
this.imageDescription = imageDescription;
this.blockHashes = blockHashes;
}
@@ -1117,7 +1177,7 @@ public class MasterServer {
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("submitImage", org.apache.thrift.protocol.TMessageType.CALL, 0));
submitImage_args args = new submitImage_args();
- args.setServerSessionId(serverSessionId);
+ args.setUserToken(userToken);
args.setImageDescription(imageDescription);
args.setBlockHashes(blockHashes);
args.write(prot);
@@ -1421,6 +1481,7 @@ public class MasterServer {
processMap.put("ping", new ping());
processMap.put("authenticate", new authenticate());
processMap.put("localAccountLogin", new localAccountLogin());
+ processMap.put("setUsedSatellite", new setUsedSatellite());
processMap.put("findUser", new findUser());
processMap.put("getPublicImages", new getPublicImages());
processMap.put("invalidateSession", new invalidateSession());
@@ -1514,6 +1575,26 @@ public class MasterServer {
}
}
+ public static class setUsedSatellite<I extends Iface> extends org.apache.thrift.ProcessFunction<I, setUsedSatellite_args> {
+ public setUsedSatellite() {
+ super("setUsedSatellite");
+ }
+
+ public setUsedSatellite_args getEmptyArgsInstance() {
+ return new setUsedSatellite_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public setUsedSatellite_result getResult(I iface, setUsedSatellite_args args) throws org.apache.thrift.TException {
+ setUsedSatellite_result result = new setUsedSatellite_result();
+ iface.setUsedSatellite(args.sessionId, args.satelliteName);
+ return result;
+ }
+ }
+
public static class findUser<I extends Iface> extends org.apache.thrift.ProcessFunction<I, findUser_args> {
public findUser() {
super("findUser");
@@ -1731,7 +1812,7 @@ public class MasterServer {
public submitImage_result getResult(I iface, submitImage_args args) throws org.apache.thrift.TException {
submitImage_result result = new submitImage_result();
try {
- result.success = iface.submitImage(args.serverSessionId, args.imageDescription, args.blockHashes);
+ result.success = iface.submitImage(args.userToken, args.imageDescription, args.blockHashes);
} catch (TAuthorizationException failure) {
result.failure = failure;
} catch (TInvocationException failure2) {
@@ -1959,6 +2040,7 @@ public class MasterServer {
processMap.put("ping", new ping());
processMap.put("authenticate", new authenticate());
processMap.put("localAccountLogin", new localAccountLogin());
+ processMap.put("setUsedSatellite", new setUsedSatellite());
processMap.put("findUser", new findUser());
processMap.put("getPublicImages", new getPublicImages());
processMap.put("invalidateSession", new invalidateSession());
@@ -2155,6 +2237,56 @@ public class MasterServer {
}
}
+ public static class setUsedSatellite<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, setUsedSatellite_args, Void> {
+ public setUsedSatellite() {
+ super("setUsedSatellite");
+ }
+
+ public setUsedSatellite_args getEmptyArgsInstance() {
+ return new setUsedSatellite_args();
+ }
+
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ setUsedSatellite_result result = new setUsedSatellite_result();
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ setUsedSatellite_result result = new setUsedSatellite_result();
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, setUsedSatellite_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.setUsedSatellite(args.sessionId, args.satelliteName,resultHandler);
+ }
+ }
+
public static class findUser<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, findUser_args, List<UserInfo>> {
public findUser() {
super("findUser");
@@ -2698,7 +2830,7 @@ public class MasterServer {
}
public void start(I iface, submitImage_args args, org.apache.thrift.async.AsyncMethodCallback<TransferInformation> resultHandler) throws TException {
- iface.submitImage(args.serverSessionId, args.imageDescription, args.blockHashes,resultHandler);
+ iface.submitImage(args.userToken, args.imageDescription, args.blockHashes,resultHandler);
}
}
@@ -5878,6 +6010,720 @@ public class MasterServer {
}
+ public static class setUsedSatellite_args implements org.apache.thrift.TBase<setUsedSatellite_args, setUsedSatellite_args._Fields>, java.io.Serializable, Cloneable, Comparable<setUsedSatellite_args> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setUsedSatellite_args");
+
+ private static final org.apache.thrift.protocol.TField SESSION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionId", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField SATELLITE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("satelliteName", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new setUsedSatellite_argsStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new setUsedSatellite_argsTupleSchemeFactory());
+ }
+
+ public String sessionId; // required
+ public String satelliteName; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ SESSION_ID((short)1, "sessionId"),
+ SATELLITE_NAME((short)2, "satelliteName");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // SESSION_ID
+ return SESSION_ID;
+ case 2: // SATELLITE_NAME
+ return SATELLITE_NAME;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.SESSION_ID, new org.apache.thrift.meta_data.FieldMetaData("sessionId", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Token")));
+ tmpMap.put(_Fields.SATELLITE_NAME, new org.apache.thrift.meta_data.FieldMetaData("satelliteName", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setUsedSatellite_args.class, metaDataMap);
+ }
+
+ public setUsedSatellite_args() {
+ }
+
+ public setUsedSatellite_args(
+ String sessionId,
+ String satelliteName)
+ {
+ this();
+ this.sessionId = sessionId;
+ this.satelliteName = satelliteName;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public setUsedSatellite_args(setUsedSatellite_args other) {
+ if (other.isSetSessionId()) {
+ this.sessionId = other.sessionId;
+ }
+ if (other.isSetSatelliteName()) {
+ this.satelliteName = other.satelliteName;
+ }
+ }
+
+ public setUsedSatellite_args deepCopy() {
+ return new setUsedSatellite_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.sessionId = null;
+ this.satelliteName = null;
+ }
+
+ public String getSessionId() {
+ return this.sessionId;
+ }
+
+ public setUsedSatellite_args setSessionId(String sessionId) {
+ this.sessionId = sessionId;
+ return this;
+ }
+
+ public void unsetSessionId() {
+ this.sessionId = null;
+ }
+
+ /** Returns true if field sessionId is set (has been assigned a value) and false otherwise */
+ public boolean isSetSessionId() {
+ return this.sessionId != null;
+ }
+
+ public void setSessionIdIsSet(boolean value) {
+ if (!value) {
+ this.sessionId = null;
+ }
+ }
+
+ public String getSatelliteName() {
+ return this.satelliteName;
+ }
+
+ public setUsedSatellite_args setSatelliteName(String satelliteName) {
+ this.satelliteName = satelliteName;
+ return this;
+ }
+
+ public void unsetSatelliteName() {
+ this.satelliteName = null;
+ }
+
+ /** Returns true if field satelliteName is set (has been assigned a value) and false otherwise */
+ public boolean isSetSatelliteName() {
+ return this.satelliteName != null;
+ }
+
+ public void setSatelliteNameIsSet(boolean value) {
+ if (!value) {
+ this.satelliteName = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case SESSION_ID:
+ if (value == null) {
+ unsetSessionId();
+ } else {
+ setSessionId((String)value);
+ }
+ break;
+
+ case SATELLITE_NAME:
+ if (value == null) {
+ unsetSatelliteName();
+ } else {
+ setSatelliteName((String)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case SESSION_ID:
+ return getSessionId();
+
+ case SATELLITE_NAME:
+ return getSatelliteName();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case SESSION_ID:
+ return isSetSessionId();
+ case SATELLITE_NAME:
+ return isSetSatelliteName();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof setUsedSatellite_args)
+ return this.equals((setUsedSatellite_args)that);
+ return false;
+ }
+
+ public boolean equals(setUsedSatellite_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_sessionId = true && this.isSetSessionId();
+ boolean that_present_sessionId = true && that.isSetSessionId();
+ if (this_present_sessionId || that_present_sessionId) {
+ if (!(this_present_sessionId && that_present_sessionId))
+ return false;
+ if (!this.sessionId.equals(that.sessionId))
+ return false;
+ }
+
+ boolean this_present_satelliteName = true && this.isSetSatelliteName();
+ boolean that_present_satelliteName = true && that.isSetSatelliteName();
+ if (this_present_satelliteName || that_present_satelliteName) {
+ if (!(this_present_satelliteName && that_present_satelliteName))
+ return false;
+ if (!this.satelliteName.equals(that.satelliteName))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_sessionId = true && (isSetSessionId());
+ list.add(present_sessionId);
+ if (present_sessionId)
+ list.add(sessionId);
+
+ boolean present_satelliteName = true && (isSetSatelliteName());
+ list.add(present_satelliteName);
+ if (present_satelliteName)
+ list.add(satelliteName);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(setUsedSatellite_args other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetSessionId()).compareTo(other.isSetSessionId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetSessionId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionId, other.sessionId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetSatelliteName()).compareTo(other.isSetSatelliteName());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetSatelliteName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.satelliteName, other.satelliteName);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("setUsedSatellite_args(");
+ boolean first = true;
+
+ sb.append("sessionId:");
+ if (this.sessionId == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.sessionId);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("satelliteName:");
+ if (this.satelliteName == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.satelliteName);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class setUsedSatellite_argsStandardSchemeFactory implements SchemeFactory {
+ public setUsedSatellite_argsStandardScheme getScheme() {
+ return new setUsedSatellite_argsStandardScheme();
+ }
+ }
+
+ private static class setUsedSatellite_argsStandardScheme extends StandardScheme<setUsedSatellite_args> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, setUsedSatellite_args struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // SESSION_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.sessionId = iprot.readString();
+ struct.setSessionIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // SATELLITE_NAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.satelliteName = iprot.readString();
+ struct.setSatelliteNameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, setUsedSatellite_args struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.sessionId != null) {
+ oprot.writeFieldBegin(SESSION_ID_FIELD_DESC);
+ oprot.writeString(struct.sessionId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.satelliteName != null) {
+ oprot.writeFieldBegin(SATELLITE_NAME_FIELD_DESC);
+ oprot.writeString(struct.satelliteName);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class setUsedSatellite_argsTupleSchemeFactory implements SchemeFactory {
+ public setUsedSatellite_argsTupleScheme getScheme() {
+ return new setUsedSatellite_argsTupleScheme();
+ }
+ }
+
+ private static class setUsedSatellite_argsTupleScheme extends TupleScheme<setUsedSatellite_args> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, setUsedSatellite_args struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetSessionId()) {
+ optionals.set(0);
+ }
+ if (struct.isSetSatelliteName()) {
+ optionals.set(1);
+ }
+ oprot.writeBitSet(optionals, 2);
+ if (struct.isSetSessionId()) {
+ oprot.writeString(struct.sessionId);
+ }
+ if (struct.isSetSatelliteName()) {
+ oprot.writeString(struct.satelliteName);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, setUsedSatellite_args struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(2);
+ if (incoming.get(0)) {
+ struct.sessionId = iprot.readString();
+ struct.setSessionIdIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.satelliteName = iprot.readString();
+ struct.setSatelliteNameIsSet(true);
+ }
+ }
+ }
+
+ }
+
+ public static class setUsedSatellite_result implements org.apache.thrift.TBase<setUsedSatellite_result, setUsedSatellite_result._Fields>, java.io.Serializable, Cloneable, Comparable<setUsedSatellite_result> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setUsedSatellite_result");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new setUsedSatellite_resultStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new setUsedSatellite_resultTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setUsedSatellite_result.class, metaDataMap);
+ }
+
+ public setUsedSatellite_result() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public setUsedSatellite_result(setUsedSatellite_result other) {
+ }
+
+ public setUsedSatellite_result deepCopy() {
+ return new setUsedSatellite_result(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof setUsedSatellite_result)
+ return this.equals((setUsedSatellite_result)that);
+ return false;
+ }
+
+ public boolean equals(setUsedSatellite_result that) {
+ if (that == null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(setUsedSatellite_result other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("setUsedSatellite_result(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class setUsedSatellite_resultStandardSchemeFactory implements SchemeFactory {
+ public setUsedSatellite_resultStandardScheme getScheme() {
+ return new setUsedSatellite_resultStandardScheme();
+ }
+ }
+
+ private static class setUsedSatellite_resultStandardScheme extends StandardScheme<setUsedSatellite_result> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, setUsedSatellite_result struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, setUsedSatellite_result struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class setUsedSatellite_resultTupleSchemeFactory implements SchemeFactory {
+ public setUsedSatellite_resultTupleScheme getScheme() {
+ return new setUsedSatellite_resultTupleScheme();
+ }
+ }
+
+ private static class setUsedSatellite_resultTupleScheme extends TupleScheme<setUsedSatellite_result> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, setUsedSatellite_result struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, setUsedSatellite_result struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+ }
+
public static class findUser_args implements org.apache.thrift.TBase<findUser_args, findUser_args._Fields>, java.io.Serializable, Cloneable, Comparable<findUser_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("findUser_args");
@@ -13605,7 +14451,7 @@ public class MasterServer {
public static class submitImage_args implements org.apache.thrift.TBase<submitImage_args, submitImage_args._Fields>, java.io.Serializable, Cloneable, Comparable<submitImage_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("submitImage_args");
- private static final org.apache.thrift.protocol.TField SERVER_SESSION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("serverSessionId", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField USER_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("userToken", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final org.apache.thrift.protocol.TField IMAGE_DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("imageDescription", org.apache.thrift.protocol.TType.STRUCT, (short)2);
private static final org.apache.thrift.protocol.TField BLOCK_HASHES_FIELD_DESC = new org.apache.thrift.protocol.TField("blockHashes", org.apache.thrift.protocol.TType.LIST, (short)3);
@@ -13615,13 +14461,13 @@ public class MasterServer {
schemes.put(TupleScheme.class, new submitImage_argsTupleSchemeFactory());
}
- public String serverSessionId; // required
+ public String userToken; // required
public ImagePublishData imageDescription; // required
public List<ByteBuffer> blockHashes; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- SERVER_SESSION_ID((short)1, "serverSessionId"),
+ USER_TOKEN((short)1, "userToken"),
IMAGE_DESCRIPTION((short)2, "imageDescription"),
BLOCK_HASHES((short)3, "blockHashes");
@@ -13638,8 +14484,8 @@ public class MasterServer {
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
- case 1: // SERVER_SESSION_ID
- return SERVER_SESSION_ID;
+ case 1: // USER_TOKEN
+ return USER_TOKEN;
case 2: // IMAGE_DESCRIPTION
return IMAGE_DESCRIPTION;
case 3: // BLOCK_HASHES
@@ -13687,7 +14533,7 @@ public class MasterServer {
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.SERVER_SESSION_ID, new org.apache.thrift.meta_data.FieldMetaData("serverSessionId", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.USER_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("userToken", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Token")));
tmpMap.put(_Fields.IMAGE_DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("imageDescription", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ImagePublishData.class)));
@@ -13702,12 +14548,12 @@ public class MasterServer {
}
public submitImage_args(
- String serverSessionId,
+ String userToken,
ImagePublishData imageDescription,
List<ByteBuffer> blockHashes)
{
this();
- this.serverSessionId = serverSessionId;
+ this.userToken = userToken;
this.imageDescription = imageDescription;
this.blockHashes = blockHashes;
}
@@ -13716,8 +14562,8 @@ public class MasterServer {
* Performs a deep copy on <i>other</i>.
*/
public submitImage_args(submitImage_args other) {
- if (other.isSetServerSessionId()) {
- this.serverSessionId = other.serverSessionId;
+ if (other.isSetUserToken()) {
+ this.userToken = other.userToken;
}
if (other.isSetImageDescription()) {
this.imageDescription = new ImagePublishData(other.imageDescription);
@@ -13734,32 +14580,32 @@ public class MasterServer {
@Override
public void clear() {
- this.serverSessionId = null;
+ this.userToken = null;
this.imageDescription = null;
this.blockHashes = null;
}
- public String getServerSessionId() {
- return this.serverSessionId;
+ public String getUserToken() {
+ return this.userToken;
}
- public submitImage_args setServerSessionId(String serverSessionId) {
- this.serverSessionId = serverSessionId;
+ public submitImage_args setUserToken(String userToken) {
+ this.userToken = userToken;
return this;
}
- public void unsetServerSessionId() {
- this.serverSessionId = null;
+ public void unsetUserToken() {
+ this.userToken = null;
}
- /** Returns true if field serverSessionId is set (has been assigned a value) and false otherwise */
- public boolean isSetServerSessionId() {
- return this.serverSessionId != null;
+ /** Returns true if field userToken is set (has been assigned a value) and false otherwise */
+ public boolean isSetUserToken() {
+ return this.userToken != null;
}
- public void setServerSessionIdIsSet(boolean value) {
+ public void setUserTokenIsSet(boolean value) {
if (!value) {
- this.serverSessionId = null;
+ this.userToken = null;
}
}
@@ -13828,11 +14674,11 @@ public class MasterServer {
public void setFieldValue(_Fields field, Object value) {
switch (field) {
- case SERVER_SESSION_ID:
+ case USER_TOKEN:
if (value == null) {
- unsetServerSessionId();
+ unsetUserToken();
} else {
- setServerSessionId((String)value);
+ setUserToken((String)value);
}
break;
@@ -13857,8 +14703,8 @@ public class MasterServer {
public Object getFieldValue(_Fields field) {
switch (field) {
- case SERVER_SESSION_ID:
- return getServerSessionId();
+ case USER_TOKEN:
+ return getUserToken();
case IMAGE_DESCRIPTION:
return getImageDescription();
@@ -13877,8 +14723,8 @@ public class MasterServer {
}
switch (field) {
- case SERVER_SESSION_ID:
- return isSetServerSessionId();
+ case USER_TOKEN:
+ return isSetUserToken();
case IMAGE_DESCRIPTION:
return isSetImageDescription();
case BLOCK_HASHES:
@@ -13900,12 +14746,12 @@ public class MasterServer {
if (that == null)
return false;
- boolean this_present_serverSessionId = true && this.isSetServerSessionId();
- boolean that_present_serverSessionId = true && that.isSetServerSessionId();
- if (this_present_serverSessionId || that_present_serverSessionId) {
- if (!(this_present_serverSessionId && that_present_serverSessionId))
+ boolean this_present_userToken = true && this.isSetUserToken();
+ boolean that_present_userToken = true && that.isSetUserToken();
+ if (this_present_userToken || that_present_userToken) {
+ if (!(this_present_userToken && that_present_userToken))
return false;
- if (!this.serverSessionId.equals(that.serverSessionId))
+ if (!this.userToken.equals(that.userToken))
return false;
}
@@ -13934,10 +14780,10 @@ public class MasterServer {
public int hashCode() {
List<Object> list = new ArrayList<Object>();
- boolean present_serverSessionId = true && (isSetServerSessionId());
- list.add(present_serverSessionId);
- if (present_serverSessionId)
- list.add(serverSessionId);
+ boolean present_userToken = true && (isSetUserToken());
+ list.add(present_userToken);
+ if (present_userToken)
+ list.add(userToken);
boolean present_imageDescription = true && (isSetImageDescription());
list.add(present_imageDescription);
@@ -13960,12 +14806,12 @@ public class MasterServer {
int lastComparison = 0;
- lastComparison = Boolean.valueOf(isSetServerSessionId()).compareTo(other.isSetServerSessionId());
+ lastComparison = Boolean.valueOf(isSetUserToken()).compareTo(other.isSetUserToken());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetServerSessionId()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serverSessionId, other.serverSessionId);
+ if (isSetUserToken()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.userToken, other.userToken);
if (lastComparison != 0) {
return lastComparison;
}
@@ -14010,11 +14856,11 @@ public class MasterServer {
StringBuilder sb = new StringBuilder("submitImage_args(");
boolean first = true;
- sb.append("serverSessionId:");
- if (this.serverSessionId == null) {
+ sb.append("userToken:");
+ if (this.userToken == null) {
sb.append("null");
} else {
- sb.append(this.serverSessionId);
+ sb.append(this.userToken);
}
first = false;
if (!first) sb.append(", ");
@@ -14079,10 +14925,10 @@ public class MasterServer {
break;
}
switch (schemeField.id) {
- case 1: // SERVER_SESSION_ID
+ case 1: // USER_TOKEN
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.serverSessionId = iprot.readString();
- struct.setServerSessionIdIsSet(true);
+ struct.userToken = iprot.readString();
+ struct.setUserTokenIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -14129,9 +14975,9 @@ public class MasterServer {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
- if (struct.serverSessionId != null) {
- oprot.writeFieldBegin(SERVER_SESSION_ID_FIELD_DESC);
- oprot.writeString(struct.serverSessionId);
+ if (struct.userToken != null) {
+ oprot.writeFieldBegin(USER_TOKEN_FIELD_DESC);
+ oprot.writeString(struct.userToken);
oprot.writeFieldEnd();
}
if (struct.imageDescription != null) {
@@ -14169,7 +15015,7 @@ public class MasterServer {
public void write(org.apache.thrift.protocol.TProtocol prot, submitImage_args struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
- if (struct.isSetServerSessionId()) {
+ if (struct.isSetUserToken()) {
optionals.set(0);
}
if (struct.isSetImageDescription()) {
@@ -14179,8 +15025,8 @@ public class MasterServer {
optionals.set(2);
}
oprot.writeBitSet(optionals, 3);
- if (struct.isSetServerSessionId()) {
- oprot.writeString(struct.serverSessionId);
+ if (struct.isSetUserToken()) {
+ oprot.writeString(struct.userToken);
}
if (struct.isSetImageDescription()) {
struct.imageDescription.write(oprot);
@@ -14201,8 +15047,8 @@ public class MasterServer {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
- struct.serverSessionId = iprot.readString();
- struct.setServerSessionIdIsSet(true);
+ struct.userToken = iprot.readString();
+ struct.setUserTokenIsSet(true);
}
if (incoming.get(1)) {
struct.imageDescription = new ImagePublishData();
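
Taken together, the MasterServer.java changes above add one call, setUsedSatellite(sessionId, satelliteName), and retype submitImage to take a userToken instead of a serverSessionId. Below is a minimal sketch of what a caller might look like; the endpoint, transport and protocol choice, and the token/session values are assumptions for illustration, only the method signatures come from this diff.

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.openslx.bwlp.thrift.iface.ImagePublishData;
import org.openslx.bwlp.thrift.iface.MasterServer;
import org.openslx.bwlp.thrift.iface.TransferInformation;

public class MasterServerCallSketch {
    public static void main(String[] args) throws Exception {
        // Assumed endpoint and plain binary protocol; adjust to the real deployment
        TSocket socket = new TSocket("master.example.org", 9090);
        socket.open();
        MasterServer.Client client = new MasterServer.Client(new TBinaryProtocol(socket));

        // New in this commit: tell the master which satellite a session is using
        client.setUsedSatellite("some-session-id", "Example University");

        // submitImage now authenticates with a user token instead of a server session id
        ImagePublishData description = new ImagePublishData();
        List<ByteBuffer> blockHashes = Collections.emptyList(); // may be incomplete at submit time
        TransferInformation transfer = client.submitImage("some-user-token", description, blockHashes);
        System.out.println("Got transfer info: " + transfer);

        socket.close();
    }
}
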
diff --git a/src/main/java/org/openslx/filetransfer/Transfer.java b/src/main/java/org/openslx/filetransfer/Transfer.java
index f952bdc..cf9b475 100644
--- a/src/main/java/org/openslx/filetransfer/Transfer.java
+++ b/src/main/java/org/openslx/filetransfer/Transfer.java
@@ -110,7 +110,14 @@ public abstract class Transfer
return true;
}
- public boolean sendDone()
+ public void sendDoneAndClose()
+ {
+ sendDone();
+ sendEndOfMeta();
+ close( "Transfer finished" );
+ }
+
+ protected boolean sendDone()
{
try {
sendKeyValuePair( "DONE", "" );
@@ -202,7 +209,6 @@ public abstract class Transfer
sendErrorCode( error );
if ( callback != null )
callback.uploadError( error );
- log.info( error );
}
synchronized ( transferSocket ) {
safeClose( dataFromServer, outStream, transferSocket );
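
With sendDone() now protected, external code finishes a transfer through the new public helper. A minimal sketch of the call-site change, assuming the caller holds some concrete Transfer subclass:

import org.openslx.filetransfer.Transfer;

class TransferFinishSketch {
    // Previously call sites invoked sendDone() and did their own cleanup;
    // sendDoneAndClose() now sends DONE, the end-of-meta marker, and closes the connection.
    static void finish(Transfer transfer) {
        transfer.sendDoneAndClose();
    }
}
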
diff --git a/src/main/java/org/openslx/filetransfer/util/AbstractTransfer.java b/src/main/java/org/openslx/filetransfer/util/AbstractTransfer.java
new file mode 100644
index 0000000..75c68e8
--- /dev/null
+++ b/src/main/java/org/openslx/filetransfer/util/AbstractTransfer.java
@@ -0,0 +1,133 @@
+package org.openslx.filetransfer.util;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.openslx.bwlp.thrift.iface.TransferInformation;
+
+public abstract class AbstractTransfer
+{
+
+ /**
+ * How long to keep this transfer information when the transfer is
+ * (potentially) done
+ */
+ private static final long FINISH_TIMEOUT = TimeUnit.MINUTES.toMillis( 3 );
+
+ /**
+ * How long to keep this transfer information when there are no active
+ * connections and the transfer seems unfinished
+ */
+ private static final long IDLE_TIMEOUT = TimeUnit.HOURS.toMillis( 6 );
+
+ /**
+ * How long to count this transfer towards active transfers when it has
+ * no active connection.
+ */
+ private static final long HOT_IDLE_TIMEOUT = TimeUnit.MINUTES.toMillis( 10 );
+ /**
+ * Time stamp of when (we think) the transfer finished. Clients can/might
+ * not tell us they're done, and simply taking "no active connection" as a
+ * sign the download is done might have unwanted effects if the user's
+ * connection drops for a minute. If this time stamp (plus a FINISH_TIMEOUT)
+ * passed,
+ * we consider the download done and flag it for removal.
+ * If set to zero, the transfer is not finished, or not assumed to have
+ * finished.
+ */
+ protected final AtomicLong potentialFinishTime = new AtomicLong( 0 );
+
+ /**
+ * Time of last activity on this transfer.
+ */
+ protected final AtomicLong lastActivityTime = new AtomicLong( System.currentTimeMillis() );
+
+ private final String transferId;
+
+ public AbstractTransfer( String transferId )
+ {
+ this.transferId = transferId;
+ }
+
+ /**
+ * Returns true if the transfer is considered completed.
+ *
+ * @param now pass System.currentTimeMillis()
+ * @return true if the transfer is considered completed
+ */
+ public boolean isComplete( long now )
+ {
+ long val = potentialFinishTime.get();
+ return val != 0 && val + FINISH_TIMEOUT < now;
+ }
+
+ /**
+ * Returns true if there has been no activity on this transfer for a certain
+ * amount of time.
+ *
+ * @param now pass System.currentTimeMillis()
+ * @return true if the transfer reached its idle timeout
+ */
+ public final boolean hasReachedIdleTimeout( long now )
+ {
+ return getActiveConnectionCount() == 0 && lastActivityTime.get() + IDLE_TIMEOUT < now;
+ }
+
+ public final boolean countsTowardsConnectionLimit( long now )
+ {
+ return getActiveConnectionCount() > 0 || lastActivityTime.get() + HOT_IDLE_TIMEOUT > now;
+ }
+
+ public final String getId()
+ {
+ return transferId;
+ }
+
+ public abstract TransferInformation getTransferInfo();
+
+ /**
+ * Returns true if this transfer would potentially accept new connections.
+ * This should NOT return false if there are too many concurrent
+ * connections, as this is used to signal the client whether to keep trying
+ * to connect.
+ *
+ * @return true if this transfer would potentially accept new connections
+ */
+ public abstract boolean isActive();
+
+ /**
+ * Cancel this transfer, aborting all active connections and rejecting
+ * further incoming ones.
+ */
+ public abstract void cancel();
+
+ /**
+ * Returns number of active transfer connections.
+ *
+ * @return number of active transfer connections
+ */
+ public abstract int getActiveConnectionCount();
+
+ public abstract String getRelativePath();
+
+ /**
+ * Try to close everything passed to this method. Never throw an exception
+ * if it fails, just silently continue.
+ *
+ * @param item One or more objects that are AutoCloseable
+ */
+ static void safeClose( AutoCloseable... item )
+ {
+ if ( item == null )
+ return;
+ for ( AutoCloseable c : item ) {
+ if ( c == null )
+ continue;
+ try {
+ c.close();
+ } catch ( Exception e ) {
+ }
+ }
+ }
+
+}
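
The javadoc above explains the three timeouts, but no consumer is part of this commit. Here is a hedged sketch of how a server-side housekeeping task might use isComplete() and hasReachedIdleTimeout(); the transfer map and the removal policy are assumptions, only the AbstractTransfer methods come from the new class.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.openslx.filetransfer.util.AbstractTransfer;

class TransferCleanupSketch {
    private final Map<String, AbstractTransfer> transfers = new ConcurrentHashMap<>();

    /** Run periodically, e.g. once a minute, from a scheduled executor. */
    void sweep() {
        final long now = System.currentTimeMillis();
        for (Iterator<AbstractTransfer> it = transfers.values().iterator(); it.hasNext();) {
            AbstractTransfer t = it.next();
            if (t.isComplete(now)) {
                it.remove();  // finished and FINISH_TIMEOUT has passed
            } else if (t.hasReachedIdleTimeout(now)) {
                t.cancel();   // no connections for IDLE_TIMEOUT; abort and drop
                it.remove();
            }
        }
    }
}
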
diff --git a/src/main/java/org/openslx/filetransfer/util/ChunkList.java b/src/main/java/org/openslx/filetransfer/util/ChunkList.java
index d0233ac..1b33102 100644
--- a/src/main/java/org/openslx/filetransfer/util/ChunkList.java
+++ b/src/main/java/org/openslx/filetransfer/util/ChunkList.java
@@ -31,7 +31,7 @@ public class ChunkList
private final List<FileChunk> completeChunks = new ArrayList<>( 100 );
- // 0 = complete, 1 = missing, 2 = uploading, 3 = queued for copying, 4 = copying
+ // 0 = complete, 1 = missing, 2 = uploading, 3 = queued for copying, 4 = copying, 5 = hashing
private final ByteBuffer statusArray;
/**
@@ -50,8 +50,8 @@ public class ChunkList
/**
* Update the sha1sums of all chunks. This is meant to be used if you passed an incomplete list
- * before (with null elements), so you don't have to has hthe whole file before starting the
- * upload, but periodically update it while thie upload is running.
+ * before (with null elements), so you don't have to hash the whole file before starting the
+ * upload, but periodically update it while the upload is running.
*
* @param sha1Sums list of sums
*/
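
The corrected comment describes a cumulative update: the side that owns the source file may start the transfer before hashing everything and keep extending the hash list while the upload runs. A rough sketch of that pattern, assuming the caller drives its own hashing loop (the hashing code is illustrative; only updateSha1Sums() and FileChunk.CHUNK_SIZE come from this repository):

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.List;

import org.openslx.filetransfer.util.ChunkList;
import org.openslx.filetransfer.util.FileChunk;

class IncrementalHashSketch {
    /** Hash the file chunk by chunk and push the growing list to the ChunkList. */
    static void hashWhileUploading(String path, long fileSize, ChunkList chunks) throws Exception {
        List<byte[]> sums = new ArrayList<>();
        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        byte[] buffer = new byte[FileChunk.CHUNK_SIZE];
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
            long remaining = fileSize;
            while (remaining > 0) {
                int chunkLen = (int) Math.min(remaining, FileChunk.CHUNK_SIZE);
                in.readFully(buffer, 0, chunkLen);
                sha1.update(buffer, 0, chunkLen);
                sums.add(sha1.digest()); // digest() also resets the MessageDigest
                remaining -= chunkLen;
                // Cumulative call: always pass every hash computed so far, starting at block 0
                chunks.updateSha1Sums(new ArrayList<>(sums));
            }
        }
    }
}
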
diff --git a/src/main/java/org/openslx/filetransfer/util/FileChunk.java b/src/main/java/org/openslx/filetransfer/util/FileChunk.java
index 4b6ee74..62f7d46 100644
--- a/src/main/java/org/openslx/filetransfer/util/FileChunk.java
+++ b/src/main/java/org/openslx/filetransfer/util/FileChunk.java
@@ -8,6 +8,9 @@ import org.openslx.filetransfer.FileRange;
public class FileChunk
{
+ /**
+ * Length in bytes of binary sha1 representation
+ */
public static final int SHA1_LENGTH = 20;
public static final int CHUNK_SIZE_MIB = 16;
public static final int CHUNK_SIZE = CHUNK_SIZE_MIB * ( 1024 * 1024 );
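
For a sense of scale, a small worked example using the two constants documented above; the 40 GiB image size is an arbitrary assumption.

import org.openslx.filetransfer.util.FileChunk;

class ChunkMathSketch {
    public static void main(String[] args) {
        long fileSize = 40L * 1024 * 1024 * 1024; // example: 40 GiB image
        long chunkCount = (fileSize + FileChunk.CHUNK_SIZE - 1) / FileChunk.CHUNK_SIZE;
        long hashBytes = chunkCount * FileChunk.SHA1_LENGTH;
        // prints: 2560 chunks, 51200 bytes of block hashes
        System.out.println(chunkCount + " chunks, " + hashBytes + " bytes of block hashes");
    }
}
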
diff --git a/src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java b/src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java
new file mode 100644
index 0000000..b738ef6
--- /dev/null
+++ b/src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java
@@ -0,0 +1,475 @@
+package org.openslx.filetransfer.util;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.log4j.Logger;
+import org.openslx.bwlp.thrift.iface.TransferState;
+import org.openslx.filetransfer.DataReceivedCallback;
+import org.openslx.filetransfer.Downloader;
+import org.openslx.filetransfer.FileRange;
+import org.openslx.filetransfer.WantRangeCallback;
+import org.openslx.filetransfer.util.HashChecker.HashCheckCallback;
+import org.openslx.filetransfer.util.HashChecker.HashResult;
+import org.openslx.util.ThriftUtil;
+
+public abstract class IncomingTransferBase extends AbstractTransfer implements HashCheckCallback
+{
+
+ private static final Logger LOGGER = Logger.getLogger( IncomingTransferBase.class );
+
+ /**
+ * Remote peer is uploading, so on our end, we have Downloaders
+ */
+ private List<Downloader> downloads = new ArrayList<>();
+
+ private final File tmpFileName;
+
+ private final RandomAccessFile tmpFileHandle;
+
+ private final ChunkList chunks;
+
+ private TransferState state = TransferState.IDLE;
+
+ private final long fileSize;
+
+ private static final HashChecker hashChecker;
+
+ /*
+ * Overridable constants
+ */
+
+ protected static int MAX_CONNECTIONS_PER_TRANSFER = 2;
+
+ /**
+ * Whether file is (still) writable. Used for the file transfer callbacks.
+ */
+ private boolean fileWritable = true;
+
+ static {
+ long maxMem = Runtime.getRuntime().maxMemory();
+ if ( maxMem == Long.MAX_VALUE ) {
+ maxMem = 512;
+ }
+ int hashQueueLen = (int) ( maxMem / 100 );
+ if ( hashQueueLen < 1 ) {
+ hashQueueLen = 1;
+ } else if ( hashQueueLen > 6 ) {
+ hashQueueLen = 6;
+ }
+ HashChecker hc;
+ try {
+ hc = new HashChecker( "SHA-1", hashQueueLen );
+ } catch ( NoSuchAlgorithmException e ) {
+ hc = null;
+ }
+ hashChecker = hc;
+ }
+
+ /*_*/
+
+ public IncomingTransferBase( String transferId, File absFilePath, long fileSize, List<byte[]> blockHashes )
+ throws FileNotFoundException
+ {
+ super( transferId );
+ this.fileSize = fileSize;
+ // Prepare path
+ tmpFileName = absFilePath;
+ tmpFileName.getParentFile().mkdirs();
+ tmpFileHandle = new RandomAccessFile( absFilePath, "rw" );
+ chunks = new ChunkList( fileSize, blockHashes );
+ }
+
+ @Override
+ public boolean isActive()
+ {
+ return state == TransferState.IDLE || state == TransferState.WORKING;
+ }
+
+ @Override
+ public synchronized void cancel()
+ {
+ if ( state != TransferState.FINISHED && state != TransferState.ERROR ) {
+ state = TransferState.ERROR;
+ }
+ synchronized ( downloads ) {
+ for ( Downloader download : downloads ) {
+ download.cancel();
+ }
+ }
+ lastActivityTime.set( 0 );
+ safeClose( tmpFileHandle );
+ }
+
+ @Override
+ public final int getActiveConnectionCount()
+ {
+ return downloads.size();
+ }
+
+ public final boolean hashesEqual( List<ByteBuffer> blockHashes )
+ {
+ List<FileChunk> existing = chunks.getAll();
+ if ( existing.size() != blockHashes.size() )
+ return false;
+ List<byte[]> hashes = ThriftUtil.unwrapByteBufferList( blockHashes );
+ FileChunk first = existing.get( 0 );
+ if ( first == null || first.getSha1Sum() == null )
+ return false;
+ Iterator<byte[]> it = hashes.iterator();
+ for ( FileChunk existingChunk : existing ) {
+ byte[] testChunk = it.next();
+ if ( !Arrays.equals( testChunk, existingChunk.getSha1Sum() ) )
+ return false;
+ }
+ return true;
+ }
+
+ /*
+	 * Getters for final/private fields
+ */
+
+ public final long getFileSize()
+ {
+ return fileSize;
+ }
+
+ public final File getTmpFileName()
+ {
+ return tmpFileName;
+ }
+
+ public final TransferState getState()
+ {
+ return state;
+ }
+
+ public final ChunkList getChunks()
+ {
+ return chunks;
+ }
+
+ /**
+ * It is possible to run a download where the remote peer didn't submit
+ * the full list of block hashes yet, as it might be about to hash the file
+ * while uploading. This method should be called to update the list
+ * of block hashes. This is a cumulative call, so the list must contain
+ * all hashes starting from block 0.
+ *
+ * @param hashList (incomplete) list of block hashes
+ */
+ public void updateBlockHashList( List<byte[]> hashList )
+ {
+ if ( state != TransferState.IDLE && state != TransferState.WORKING ) {
+ LOGGER.debug( this.getId() + ": Rejecting block hash list in state " + state );
+ return;
+ }
+ if ( hashList == null ) {
+ LOGGER.debug( this.getId() + ": Rejecting null block hash list" );
+ return;
+ }
+ chunks.updateSha1Sums( hashList );
+ if ( hashChecker == null )
+ return;
+ FileChunk chunk;
+ while ( null != ( chunk = chunks.getUnhashedComplete() ) ) {
+ byte[] data = loadChunkFromFile( chunk );
+ if ( data == null ) {
+				LOGGER.warn( "Chunk " + chunk.getChunkIndex() + " could not be loaded from disk, marking it as valid anyway :-(" );
+ chunks.markSuccessful( chunk );
+ chunkStatusChanged( chunk );
+ continue;
+ }
+ try {
+ hashChecker.queue( chunk, data, this );
+ } catch ( InterruptedException e ) {
+ Thread.currentThread().interrupt();
+ return;
+ }
+ }
+ }
+
+ private byte[] loadChunkFromFile( FileChunk chunk )
+ {
+ synchronized ( tmpFileHandle ) {
+ if ( state != TransferState.IDLE && state != TransferState.WORKING )
+ return null;
+ try {
+ tmpFileHandle.seek( chunk.range.startOffset );
+ byte[] buffer = new byte[ chunk.range.getLength() ];
+ tmpFileHandle.readFully( buffer );
+ return buffer;
+ } catch ( IOException e ) {
+				LOGGER.error( "Could not read chunk " + chunk.getChunkIndex() + " of file " + getTmpFileName().toString(), e );
+ return null;
+ }
+ }
+ }
+
+ /**
+ * Callback class for an instance of the Downloader, which supplies
+ * the Downloader with wanted file ranges, and handles incoming data.
+ */
+ private class CbHandler implements WantRangeCallback, DataReceivedCallback
+ {
+ /**
+		 * The current chunk being transferred.
+ */
+ private FileChunk currentChunk = null;
+ /**
+ * Current buffer to receive to
+ */
+ private byte[] buffer = new byte[ FileChunk.CHUNK_SIZE ];
+ /**
+ * Downloader object
+ */
+ private final Downloader downloader;
+
+ private CbHandler( Downloader downloader )
+ {
+ this.downloader = downloader;
+ }
+
+ @Override
+ public boolean dataReceived( long fileOffset, int dataLength, byte[] data )
+ {
+ if ( currentChunk == null )
+ throw new IllegalStateException( "dataReceived without current chunk" );
+ if ( !currentChunk.range.contains( fileOffset, fileOffset + dataLength ) )
+ throw new IllegalStateException( "dataReceived with file data out of range" );
+ System.arraycopy( data, 0, buffer, (int) ( fileOffset - currentChunk.range.startOffset ), dataLength );
+ return fileWritable;
+ }
+
+ @Override
+ public FileRange get()
+ {
+ if ( currentChunk != null ) {
+ if ( hashChecker != null && currentChunk.getSha1Sum() != null ) {
+ try {
+ hashChecker.queue( currentChunk, buffer, IncomingTransferBase.this );
+ } catch ( InterruptedException e ) {
+ Thread.currentThread().interrupt();
+ return null;
+ }
+ try {
+ buffer = new byte[ buffer.length ];
+ } catch ( OutOfMemoryError e ) {
+ // Usually catching OOM errors is a bad idea, but it's quite safe here as
+ // we know exactly where it happened, no hidden sub-calls through 20 objects.
+ // The most likely cause here is that the hash checker/disk cannot keep up
+ // writing out completed chunks, so we just sleep a bit and try again. If it still
+ // fails, we exit completely.
+ try {
+ Thread.sleep( 6000 );
+ } catch ( InterruptedException e1 ) {
+ Thread.currentThread().interrupt();
+ return null;
+ }
+					// The second attempt might raise OOM again; if so, give up and cancel the transfer
+ try {
+ buffer = new byte[ buffer.length ];
+ } catch ( OutOfMemoryError e2 ) {
+ downloader.sendErrorCode( "Out of RAM" );
+ cancel();
+ }
+ }
+ } else {
+ // We have no hash checker or the hash for the current chunk is unknown - flush to disk
+ writeFileData( currentChunk.range.startOffset, currentChunk.range.getLength(), buffer );
+ chunks.markSuccessful( currentChunk );
+ chunkStatusChanged( currentChunk );
+ }
+ }
+ // Get next missing chunk
+ try {
+ currentChunk = chunks.getMissing();
+ } catch ( InterruptedException e ) {
+ Thread.currentThread().interrupt();
+ cancel();
+ return null;
+ }
+ if ( currentChunk == null ) {
+ return null; // No more chunks, returning null tells the Downloader we're done.
+ }
+ // Check remaining disk space and abort if it's too low
+ if ( !hasEnoughFreeSpace() ) {
+ downloader.sendErrorCode( "Out of disk space" );
+ LOGGER.error( "Out of space: Cancelling upload of " + getTmpFileName().getAbsolutePath() );
+ cancel();
+ return null;
+ }
+ return currentChunk.range;
+ }
+ }
+
+ public boolean addConnection( final Downloader connection, ExecutorService pool )
+ {
+ if ( state == TransferState.FINISHED ) {
+ handleIncomingWhenFinished( connection, pool );
+ return true;
+ }
+ if ( state == TransferState.ERROR )
+ return false;
+ synchronized ( downloads ) {
+ if ( downloads.size() >= MAX_CONNECTIONS_PER_TRANSFER )
+ return false;
+ downloads.add( connection );
+ }
+ try {
+ pool.execute( new Runnable() {
+ @Override
+ public void run()
+ {
+ CbHandler cbh = new CbHandler( connection );
+ if ( !connection.download( cbh, cbh ) ) {
+ if ( cbh.currentChunk != null ) {
+ // If the download failed and we have a current chunk, put it back into
+ // the queue, so it will be handled again later...
+ chunks.markFailed( cbh.currentChunk );
+ chunkStatusChanged( cbh.currentChunk );
+ }
+ LOGGER.warn( "Download of " + getTmpFileName().getAbsolutePath() + " failed" );
+ }
+ if ( state != TransferState.FINISHED && state != TransferState.ERROR ) {
+ LOGGER.debug( "Download from satellite complete" );
+ lastActivityTime.set( System.currentTimeMillis() );
+ }
+ synchronized ( downloads ) {
+ downloads.remove( connection );
+ }
+ if ( chunks.isComplete() ) {
+ finishUploadInternal();
+ }
+ }
+ } );
+ } catch ( Exception e ) {
+ LOGGER.warn( "threadpool rejected the incoming file transfer", e );
+ synchronized ( downloads ) {
+ downloads.remove( connection );
+ }
+ return false;
+ }
+ if ( state == TransferState.IDLE ) {
+ state = TransferState.WORKING;
+ }
+ return true;
+ }
+
+ private boolean handleIncomingWhenFinished( final Downloader connection, ExecutorService pool )
+ {
+ try {
+ pool.execute( new Runnable() {
+ @Override
+ public void run()
+ {
+ connection.sendDoneAndClose();
+ }
+ } );
+ } catch ( Exception e ) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+	 * Write some data to the local file. Synchronized on the file handle, so it
+	 * is safe to call from multiple concurrent connections. If writing fails,
+	 * the transfer is cancelled and fileWritable is set to false.
+	 *
+	 * @param fileOffset offset in the file to start writing at
+	 * @param dataLength number of bytes of data to write
+	 * @param data buffer holding the bytes to write
+ */
+ private void writeFileData( long fileOffset, int dataLength, byte[] data )
+ {
+ synchronized ( tmpFileHandle ) {
+ if ( state != TransferState.WORKING )
+ throw new IllegalStateException( "Cannot write to file if state != WORKING" );
+ try {
+ tmpFileHandle.seek( fileOffset );
+ tmpFileHandle.write( data, 0, dataLength );
+ } catch ( IOException e ) {
+ LOGGER.error( "Cannot write to '" + getTmpFileName()
+ + "'. Disk full, network storage error, bad permissions, ...?", e );
+ fileWritable = false;
+ }
+ }
+ if ( !fileWritable ) {
+ cancel();
+ }
+ }
+
+ @Override
+ public void hashCheckDone( HashResult result, byte[] data, FileChunk chunk )
+ {
+ if ( state != TransferState.IDLE && state != TransferState.WORKING )
+ return;
+ switch ( result ) {
+ case FAILURE:
+ LOGGER.warn( "Hash check of chunk " + chunk.toString()
+ + " could not be executed. Assuming valid :-(" );
+ // Fall through
+ case VALID:
+ if ( !chunk.isWrittenToDisk() ) {
+ writeFileData( chunk.range.startOffset, chunk.range.getLength(), data );
+ }
+ chunks.markSuccessful( chunk );
+ chunkStatusChanged( chunk );
+ if ( chunks.isComplete() ) {
+ finishUploadInternal();
+ }
+ break;
+ case INVALID:
+ LOGGER.warn( "Hash check of chunk " + chunk.getChunkIndex() + " resulted in mismatch "
+ + chunk.getFailCount() + "x :-(" );
+ chunks.markFailed( chunk );
+ chunkStatusChanged( chunk );
+ break;
+ }
+ }
+
+ private synchronized void finishUploadInternal()
+ {
+ if ( state == TransferState.FINISHED ) {
+ return;
+ }
+ safeClose( tmpFileHandle );
+ if ( state != TransferState.WORKING ) {
+ state = TransferState.ERROR;
+ } else {
+ state = TransferState.FINISHED; // Races...
+ if ( !finishIncomingTransfer() ) {
+ state = TransferState.ERROR;
+ }
+ }
+ }
+
+ /*
+ *
+ */
+
+ /**
+	 * Override this and return true if the destination of this download still has
+	 * enough free space, so we don't run into disk-full errors.
+ */
+ protected abstract boolean hasEnoughFreeSpace();
+
+ /**
+ * This will be called once the download is complete.
+ * The file handle used for writing has been closed before calling this.
+ */
+ protected abstract boolean finishIncomingTransfer();
+
+ protected abstract void chunkStatusChanged( FileChunk chunk );
+
+}
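
To illustrate how IncomingTransferBase is meant to be specialised, here is a rough, hypothetical subclass wiring up the three abstract hooks. It assumes ChunkList tolerates a null block hash list (hashes can be supplied later via updateBlockHashList()) and ignores any further abstract members AbstractTransfer itself might declare; nothing below is part of the commit.

    import java.io.File;
    import java.io.FileNotFoundException;

    public class DummyIncomingTransfer extends IncomingTransferBase
    {
        public DummyIncomingTransfer( String transferId, File destination, long fileSize )
                throws FileNotFoundException
        {
            // null = block hashes not known yet; assumed to be accepted here
            super( transferId, destination, fileSize, null );
        }

        @Override
        protected boolean hasEnoughFreeSpace()
        {
            // Require at least 2 GiB of free space on the destination's file system
            return getTmpFileName().getParentFile().getUsableSpace() > 2L * 1024 * 1024 * 1024;
        }

        @Override
        protected boolean finishIncomingTransfer()
        {
            // Move the finished file to its final location, update bookkeeping, ...
            return true;
        }

        @Override
        protected void chunkStatusChanged( FileChunk chunk )
        {
            // Could persist per-chunk progress here; not needed for this sketch
        }
    }
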
diff --git a/src/main/java/org/openslx/util/FsUtil.java b/src/main/java/org/openslx/util/FsUtil.java
new file mode 100644
index 0000000..76571ef
--- /dev/null
+++ b/src/main/java/org/openslx/util/FsUtil.java
@@ -0,0 +1,36 @@
+package org.openslx.util;
+
+import java.io.File;
+
+import org.apache.log4j.Logger;
+
+public class FsUtil
+{
+
+ private static final Logger LOGGER = Logger.getLogger( FsUtil.class );
+
+ public static String getRelativePath( File absolutePath, File parentDir )
+ {
+ String file;
+ String dir;
+ try {
+ file = absolutePath.getCanonicalPath();
+ dir = parentDir.getCanonicalPath() + File.separator;
+ } catch ( Exception e ) {
+ LOGGER.error( "Could not get relative path for " + absolutePath.toString(), e );
+ return null;
+ }
+ if ( !file.startsWith( dir ) )
+ return null;
+ return file.substring( dir.length() );
+ }
+
+ public static String sanitizeFileName( String fileName )
+ {
+ fileName = fileName.replaceAll( "[^a-zA-Z0-9_\\-]+", "_" );
+ if ( fileName.length() > 40 )
+ fileName = fileName.substring( 0, 40 );
+ return fileName;
+ }
+
+}
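
A quick usage sketch for the two helpers (paths and inputs are made up; getRelativePath() compares canonical paths, so symlinks may change the outcome, and the result below assumes a Unix-like file system):

    File storageRoot = new File( "/srv/openslx/images" );
    File image = new File( "/srv/openslx/images/ubuntu/16-04.vmdk" );
    String rel = FsUtil.getRelativePath( image, storageRoot );     // "ubuntu/16-04.vmdk"
    String safe = FsUtil.sanitizeFileName( "Ubuntu 16.04 (LTS)" ); // "Ubuntu_16_04_LTS_"
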
diff --git a/src/main/java/org/openslx/util/GrowingThreadPoolExecutor.java b/src/main/java/org/openslx/util/GrowingThreadPoolExecutor.java
new file mode 100644
index 0000000..7b0b2d9
--- /dev/null
+++ b/src/main/java/org/openslx/util/GrowingThreadPoolExecutor.java
@@ -0,0 +1,85 @@
+package org.openslx.util;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Grows to maximum pool size before queueing. See
+ * http://stackoverflow.com/a/20153234/2043481
+ */
+public class GrowingThreadPoolExecutor extends ThreadPoolExecutor {
+ private int userSpecifiedCorePoolSize;
+ private int taskCount;
+
+ /**
+ * The default rejected execution handler
+ */
+ private static final RejectedExecutionHandler defaultHandler =
+ new AbortPolicy();
+
+ public GrowingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime,
+ TimeUnit unit, BlockingQueue<Runnable> workQueue) {
+ this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, defaultHandler);
+ }
+
+ public GrowingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
+ BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
+ this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, defaultHandler);
+ }
+
+ public GrowingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
+ BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) {
+ this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, Executors.defaultThreadFactory(),
+ handler);
+ }
+
+ public GrowingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
+ BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) {
+ super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
+ userSpecifiedCorePoolSize = corePoolSize;
+ }
+
+ @Override
+ public void execute(Runnable runnable) {
+ synchronized (this) {
+ taskCount++;
+ setCorePoolSizeToTaskCountWithinBounds();
+ }
+ super.execute(runnable);
+ }
+
+ @Override
+ protected void afterExecute(Runnable runnable, Throwable throwable) {
+ super.afterExecute(runnable, throwable);
+ synchronized (this) {
+ taskCount--;
+ setCorePoolSizeToTaskCountWithinBounds();
+ }
+ }
+
+ private void setCorePoolSizeToTaskCountWithinBounds() {
+ int threads = taskCount;
+ if (threads < userSpecifiedCorePoolSize)
+ threads = userSpecifiedCorePoolSize;
+ if (threads > getMaximumPoolSize())
+ threads = getMaximumPoolSize();
+ super.setCorePoolSize(threads);
+ }
+
+	@Override
+	public void setCorePoolSize(int corePoolSize) {
+ synchronized (this) {
+ userSpecifiedCorePoolSize = corePoolSize;
+ }
+ }
+
+ @Override
+ public int getCorePoolSize() {
+ synchronized (this) {
+ return userSpecifiedCorePoolSize;
+ }
+ }
+} \ No newline at end of file
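
A rough usage sketch: with a plain ThreadPoolExecutor and an unbounded queue, only the core threads would ever be started; this subclass temporarily raises the core pool size per pending task, so the pool grows towards its maximum before work piles up in the queue. The parameters below are illustrative, and the snippet additionally needs java.util.concurrent.LinkedBlockingQueue and TimeUnit.

    // 1 core thread, grows up to 8 threads before queueing,
    // surplus threads idle out one minute after the load drops again
    GrowingThreadPoolExecutor pool = new GrowingThreadPoolExecutor(
            1, 8, 1, TimeUnit.MINUTES, new LinkedBlockingQueue<Runnable>() );
    for ( int i = 0; i < 100; ++i ) {
        pool.execute( new Runnable() {
            @Override
            public void run()
            {
                // some long-running transfer or hashing work
            }
        } );
    }
    pool.shutdown();
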
diff --git a/src/main/java/org/openslx/util/ThriftUtil.java b/src/main/java/org/openslx/util/ThriftUtil.java
new file mode 100644
index 0000000..58019a7
--- /dev/null
+++ b/src/main/java/org/openslx/util/ThriftUtil.java
@@ -0,0 +1,34 @@
+package org.openslx.util;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+public class ThriftUtil {
+
+ public static List<byte[]> unwrapByteBufferList(List<ByteBuffer> blockHashes) {
+ if (blockHashes == null || blockHashes.isEmpty())
+ return null;
+ List<byte[]> hashList = new ArrayList<>(blockHashes.size());
+ for (ByteBuffer hash : blockHashes) {
+ byte[] buffer = new byte[hash.remaining()];
+ hash.mark();
+ hash.get(buffer);
+ hash.reset();
+ hashList.add(buffer);
+ }
+ return hashList;
+ }
+
+ public static byte[] unwrapByteBuffer(ByteBuffer buffer) {
+ byte[] byteArray = null;
+ if (buffer != null) {
+ byteArray = new byte[buffer.remaining()];
+ buffer.mark();
+ buffer.get(byteArray);
+ buffer.reset();
+ }
+ return byteArray;
+ }
+
+}
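
A short sketch of using the helpers on binary data received via Thrift (the input here is synthetic). Because the methods use mark()/reset(), each buffer's position is left untouched, so the original list remains usable afterwards. Requires java.nio.ByteBuffer, java.util.Arrays, java.util.List and FileChunk from org.openslx.filetransfer.util.

    List<ByteBuffer> blockHashes = Arrays.asList(
            ByteBuffer.wrap( new byte[ FileChunk.SHA1_LENGTH ] ) );
    List<byte[]> plainHashes = ThriftUtil.unwrapByteBufferList( blockHashes );
    byte[] single = ThriftUtil.unwrapByteBuffer( blockHashes.get( 0 ) );
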
diff --git a/src/main/thrift/bwlp.thrift b/src/main/thrift/bwlp.thrift
index e5478d1..4020be5 100644
--- a/src/main/thrift/bwlp.thrift
+++ b/src/main/thrift/bwlp.thrift
@@ -584,6 +584,9 @@ service MasterServer {
ClientSessionData localAccountLogin(1:string login, 2:string password)
throws (1:TAuthorizationException failure, 2:TInvocationException error),
+ // Client tells us which satellite it is using
+ void setUsedSatellite(1:Token sessionId, 2:string satelliteName),
+
// find a user in a given organization by a search term
list<UserInfo> findUser(1:Token sessionId, 2:string organizationId, 3:string searchTerm)
throws (1:TAuthorizationException failure, 2:TInvocationException error),
@@ -620,7 +623,7 @@ service MasterServer {
throws (1:TAuthorizationException failure, 2: TInvocationException failure2, 3:TNotFoundException f3),
// Request upload of an image to the master server
- TransferInformation submitImage(1:Token serverSessionId, 2:ImagePublishData imageDescription, 3:list<binary> blockHashes)
+ TransferInformation submitImage(1:Token userToken, 2:ImagePublishData imageDescription, 3:list<binary> blockHashes)
throws (1:TAuthorizationException failure, 2: TInvocationException failure2, 3: TTransferRejectedException failure3),
i32 registerSatellite(6:Token userToken, 5:string displayName, 2:list<string> addresses, 3:string modulus, 4:string exponent, 1:binary certsha256)