summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSimon Rettberg2018-05-11 17:35:51 +0200
committerSimon Rettberg2018-05-11 17:35:51 +0200
commit8cf60948213a141b86e9a7128359545040f97276 (patch)
tree20662a196e92717b2b1147c586b946472e9471d1
parentDo what the javadoc says... (diff)
downloadmaster-sync-shared-8cf60948213a141b86e9a7128359545040f97276.tar.gz
master-sync-shared-8cf60948213a141b86e9a7128359545040f97276.tar.xz
master-sync-shared-8cf60948213a141b86e9a7128359545040f97276.zip
Support copying existing chunks server side
Can speed up uploads if the storage backend is fast enough.
-rw-r--r--src/main/java/org/openslx/bwlp/thrift/iface/SatelliteServer.java1458
-rw-r--r--src/main/java/org/openslx/bwlp/thrift/iface/UploadOptions.java396
-rw-r--r--src/main/java/org/openslx/filetransfer/LocalChunkSource.java42
-rw-r--r--src/main/java/org/openslx/filetransfer/util/ChunkList.java87
-rw-r--r--src/main/java/org/openslx/filetransfer/util/FileChunk.java41
-rw-r--r--src/main/java/org/openslx/filetransfer/util/HashChecker.java45
-rw-r--r--src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java160
-rw-r--r--src/main/java/org/openslx/filetransfer/util/LocalCopyManager.java208
-rw-r--r--src/main/java/org/openslx/sat/thrift/version/Feature.java6
-rw-r--r--src/main/thrift/bwlp.thrift10
10 files changed, 2391 insertions, 62 deletions
diff --git a/src/main/java/org/openslx/bwlp/thrift/iface/SatelliteServer.java b/src/main/java/org/openslx/bwlp/thrift/iface/SatelliteServer.java
index 5559dca..53bfd52 100644
--- a/src/main/java/org/openslx/bwlp/thrift/iface/SatelliteServer.java
+++ b/src/main/java/org/openslx/bwlp/thrift/iface/SatelliteServer.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-09-06")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2018-05-11")
public class SatelliteServer {
public interface Iface {
@@ -47,7 +47,9 @@ public class SatelliteServer {
public TransferInformation requestImageVersionUpload(String userToken, String imageBaseId, long fileSize, List<ByteBuffer> blockHashes, ByteBuffer machineDescription) throws TTransferRejectedException, TAuthorizationException, TInvocationException, TNotFoundException, org.apache.thrift.TException;
- public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes) throws TInvalidTokenException, org.apache.thrift.TException;
+ public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes, String userToken) throws TInvalidTokenException, org.apache.thrift.TException;
+
+ public UploadOptions setUploadOptions(String userToken, String uploadToken, UploadOptions options) throws TAuthorizationException, TInvalidTokenException, org.apache.thrift.TException;
public void cancelUpload(String uploadToken) throws TInvalidTokenException, org.apache.thrift.TException;
@@ -137,7 +139,9 @@ public class SatelliteServer {
public void requestImageVersionUpload(String userToken, String imageBaseId, long fileSize, List<ByteBuffer> blockHashes, ByteBuffer machineDescription, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
- public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes, String userToken, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+ public void setUploadOptions(String userToken, String uploadToken, UploadOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void cancelUpload(String uploadToken, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -343,17 +347,18 @@ public class SatelliteServer {
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "requestImageVersionUpload failed: unknown result");
}
- public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes) throws TInvalidTokenException, org.apache.thrift.TException
+ public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes, String userToken) throws TInvalidTokenException, org.apache.thrift.TException
{
- send_updateBlockHashes(uploadToken, blockHashes);
+ send_updateBlockHashes(uploadToken, blockHashes, userToken);
recv_updateBlockHashes();
}
- public void send_updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes) throws org.apache.thrift.TException
+ public void send_updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes, String userToken) throws org.apache.thrift.TException
{
updateBlockHashes_args args = new updateBlockHashes_args();
args.setUploadToken(uploadToken);
args.setBlockHashes(blockHashes);
+ args.setUserToken(userToken);
sendBase("updateBlockHashes", args);
}
@@ -367,6 +372,37 @@ public class SatelliteServer {
return;
}
+ public UploadOptions setUploadOptions(String userToken, String uploadToken, UploadOptions options) throws TAuthorizationException, TInvalidTokenException, org.apache.thrift.TException
+ {
+ send_setUploadOptions(userToken, uploadToken, options);
+ return recv_setUploadOptions();
+ }
+
+ public void send_setUploadOptions(String userToken, String uploadToken, UploadOptions options) throws org.apache.thrift.TException
+ {
+ setUploadOptions_args args = new setUploadOptions_args();
+ args.setUserToken(userToken);
+ args.setUploadToken(uploadToken);
+ args.setOptions(options);
+ sendBase("setUploadOptions", args);
+ }
+
+ public UploadOptions recv_setUploadOptions() throws TAuthorizationException, TInvalidTokenException, org.apache.thrift.TException
+ {
+ setUploadOptions_result result = new setUploadOptions_result();
+ receiveBase(result, "setUploadOptions");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.frootloops != null) {
+ throw result.frootloops;
+ }
+ if (result.imcyborgbutthatsok != null) {
+ throw result.imcyborgbutthatsok;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "setUploadOptions failed: unknown result");
+ }
+
public void cancelUpload(String uploadToken) throws TInvalidTokenException, org.apache.thrift.TException
{
send_cancelUpload(uploadToken);
@@ -1637,9 +1673,9 @@ public class SatelliteServer {
}
}
- public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ public void updateBlockHashes(String uploadToken, List<ByteBuffer> blockHashes, String userToken, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
- updateBlockHashes_call method_call = new updateBlockHashes_call(uploadToken, blockHashes, resultHandler, this, ___protocolFactory, ___transport);
+ updateBlockHashes_call method_call = new updateBlockHashes_call(uploadToken, blockHashes, userToken, resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
@@ -1647,10 +1683,12 @@ public class SatelliteServer {
public static class updateBlockHashes_call extends org.apache.thrift.async.TAsyncMethodCall {
private String uploadToken;
private List<ByteBuffer> blockHashes;
- public updateBlockHashes_call(String uploadToken, List<ByteBuffer> blockHashes, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ private String userToken;
+ public updateBlockHashes_call(String uploadToken, List<ByteBuffer> blockHashes, String userToken, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
super(client, protocolFactory, transport, resultHandler, false);
this.uploadToken = uploadToken;
this.blockHashes = blockHashes;
+ this.userToken = userToken;
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
@@ -1658,6 +1696,7 @@ public class SatelliteServer {
updateBlockHashes_args args = new updateBlockHashes_args();
args.setUploadToken(uploadToken);
args.setBlockHashes(blockHashes);
+ args.setUserToken(userToken);
args.write(prot);
prot.writeMessageEnd();
}
@@ -1672,6 +1711,44 @@ public class SatelliteServer {
}
}
+ public void setUploadOptions(String userToken, String uploadToken, UploadOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ setUploadOptions_call method_call = new setUploadOptions_call(userToken, uploadToken, options, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class setUploadOptions_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private String userToken;
+ private String uploadToken;
+ private UploadOptions options;
+ public setUploadOptions_call(String userToken, String uploadToken, UploadOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.userToken = userToken;
+ this.uploadToken = uploadToken;
+ this.options = options;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("setUploadOptions", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ setUploadOptions_args args = new setUploadOptions_args();
+ args.setUserToken(userToken);
+ args.setUploadToken(uploadToken);
+ args.setOptions(options);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public UploadOptions getResult() throws TAuthorizationException, TInvalidTokenException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_setUploadOptions();
+ }
+ }
+
public void cancelUpload(String uploadToken, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
cancelUpload_call method_call = new cancelUpload_call(uploadToken, resultHandler, this, ___protocolFactory, ___transport);
@@ -2999,6 +3076,7 @@ public class SatelliteServer {
processMap.put("getConfiguration", new getConfiguration());
processMap.put("requestImageVersionUpload", new requestImageVersionUpload());
processMap.put("updateBlockHashes", new updateBlockHashes());
+ processMap.put("setUploadOptions", new setUploadOptions());
processMap.put("cancelUpload", new cancelUpload());
processMap.put("queryUploadStatus", new queryUploadStatus());
processMap.put("requestDownload", new requestDownload());
@@ -3147,7 +3225,7 @@ public class SatelliteServer {
public updateBlockHashes_result getResult(I iface, updateBlockHashes_args args) throws org.apache.thrift.TException {
updateBlockHashes_result result = new updateBlockHashes_result();
try {
- iface.updateBlockHashes(args.uploadToken, args.blockHashes);
+ iface.updateBlockHashes(args.uploadToken, args.blockHashes, args.userToken);
} catch (TInvalidTokenException ex1) {
result.ex1 = ex1;
}
@@ -3155,6 +3233,32 @@ public class SatelliteServer {
}
}
+ public static class setUploadOptions<I extends Iface> extends org.apache.thrift.ProcessFunction<I, setUploadOptions_args> {
+ public setUploadOptions() {
+ super("setUploadOptions");
+ }
+
+ public setUploadOptions_args getEmptyArgsInstance() {
+ return new setUploadOptions_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public setUploadOptions_result getResult(I iface, setUploadOptions_args args) throws org.apache.thrift.TException {
+ setUploadOptions_result result = new setUploadOptions_result();
+ try {
+ result.success = iface.setUploadOptions(args.userToken, args.uploadToken, args.options);
+ } catch (TAuthorizationException frootloops) {
+ result.frootloops = frootloops;
+ } catch (TInvalidTokenException imcyborgbutthatsok) {
+ result.imcyborgbutthatsok = imcyborgbutthatsok;
+ }
+ return result;
+ }
+ }
+
public static class cancelUpload<I extends Iface> extends org.apache.thrift.ProcessFunction<I, cancelUpload_args> {
public cancelUpload() {
super("cancelUpload");
@@ -4175,6 +4279,7 @@ public class SatelliteServer {
processMap.put("getConfiguration", new getConfiguration());
processMap.put("requestImageVersionUpload", new requestImageVersionUpload());
processMap.put("updateBlockHashes", new updateBlockHashes());
+ processMap.put("setUploadOptions", new setUploadOptions());
processMap.put("cancelUpload", new cancelUpload());
processMap.put("queryUploadStatus", new queryUploadStatus());
processMap.put("requestDownload", new requestDownload());
@@ -4494,7 +4599,69 @@ public class SatelliteServer {
}
public void start(I iface, updateBlockHashes_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.updateBlockHashes(args.uploadToken, args.blockHashes,resultHandler);
+ iface.updateBlockHashes(args.uploadToken, args.blockHashes, args.userToken,resultHandler);
+ }
+ }
+
+ public static class setUploadOptions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, setUploadOptions_args, UploadOptions> {
+ public setUploadOptions() {
+ super("setUploadOptions");
+ }
+
+ public setUploadOptions_args getEmptyArgsInstance() {
+ return new setUploadOptions_args();
+ }
+
+ public AsyncMethodCallback<UploadOptions> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<UploadOptions>() {
+ public void onComplete(UploadOptions o) {
+ setUploadOptions_result result = new setUploadOptions_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ setUploadOptions_result result = new setUploadOptions_result();
+ if (e instanceof TAuthorizationException) {
+ result.frootloops = (TAuthorizationException) e;
+ result.setFrootloopsIsSet(true);
+ msg = result;
+ }
+ else if (e instanceof TInvalidTokenException) {
+ result.imcyborgbutthatsok = (TInvalidTokenException) e;
+ result.setImcyborgbutthatsokIsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, setUploadOptions_args args, org.apache.thrift.async.AsyncMethodCallback<UploadOptions> resultHandler) throws TException {
+ iface.setUploadOptions(args.userToken, args.uploadToken, args.options,resultHandler);
}
}
@@ -10474,6 +10641,7 @@ public class SatelliteServer {
private static final org.apache.thrift.protocol.TField UPLOAD_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("uploadToken", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final org.apache.thrift.protocol.TField BLOCK_HASHES_FIELD_DESC = new org.apache.thrift.protocol.TField("blockHashes", org.apache.thrift.protocol.TType.LIST, (short)2);
+ private static final org.apache.thrift.protocol.TField USER_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("userToken", org.apache.thrift.protocol.TType.STRING, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -10483,11 +10651,13 @@ public class SatelliteServer {
public String uploadToken; // required
public List<ByteBuffer> blockHashes; // required
+ public String userToken; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
UPLOAD_TOKEN((short)1, "uploadToken"),
- BLOCK_HASHES((short)2, "blockHashes");
+ BLOCK_HASHES((short)2, "blockHashes"),
+ USER_TOKEN((short)3, "userToken");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -10506,6 +10676,8 @@ public class SatelliteServer {
return UPLOAD_TOKEN;
case 2: // BLOCK_HASHES
return BLOCK_HASHES;
+ case 3: // USER_TOKEN
+ return USER_TOKEN;
default:
return null;
}
@@ -10554,6 +10726,8 @@ public class SatelliteServer {
tmpMap.put(_Fields.BLOCK_HASHES, new org.apache.thrift.meta_data.FieldMetaData("blockHashes", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))));
+ tmpMap.put(_Fields.USER_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("userToken", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Token")));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(updateBlockHashes_args.class, metaDataMap);
}
@@ -10563,11 +10737,13 @@ public class SatelliteServer {
public updateBlockHashes_args(
String uploadToken,
- List<ByteBuffer> blockHashes)
+ List<ByteBuffer> blockHashes,
+ String userToken)
{
this();
this.uploadToken = uploadToken;
this.blockHashes = blockHashes;
+ this.userToken = userToken;
}
/**
@@ -10581,6 +10757,9 @@ public class SatelliteServer {
List<ByteBuffer> __this__blockHashes = new ArrayList<ByteBuffer>(other.blockHashes);
this.blockHashes = __this__blockHashes;
}
+ if (other.isSetUserToken()) {
+ this.userToken = other.userToken;
+ }
}
public updateBlockHashes_args deepCopy() {
@@ -10591,6 +10770,7 @@ public class SatelliteServer {
public void clear() {
this.uploadToken = null;
this.blockHashes = null;
+ this.userToken = null;
}
public String getUploadToken() {
@@ -10656,6 +10836,30 @@ public class SatelliteServer {
}
}
+ public String getUserToken() {
+ return this.userToken;
+ }
+
+ public updateBlockHashes_args setUserToken(String userToken) {
+ this.userToken = userToken;
+ return this;
+ }
+
+ public void unsetUserToken() {
+ this.userToken = null;
+ }
+
+ /** Returns true if field userToken is set (has been assigned a value) and false otherwise */
+ public boolean isSetUserToken() {
+ return this.userToken != null;
+ }
+
+ public void setUserTokenIsSet(boolean value) {
+ if (!value) {
+ this.userToken = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case UPLOAD_TOKEN:
@@ -10674,6 +10878,14 @@ public class SatelliteServer {
}
break;
+ case USER_TOKEN:
+ if (value == null) {
+ unsetUserToken();
+ } else {
+ setUserToken((String)value);
+ }
+ break;
+
}
}
@@ -10685,6 +10897,9 @@ public class SatelliteServer {
case BLOCK_HASHES:
return getBlockHashes();
+ case USER_TOKEN:
+ return getUserToken();
+
}
throw new IllegalStateException();
}
@@ -10700,6 +10915,8 @@ public class SatelliteServer {
return isSetUploadToken();
case BLOCK_HASHES:
return isSetBlockHashes();
+ case USER_TOKEN:
+ return isSetUserToken();
}
throw new IllegalStateException();
}
@@ -10735,6 +10952,15 @@ public class SatelliteServer {
return false;
}
+ boolean this_present_userToken = true && this.isSetUserToken();
+ boolean that_present_userToken = true && that.isSetUserToken();
+ if (this_present_userToken || that_present_userToken) {
+ if (!(this_present_userToken && that_present_userToken))
+ return false;
+ if (!this.userToken.equals(that.userToken))
+ return false;
+ }
+
return true;
}
@@ -10752,6 +10978,11 @@ public class SatelliteServer {
if (present_blockHashes)
list.add(blockHashes);
+ boolean present_userToken = true && (isSetUserToken());
+ list.add(present_userToken);
+ if (present_userToken)
+ list.add(userToken);
+
return list.hashCode();
}
@@ -10783,6 +11014,16 @@ public class SatelliteServer {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetUserToken()).compareTo(other.isSetUserToken());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetUserToken()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.userToken, other.userToken);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -10818,6 +11059,14 @@ public class SatelliteServer {
org.apache.thrift.TBaseHelper.toString(this.blockHashes, sb);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("userToken:");
+ if (this.userToken == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.userToken);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -10887,6 +11136,14 @@ public class SatelliteServer {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // USER_TOKEN
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.userToken = iprot.readString();
+ struct.setUserTokenIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -10919,6 +11176,11 @@ public class SatelliteServer {
}
oprot.writeFieldEnd();
}
+ if (struct.userToken != null) {
+ oprot.writeFieldBegin(USER_TOKEN_FIELD_DESC);
+ oprot.writeString(struct.userToken);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -10943,7 +11205,10 @@ public class SatelliteServer {
if (struct.isSetBlockHashes()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetUserToken()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetUploadToken()) {
oprot.writeString(struct.uploadToken);
}
@@ -10956,12 +11221,15 @@ public class SatelliteServer {
}
}
}
+ if (struct.isSetUserToken()) {
+ oprot.writeString(struct.userToken);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, updateBlockHashes_args struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.uploadToken = iprot.readString();
struct.setUploadTokenIsSet(true);
@@ -10979,6 +11247,10 @@ public class SatelliteServer {
}
struct.setBlockHashesIsSet(true);
}
+ if (incoming.get(2)) {
+ struct.userToken = iprot.readString();
+ struct.setUserTokenIsSet(true);
+ }
}
}
@@ -11347,6 +11619,1162 @@ public class SatelliteServer {
}
+ public static class setUploadOptions_args implements org.apache.thrift.TBase<setUploadOptions_args, setUploadOptions_args._Fields>, java.io.Serializable, Cloneable, Comparable<setUploadOptions_args> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setUploadOptions_args");
+
+ private static final org.apache.thrift.protocol.TField USER_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("userToken", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField UPLOAD_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("uploadToken", org.apache.thrift.protocol.TType.STRING, (short)2);
+ private static final org.apache.thrift.protocol.TField OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("options", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new setUploadOptions_argsStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new setUploadOptions_argsTupleSchemeFactory());
+ }
+
+ public String userToken; // required
+ public String uploadToken; // required
+ public UploadOptions options; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ USER_TOKEN((short)1, "userToken"),
+ UPLOAD_TOKEN((short)2, "uploadToken"),
+ OPTIONS((short)3, "options");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // USER_TOKEN
+ return USER_TOKEN;
+ case 2: // UPLOAD_TOKEN
+ return UPLOAD_TOKEN;
+ case 3: // OPTIONS
+ return OPTIONS;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.USER_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("userToken", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Token")));
+ tmpMap.put(_Fields.UPLOAD_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("uploadToken", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Token")));
+ tmpMap.put(_Fields.OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("options", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, UploadOptions.class)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setUploadOptions_args.class, metaDataMap);
+ }
+
+ public setUploadOptions_args() {
+ }
+
+ public setUploadOptions_args(
+ String userToken,
+ String uploadToken,
+ UploadOptions options)
+ {
+ this();
+ this.userToken = userToken;
+ this.uploadToken = uploadToken;
+ this.options = options;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public setUploadOptions_args(setUploadOptions_args other) {
+ if (other.isSetUserToken()) {
+ this.userToken = other.userToken;
+ }
+ if (other.isSetUploadToken()) {
+ this.uploadToken = other.uploadToken;
+ }
+ if (other.isSetOptions()) {
+ this.options = new UploadOptions(other.options);
+ }
+ }
+
+ public setUploadOptions_args deepCopy() {
+ return new setUploadOptions_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.userToken = null;
+ this.uploadToken = null;
+ this.options = null;
+ }
+
+ public String getUserToken() {
+ return this.userToken;
+ }
+
+ public setUploadOptions_args setUserToken(String userToken) {
+ this.userToken = userToken;
+ return this;
+ }
+
+ public void unsetUserToken() {
+ this.userToken = null;
+ }
+
+ /** Returns true if field userToken is set (has been assigned a value) and false otherwise */
+ public boolean isSetUserToken() {
+ return this.userToken != null;
+ }
+
+ public void setUserTokenIsSet(boolean value) {
+ if (!value) {
+ this.userToken = null;
+ }
+ }
+
+ public String getUploadToken() {
+ return this.uploadToken;
+ }
+
+ public setUploadOptions_args setUploadToken(String uploadToken) {
+ this.uploadToken = uploadToken;
+ return this;
+ }
+
+ public void unsetUploadToken() {
+ this.uploadToken = null;
+ }
+
+ /** Returns true if field uploadToken is set (has been assigned a value) and false otherwise */
+ public boolean isSetUploadToken() {
+ return this.uploadToken != null;
+ }
+
+ public void setUploadTokenIsSet(boolean value) {
+ if (!value) {
+ this.uploadToken = null;
+ }
+ }
+
+ public UploadOptions getOptions() {
+ return this.options;
+ }
+
+ public setUploadOptions_args setOptions(UploadOptions options) {
+ this.options = options;
+ return this;
+ }
+
+ public void unsetOptions() {
+ this.options = null;
+ }
+
+ /** Returns true if field options is set (has been assigned a value) and false otherwise */
+ public boolean isSetOptions() {
+ return this.options != null;
+ }
+
+ public void setOptionsIsSet(boolean value) {
+ if (!value) {
+ this.options = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case USER_TOKEN:
+ if (value == null) {
+ unsetUserToken();
+ } else {
+ setUserToken((String)value);
+ }
+ break;
+
+ case UPLOAD_TOKEN:
+ if (value == null) {
+ unsetUploadToken();
+ } else {
+ setUploadToken((String)value);
+ }
+ break;
+
+ case OPTIONS:
+ if (value == null) {
+ unsetOptions();
+ } else {
+ setOptions((UploadOptions)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case USER_TOKEN:
+ return getUserToken();
+
+ case UPLOAD_TOKEN:
+ return getUploadToken();
+
+ case OPTIONS:
+ return getOptions();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case USER_TOKEN:
+ return isSetUserToken();
+ case UPLOAD_TOKEN:
+ return isSetUploadToken();
+ case OPTIONS:
+ return isSetOptions();
+ }
+ throw new IllegalStateException();
+ }
+
+ // Standard generated equals: type-check then delegate to the typed overload.
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof setUploadOptions_args)
+ return this.equals((setUploadOptions_args)that);
+ return false;
+ }
+
+ // Field-by-field equality: two structs are equal when each field is either
+ // unset in both or set to equal values in both.
+ public boolean equals(setUploadOptions_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_userToken = true && this.isSetUserToken();
+ boolean that_present_userToken = true && that.isSetUserToken();
+ if (this_present_userToken || that_present_userToken) {
+ if (!(this_present_userToken && that_present_userToken))
+ return false;
+ if (!this.userToken.equals(that.userToken))
+ return false;
+ }
+
+ boolean this_present_uploadToken = true && this.isSetUploadToken();
+ boolean that_present_uploadToken = true && that.isSetUploadToken();
+ if (this_present_uploadToken || that_present_uploadToken) {
+ if (!(this_present_uploadToken && that_present_uploadToken))
+ return false;
+ if (!this.uploadToken.equals(that.uploadToken))
+ return false;
+ }
+
+ boolean this_present_options = true && this.isSetOptions();
+ boolean that_present_options = true && that.isSetOptions();
+ if (this_present_options || that_present_options) {
+ if (!(this_present_options && that_present_options))
+ return false;
+ if (!this.options.equals(that.options))
+ return false;
+ }
+
+ return true;
+ }
+
+ // Hash is built from (isSet flag, value) pairs so it stays consistent with
+ // the equals() definition above.
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_userToken = true && (isSetUserToken());
+ list.add(present_userToken);
+ if (present_userToken)
+ list.add(userToken);
+
+ boolean present_uploadToken = true && (isSetUploadToken());
+ list.add(present_uploadToken);
+ if (present_uploadToken)
+ list.add(uploadToken);
+
+ boolean present_options = true && (isSetOptions());
+ list.add(present_options);
+ if (present_options)
+ list.add(options);
+
+ return list.hashCode();
+ }
+
+ // Total ordering: unset-before-set per field, then field values, in thrift
+ // field-id order (userToken, uploadToken, options).
+ @Override
+ public int compareTo(setUploadOptions_args other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetUserToken()).compareTo(other.isSetUserToken());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetUserToken()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.userToken, other.userToken);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetUploadToken()).compareTo(other.isSetUploadToken());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetUploadToken()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uploadToken, other.uploadToken);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetOptions()).compareTo(other.isSetOptions());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetOptions()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.options, other.options);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ // (De)serialization entry points: dispatch to the scheme (standard or tuple)
+ // matching the protocol in use.
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ // Debug representation. NOTE(review): the generated "first" flag is dead
+ // weight here — it is set false before every check, so a comma always
+ // precedes the 2nd and 3rd fields; harmless, typical of thrift codegen.
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("setUploadOptions_args(");
+ boolean first = true;
+
+ sb.append("userToken:");
+ if (this.userToken == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.userToken);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("uploadToken:");
+ if (this.uploadToken == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.uploadToken);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("options:");
+ if (this.options == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.options);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ // No required fields in this struct; only the nested options struct (if set)
+ // needs recursive validation.
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (options != null) {
+ options.validate();
+ }
+ }
+
+ // Java serialization hooks delegate to the compact thrift protocol so the
+ // serialized form is the thrift wire format, not default Java serialization.
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class setUploadOptions_argsStandardSchemeFactory implements SchemeFactory {
+ public setUploadOptions_argsStandardScheme getScheme() {
+ return new setUploadOptions_argsStandardScheme();
+ }
+ }
+
+ // Standard (self-describing) protocol codec: fields are tagged with id and
+ // type on the wire, so unknown/mistyped fields can be skipped for
+ // forward/backward compatibility.
+ private static class setUploadOptions_argsStandardScheme extends StandardScheme<setUploadOptions_args> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, setUploadOptions_args struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // USER_TOKEN
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.userToken = iprot.readString();
+ struct.setUserTokenIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // UPLOAD_TOKEN
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.uploadToken = iprot.readString();
+ struct.setUploadTokenIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 3: // OPTIONS
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.options = new UploadOptions();
+ struct.options.read(iprot);
+ struct.setOptionsIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ struct.validate();
+ }
+
+ // Null fields are simply omitted from the wire (optional-field semantics).
+ public void write(org.apache.thrift.protocol.TProtocol oprot, setUploadOptions_args struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.userToken != null) {
+ oprot.writeFieldBegin(USER_TOKEN_FIELD_DESC);
+ oprot.writeString(struct.userToken);
+ oprot.writeFieldEnd();
+ }
+ if (struct.uploadToken != null) {
+ oprot.writeFieldBegin(UPLOAD_TOKEN_FIELD_DESC);
+ oprot.writeString(struct.uploadToken);
+ oprot.writeFieldEnd();
+ }
+ if (struct.options != null) {
+ oprot.writeFieldBegin(OPTIONS_FIELD_DESC);
+ struct.options.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class setUploadOptions_argsTupleSchemeFactory implements SchemeFactory {
+ public setUploadOptions_argsTupleScheme getScheme() {
+ return new setUploadOptions_argsTupleScheme();
+ }
+ }
+
+ // Tuple protocol codec: a leading bitset records which optional fields
+ // follow, then values are written densely without per-field headers. Both
+ // sides must agree on the field layout (no skip-unknown capability).
+ private static class setUploadOptions_argsTupleScheme extends TupleScheme<setUploadOptions_args> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, setUploadOptions_args struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetUserToken()) {
+ optionals.set(0);
+ }
+ if (struct.isSetUploadToken()) {
+ optionals.set(1);
+ }
+ if (struct.isSetOptions()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
+ if (struct.isSetUserToken()) {
+ oprot.writeString(struct.userToken);
+ }
+ if (struct.isSetUploadToken()) {
+ oprot.writeString(struct.uploadToken);
+ }
+ if (struct.isSetOptions()) {
+ struct.options.write(oprot);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, setUploadOptions_args struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(3);
+ if (incoming.get(0)) {
+ struct.userToken = iprot.readString();
+ struct.setUserTokenIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.uploadToken = iprot.readString();
+ struct.setUploadTokenIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.options = new UploadOptions();
+ struct.options.read(iprot);
+ struct.setOptionsIsSet(true);
+ }
+ }
+ }
+
+ }
+
+ // Thrift-generated result wrapper for SatelliteServer.setUploadOptions:
+ // carries either the UploadOptions return value (field 0, "success") or one
+ // of the declared exceptions. NOTE(review): the exception field names
+ // "frootloops" / "imcyborgbutthatsok" are verbatim from the .thrift IDL —
+ // fix them there if desired; editing this generated file would be clobbered
+ // on the next codegen run.
+ public static class setUploadOptions_result implements org.apache.thrift.TBase<setUploadOptions_result, setUploadOptions_result._Fields>, java.io.Serializable, Cloneable, Comparable<setUploadOptions_result> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setUploadOptions_result");
+
+ private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+ private static final org.apache.thrift.protocol.TField FROOTLOOPS_FIELD_DESC = new org.apache.thrift.protocol.TField("frootloops", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField IMCYBORGBUTTHATSOK_FIELD_DESC = new org.apache.thrift.protocol.TField("imcyborgbutthatsok", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new setUploadOptions_resultStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new setUploadOptions_resultTupleSchemeFactory());
+ }
+
+ public UploadOptions success; // required
+ public TAuthorizationException frootloops; // required
+ public TInvalidTokenException imcyborgbutthatsok; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ SUCCESS((short)0, "success"),
+ FROOTLOOPS((short)1, "frootloops"),
+ IMCYBORGBUTTHATSOK((short)2, "imcyborgbutthatsok");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 0: // SUCCESS
+ return SUCCESS;
+ case 1: // FROOTLOOPS
+ return FROOTLOOPS;
+ case 2: // IMCYBORGBUTTHATSOK
+ return IMCYBORGBUTTHATSOK;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, UploadOptions.class)));
+ tmpMap.put(_Fields.FROOTLOOPS, new org.apache.thrift.meta_data.FieldMetaData("frootloops", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.IMCYBORGBUTTHATSOK, new org.apache.thrift.meta_data.FieldMetaData("imcyborgbutthatsok", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setUploadOptions_result.class, metaDataMap);
+ }
+
+ public setUploadOptions_result() {
+ }
+
+ public setUploadOptions_result(
+ UploadOptions success,
+ TAuthorizationException frootloops,
+ TInvalidTokenException imcyborgbutthatsok)
+ {
+ this();
+ this.success = success;
+ this.frootloops = frootloops;
+ this.imcyborgbutthatsok = imcyborgbutthatsok;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public setUploadOptions_result(setUploadOptions_result other) {
+ if (other.isSetSuccess()) {
+ this.success = new UploadOptions(other.success);
+ }
+ if (other.isSetFrootloops()) {
+ this.frootloops = new TAuthorizationException(other.frootloops);
+ }
+ if (other.isSetImcyborgbutthatsok()) {
+ this.imcyborgbutthatsok = new TInvalidTokenException(other.imcyborgbutthatsok);
+ }
+ }
+
+ public setUploadOptions_result deepCopy() {
+ return new setUploadOptions_result(this);
+ }
+
+ @Override
+ public void clear() {
+ this.success = null;
+ this.frootloops = null;
+ this.imcyborgbutthatsok = null;
+ }
+
+ // Accessors for the three mutually-exclusive result slots; object fields
+ // use null as the "unset" marker throughout.
+ public UploadOptions getSuccess() {
+ return this.success;
+ }
+
+ public setUploadOptions_result setSuccess(UploadOptions success) {
+ this.success = success;
+ return this;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ /** Returns true if field success is set (has been assigned a value) and false otherwise */
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public void setSuccessIsSet(boolean value) {
+ if (!value) {
+ this.success = null;
+ }
+ }
+
+ public TAuthorizationException getFrootloops() {
+ return this.frootloops;
+ }
+
+ public setUploadOptions_result setFrootloops(TAuthorizationException frootloops) {
+ this.frootloops = frootloops;
+ return this;
+ }
+
+ public void unsetFrootloops() {
+ this.frootloops = null;
+ }
+
+ /** Returns true if field frootloops is set (has been assigned a value) and false otherwise */
+ public boolean isSetFrootloops() {
+ return this.frootloops != null;
+ }
+
+ public void setFrootloopsIsSet(boolean value) {
+ if (!value) {
+ this.frootloops = null;
+ }
+ }
+
+ public TInvalidTokenException getImcyborgbutthatsok() {
+ return this.imcyborgbutthatsok;
+ }
+
+ public setUploadOptions_result setImcyborgbutthatsok(TInvalidTokenException imcyborgbutthatsok) {
+ this.imcyborgbutthatsok = imcyborgbutthatsok;
+ return this;
+ }
+
+ public void unsetImcyborgbutthatsok() {
+ this.imcyborgbutthatsok = null;
+ }
+
+ /** Returns true if field imcyborgbutthatsok is set (has been assigned a value) and false otherwise */
+ public boolean isSetImcyborgbutthatsok() {
+ return this.imcyborgbutthatsok != null;
+ }
+
+ public void setImcyborgbutthatsokIsSet(boolean value) {
+ if (!value) {
+ this.imcyborgbutthatsok = null;
+ }
+ }
+
+ // Reflection-style TBase mutator/getter pair, mirroring the _args struct.
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case SUCCESS:
+ if (value == null) {
+ unsetSuccess();
+ } else {
+ setSuccess((UploadOptions)value);
+ }
+ break;
+
+ case FROOTLOOPS:
+ if (value == null) {
+ unsetFrootloops();
+ } else {
+ setFrootloops((TAuthorizationException)value);
+ }
+ break;
+
+ case IMCYBORGBUTTHATSOK:
+ if (value == null) {
+ unsetImcyborgbutthatsok();
+ } else {
+ setImcyborgbutthatsok((TInvalidTokenException)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case SUCCESS:
+ return getSuccess();
+
+ case FROOTLOOPS:
+ return getFrootloops();
+
+ case IMCYBORGBUTTHATSOK:
+ return getImcyborgbutthatsok();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case SUCCESS:
+ return isSetSuccess();
+ case FROOTLOOPS:
+ return isSetFrootloops();
+ case IMCYBORGBUTTHATSOK:
+ return isSetImcyborgbutthatsok();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof setUploadOptions_result)
+ return this.equals((setUploadOptions_result)that);
+ return false;
+ }
+
+ // Field-by-field equality consistent with hashCode() below.
+ public boolean equals(setUploadOptions_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
+ boolean this_present_frootloops = true && this.isSetFrootloops();
+ boolean that_present_frootloops = true && that.isSetFrootloops();
+ if (this_present_frootloops || that_present_frootloops) {
+ if (!(this_present_frootloops && that_present_frootloops))
+ return false;
+ if (!this.frootloops.equals(that.frootloops))
+ return false;
+ }
+
+ boolean this_present_imcyborgbutthatsok = true && this.isSetImcyborgbutthatsok();
+ boolean that_present_imcyborgbutthatsok = true && that.isSetImcyborgbutthatsok();
+ if (this_present_imcyborgbutthatsok || that_present_imcyborgbutthatsok) {
+ if (!(this_present_imcyborgbutthatsok && that_present_imcyborgbutthatsok))
+ return false;
+ if (!this.imcyborgbutthatsok.equals(that.imcyborgbutthatsok))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_success = true && (isSetSuccess());
+ list.add(present_success);
+ if (present_success)
+ list.add(success);
+
+ boolean present_frootloops = true && (isSetFrootloops());
+ list.add(present_frootloops);
+ if (present_frootloops)
+ list.add(frootloops);
+
+ boolean present_imcyborgbutthatsok = true && (isSetImcyborgbutthatsok());
+ list.add(present_imcyborgbutthatsok);
+ if (present_imcyborgbutthatsok)
+ list.add(imcyborgbutthatsok);
+
+ return list.hashCode();
+ }
+
+ // Ordering: unset-before-set per field, then values, in field-id order.
+ @Override
+ public int compareTo(setUploadOptions_result other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetSuccess()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetFrootloops()).compareTo(other.isSetFrootloops());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetFrootloops()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.frootloops, other.frootloops);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetImcyborgbutthatsok()).compareTo(other.isSetImcyborgbutthatsok());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetImcyborgbutthatsok()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.imcyborgbutthatsok, other.imcyborgbutthatsok);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ // Serialization entry points dispatch to the protocol-appropriate scheme.
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("setUploadOptions_result(");
+ boolean first = true;
+
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("frootloops:");
+ if (this.frootloops == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.frootloops);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("imcyborgbutthatsok:");
+ if (this.imcyborgbutthatsok == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.imcyborgbutthatsok);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ if (success != null) {
+ success.validate();
+ }
+ }
+
+ // Java serialization hooks reuse the thrift compact protocol (same rationale
+ // as in the _args struct).
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class setUploadOptions_resultStandardSchemeFactory implements SchemeFactory {
+ public setUploadOptions_resultStandardScheme getScheme() {
+ return new setUploadOptions_resultStandardScheme();
+ }
+ }
+
+ // Standard (self-describing) protocol codec; unknown field ids/types are
+ // skipped for wire compatibility.
+ private static class setUploadOptions_resultStandardScheme extends StandardScheme<setUploadOptions_result> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, setUploadOptions_result struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 0: // SUCCESS
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.success = new UploadOptions();
+ struct.success.read(iprot);
+ struct.setSuccessIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 1: // FROOTLOOPS
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.frootloops = new TAuthorizationException();
+ struct.frootloops.read(iprot);
+ struct.setFrootloopsIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // IMCYBORGBUTTHATSOK
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.imcyborgbutthatsok = new TInvalidTokenException();
+ struct.imcyborgbutthatsok.read(iprot);
+ struct.setImcyborgbutthatsokIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, setUploadOptions_result struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.success != null) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ struct.success.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ if (struct.frootloops != null) {
+ oprot.writeFieldBegin(FROOTLOOPS_FIELD_DESC);
+ struct.frootloops.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ if (struct.imcyborgbutthatsok != null) {
+ oprot.writeFieldBegin(IMCYBORGBUTTHATSOK_FIELD_DESC);
+ struct.imcyborgbutthatsok.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class setUploadOptions_resultTupleSchemeFactory implements SchemeFactory {
+ public setUploadOptions_resultTupleScheme getScheme() {
+ return new setUploadOptions_resultTupleScheme();
+ }
+ }
+
+ // Tuple protocol codec: bitset of present fields followed by dense values;
+ // requires both ends to share the exact field layout.
+ private static class setUploadOptions_resultTupleScheme extends TupleScheme<setUploadOptions_result> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, setUploadOptions_result struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetSuccess()) {
+ optionals.set(0);
+ }
+ if (struct.isSetFrootloops()) {
+ optionals.set(1);
+ }
+ if (struct.isSetImcyborgbutthatsok()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
+ if (struct.isSetSuccess()) {
+ struct.success.write(oprot);
+ }
+ if (struct.isSetFrootloops()) {
+ struct.frootloops.write(oprot);
+ }
+ if (struct.isSetImcyborgbutthatsok()) {
+ struct.imcyborgbutthatsok.write(oprot);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, setUploadOptions_result struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(3);
+ if (incoming.get(0)) {
+ struct.success = new UploadOptions();
+ struct.success.read(iprot);
+ struct.setSuccessIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.frootloops = new TAuthorizationException();
+ struct.frootloops.read(iprot);
+ struct.setFrootloopsIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.imcyborgbutthatsok = new TInvalidTokenException();
+ struct.imcyborgbutthatsok.read(iprot);
+ struct.setImcyborgbutthatsokIsSet(true);
+ }
+ }
+ }
+
+ }
+
public static class cancelUpload_args implements org.apache.thrift.TBase<cancelUpload_args, cancelUpload_args._Fields>, java.io.Serializable, Cloneable, Comparable<cancelUpload_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cancelUpload_args");
diff --git a/src/main/java/org/openslx/bwlp/thrift/iface/UploadOptions.java b/src/main/java/org/openslx/bwlp/thrift/iface/UploadOptions.java
new file mode 100644
index 0000000..3a36fce
--- /dev/null
+++ b/src/main/java/org/openslx/bwlp/thrift/iface/UploadOptions.java
@@ -0,0 +1,396 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.openslx.bwlp.thrift.iface;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2018-05-11")
+public class UploadOptions implements org.apache.thrift.TBase<UploadOptions, UploadOptions._Fields>, java.io.Serializable, Cloneable, Comparable<UploadOptions> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UploadOptions");
+
+ private static final org.apache.thrift.protocol.TField SERVER_SIDE_COPYING_FIELD_DESC = new org.apache.thrift.protocol.TField("serverSideCopying", org.apache.thrift.protocol.TType.BOOL, (short)1);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new UploadOptionsStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new UploadOptionsTupleSchemeFactory());
+ }
+
+ public boolean serverSideCopying; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ SERVER_SIDE_COPYING((short)1, "serverSideCopying");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // SERVER_SIDE_COPYING
+ return SERVER_SIDE_COPYING;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final int __SERVERSIDECOPYING_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.SERVER_SIDE_COPYING, new org.apache.thrift.meta_data.FieldMetaData("serverSideCopying", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(UploadOptions.class, metaDataMap);
+ }
+
+ public UploadOptions() {
+ }
+
+ public UploadOptions(
+ boolean serverSideCopying)
+ {
+ this();
+ this.serverSideCopying = serverSideCopying;
+ setServerSideCopyingIsSet(true);
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public UploadOptions(UploadOptions other) {
+ __isset_bitfield = other.__isset_bitfield;
+ this.serverSideCopying = other.serverSideCopying;
+ }
+
+ public UploadOptions deepCopy() {
+ return new UploadOptions(this);
+ }
+
+ @Override
+ public void clear() {
+ setServerSideCopyingIsSet(false);
+ this.serverSideCopying = false;
+ }
+
+ public boolean isServerSideCopying() {
+ return this.serverSideCopying;
+ }
+
+ public UploadOptions setServerSideCopying(boolean serverSideCopying) {
+ this.serverSideCopying = serverSideCopying;
+ setServerSideCopyingIsSet(true);
+ return this;
+ }
+
+ public void unsetServerSideCopying() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SERVERSIDECOPYING_ISSET_ID);
+ }
+
+ /** Returns true if field serverSideCopying is set (has been assigned a value) and false otherwise */
+ public boolean isSetServerSideCopying() {
+ return EncodingUtils.testBit(__isset_bitfield, __SERVERSIDECOPYING_ISSET_ID);
+ }
+
+ public void setServerSideCopyingIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SERVERSIDECOPYING_ISSET_ID, value);
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case SERVER_SIDE_COPYING:
+ if (value == null) {
+ unsetServerSideCopying();
+ } else {
+ setServerSideCopying((Boolean)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case SERVER_SIDE_COPYING:
+ return isServerSideCopying();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case SERVER_SIDE_COPYING:
+ return isSetServerSideCopying();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof UploadOptions)
+ return this.equals((UploadOptions)that);
+ return false;
+ }
+
+ public boolean equals(UploadOptions that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_serverSideCopying = true;
+ boolean that_present_serverSideCopying = true;
+ if (this_present_serverSideCopying || that_present_serverSideCopying) {
+ if (!(this_present_serverSideCopying && that_present_serverSideCopying))
+ return false;
+ if (this.serverSideCopying != that.serverSideCopying)
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_serverSideCopying = true;
+ list.add(present_serverSideCopying);
+ if (present_serverSideCopying)
+ list.add(serverSideCopying);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(UploadOptions other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetServerSideCopying()).compareTo(other.isSetServerSideCopying());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetServerSideCopying()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serverSideCopying, other.serverSideCopying);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("UploadOptions(");
+ boolean first = true;
+
+ sb.append("serverSideCopying:");
+ sb.append(this.serverSideCopying);
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class UploadOptionsStandardSchemeFactory implements SchemeFactory {
+ public UploadOptionsStandardScheme getScheme() {
+ return new UploadOptionsStandardScheme();
+ }
+ }
+
+ private static class UploadOptionsStandardScheme extends StandardScheme<UploadOptions> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, UploadOptions struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // SERVER_SIDE_COPYING
+ if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+ struct.serverSideCopying = iprot.readBool();
+ struct.setServerSideCopyingIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, UploadOptions struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldBegin(SERVER_SIDE_COPYING_FIELD_DESC);
+ oprot.writeBool(struct.serverSideCopying);
+ oprot.writeFieldEnd();
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class UploadOptionsTupleSchemeFactory implements SchemeFactory {
+ public UploadOptionsTupleScheme getScheme() {
+ return new UploadOptionsTupleScheme();
+ }
+ }
+
+ private static class UploadOptionsTupleScheme extends TupleScheme<UploadOptions> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, UploadOptions struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.isSetServerSideCopying()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetServerSideCopying()) {
+ oprot.writeBool(struct.serverSideCopying);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, UploadOptions struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.serverSideCopying = iprot.readBool();
+ struct.setServerSideCopyingIsSet(true);
+ }
+ }
+ }
+
+}
+
diff --git a/src/main/java/org/openslx/filetransfer/LocalChunkSource.java b/src/main/java/org/openslx/filetransfer/LocalChunkSource.java
new file mode 100644
index 0000000..c6f5fc3
--- /dev/null
+++ b/src/main/java/org/openslx/filetransfer/LocalChunkSource.java
@@ -0,0 +1,42 @@
+package org.openslx.filetransfer;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public interface LocalChunkSource
+{
+
+ public static class ChunkSource
+ {
+ public final List<SourceFile> sourceCandidates;
+ public final byte[] sha1sum;
+
+ public ChunkSource( byte[] sha1sum )
+ {
+ this.sha1sum = sha1sum;
+ this.sourceCandidates = new ArrayList<>();
+ }
+
+ public void addFile( String file, long offset, int size )
+ {
+ this.sourceCandidates.add( new SourceFile( file, offset, size ) );
+ }
+ }
+
+ public List<ChunkSource> getCloneSources( List<byte[]> sums );
+
+ public static class SourceFile
+ {
+ public final String fileName;
+ public final long offset;
+ public final int chunkSize;
+
+ public SourceFile( String file, long offset, int size )
+ {
+ this.fileName = file;
+ this.offset = offset;
+ this.chunkSize = size;
+ }
+ }
+
+}
diff --git a/src/main/java/org/openslx/filetransfer/util/ChunkList.java b/src/main/java/org/openslx/filetransfer/util/ChunkList.java
index cd1bc69..11f64e8 100644
--- a/src/main/java/org/openslx/filetransfer/util/ChunkList.java
+++ b/src/main/java/org/openslx/filetransfer/util/ChunkList.java
@@ -11,6 +11,7 @@ import java.util.List;
import java.util.zip.CRC32;
import org.apache.log4j.Logger;
+import org.openslx.filetransfer.LocalChunkSource.ChunkSource;
import org.openslx.util.ThriftUtil;
public class ChunkList
@@ -26,7 +27,7 @@ public class ChunkList
/**
* Chunks that are missing from the file
*/
- private final List<FileChunk> missingChunks = new LinkedList<>();
+ private final LinkedList<FileChunk> missingChunks = new LinkedList<>();
/**
* Chunks that are currently being uploaded or hash-checked
@@ -58,21 +59,26 @@ public class ChunkList
* upload, but periodically update it while the upload is running.
*
* @param sha1Sums list of sums
+ * @return lowest index of chunk that didn't have a sha1sum before, -1 if no new ones
*/
- public synchronized void updateSha1Sums( List<byte[]> sha1Sums )
+ public synchronized int updateSha1Sums( List<byte[]> sha1Sums )
{
int index = 0;
+ int firstNew = -1;
for ( byte[] sum : sha1Sums ) {
if ( index >= allChunks.size() )
break;
if ( sum != null ) {
- allChunks.get( index ).setSha1Sum( sum );
+ if ( allChunks.get( index ).setSha1Sum( sum ) && firstNew == -1 ) {
+ firstNew = index;
+ }
if ( !hasChecksum ) {
hasChecksum = true;
}
}
index++;
}
+ return firstNew;
}
/**
@@ -120,13 +126,84 @@ public class ChunkList
if ( missingChunks.isEmpty() )
return null;
}
- FileChunk c = missingChunks.remove( 0 );
+ FileChunk c = missingChunks.removeFirst();
c.setStatus( ChunkStatus.UPLOADING );
pendingChunks.add( c );
return c;
}
/**
+ * Returns true if this list contains a chunk with state MISSING,
+ * which means the chunk doesn't have a sha1 known to exist in
+ * another image.
+ * @return
+ */
+ public synchronized boolean hasLocallyMissingChunk()
+ {
+ return !missingChunks.isEmpty() && missingChunks.peekFirst().status == ChunkStatus.MISSING;
+ }
+
+ /**
+ * Get a chunk that is marked as candidate for copying.
+ * Returns null if none are available.
+ */
+ public synchronized FileChunk getCopyCandidate()
+ {
+ if ( missingChunks.isEmpty() )
+ return null;
+ FileChunk last = missingChunks.removeLast();
+ if ( last.status != ChunkStatus.QUEUED_FOR_COPY ) {
+ // Put back
+ missingChunks.add( last );
+ return null;
+ }
+ // Is a candidate
+ last.setStatus( ChunkStatus.COPYING );
+ pendingChunks.add( last );
+ return last;
+ }
+
+ /**
+ * Mark the given chunks for potential local copying instead of receiving them
+ * from peer.
+ * @param firstNew
+ * @param sources
+ */
+ public synchronized void markLocalCopyCandidates( List<ChunkSource> sources )
+ {
+ for ( ChunkSource src : sources ) {
+ try {
+ if ( src.sourceCandidates.isEmpty() )
+ continue;
+ List<FileChunk> append = null;
+ for ( Iterator<FileChunk> it = missingChunks.iterator(); it.hasNext(); ) {
+ FileChunk chunk = it.next();
+ if ( !Arrays.equals( chunk.sha1sum, src.sha1sum ) )
+ continue;
+ if ( chunk.status == ChunkStatus.QUEUED_FOR_COPY )
+ continue;
+ // Bingo
+ if ( append == null ) {
+ append = new ArrayList<>( 20 );
+ }
+ it.remove();
+ chunk.setStatus( ChunkStatus.QUEUED_FOR_COPY );
+ chunk.setSource( src );
+ append.add( chunk );
+ }
+ if ( append != null ) {
+ // Move all the chunks queued for copying to the end of the list, so when
+ // we getMissing() a chunk for upload from client, these ones would only
+ // come last, in case reading from storage and writing back is really slow
+ missingChunks.addAll( append );
+ }
+ } catch ( Exception e ) {
+ LOGGER.warn( "chunk clone list if messed up", e );
+ }
+ }
+ }
+
+ /**
* Get the block status as byte representation.
*/
public synchronized ByteBuffer getStatusArray()
@@ -235,7 +312,7 @@ public class ChunkList
}
// Add as first element so it will be re-transmitted immediately
c.setStatus( ChunkStatus.MISSING );
- missingChunks.add( 0, c );
+ missingChunks.addFirst( c );
this.notifyAll();
return c.incFailed();
}
diff --git a/src/main/java/org/openslx/filetransfer/util/FileChunk.java b/src/main/java/org/openslx/filetransfer/util/FileChunk.java
index 6450af2..f302b3c 100644
--- a/src/main/java/org/openslx/filetransfer/util/FileChunk.java
+++ b/src/main/java/org/openslx/filetransfer/util/FileChunk.java
@@ -4,11 +4,15 @@ import java.util.Iterator;
import java.util.List;
import java.util.zip.CRC32;
+import org.apache.log4j.Logger;
import org.openslx.filetransfer.FileRange;
+import org.openslx.filetransfer.LocalChunkSource.ChunkSource;
public class FileChunk
{
+ private static final Logger LOGGER = Logger.getLogger( FileChunk.class );
+
/**
* Length in bytes of binary sha1 representation
*/
@@ -22,6 +26,7 @@ public class FileChunk
protected CRC32 crc32;
protected ChunkStatus status = ChunkStatus.MISSING;
private boolean writtenToDisk = false;
+ private ChunkSource localSource = null;
public FileChunk( long startOffset, long endOffset, byte[] sha1sum )
{
@@ -33,14 +38,15 @@ public class FileChunk
}
}
- synchronized void setSha1Sum( byte[] sha1sum )
+ synchronized boolean setSha1Sum( byte[] sha1sum )
{
if ( this.sha1sum != null || sha1sum == null || sha1sum.length != SHA1_LENGTH )
- return;
+ return false;
this.sha1sum = sha1sum;
if ( this.status == ChunkStatus.COMPLETE ) {
this.status = ChunkStatus.HASHING;
}
+ return true;
}
/**
@@ -79,19 +85,29 @@ public class FileChunk
{
// As this is usually called before we validated the sha1, handle the case where
// this gets called multiple times and only remember the last result
+ long old = Long.MAX_VALUE;
if ( crc32 == null ) {
crc32 = new CRC32();
} else {
+ LOGGER.info( "Redoing CRC32 of Chunk " + getChunkIndex() );
+ old = crc32.getValue();
crc32.reset();
}
- int chunkLength = range.getLength();
- crc32.update( data, 0, chunkLength );
- if ( ( chunkLength % 4096 ) != 0 ) {
+ int expectedLength = range.getLength();
+ if ( expectedLength > data.length ) {
+ LOGGER.error( "Chunk #" + getChunkIndex() + ": " + data.length + " instead of " + expectedLength + " for " + getChunkIndex() );
+ }
+ crc32.update( data, 0, expectedLength );
+ if ( ( expectedLength % 4096 ) != 0 ) {
// DNBD3 virtually pads all images to be a multiple of 4KiB in size,
// so simulate that here too
- byte[] padding = new byte[ 4096 - ( chunkLength % 4096 ) ];
+ LOGGER.debug( "Block " + getChunkIndex() + " not multiple of 4k." );
+ byte[] padding = new byte[ 4096 - ( expectedLength % 4096 ) ];
crc32.update( padding );
}
+ if ( old != Long.MAX_VALUE && old != crc32.getValue() ) {
+ LOGGER.warn( String.format( "Changed from %x to %x", old, crc32.getValue() ) );
+ }
}
public synchronized void getCrc32Le( byte[] buffer, int offset )
@@ -119,7 +135,7 @@ public class FileChunk
if ( status != null ) {
if ( status == ChunkStatus.COMPLETE ) {
this.writtenToDisk = true;
- } else if ( status == ChunkStatus.MISSING ) {
+ } else if ( status == ChunkStatus.MISSING || status == ChunkStatus.QUEUED_FOR_COPY ) {
this.writtenToDisk = false;
}
this.status = status;
@@ -161,4 +177,15 @@ public class FileChunk
{
return failCount;
}
+
+ public void setSource( ChunkSource src )
+ {
+ this.localSource = src;
+ }
+
+ public ChunkSource getSources()
+ {
+ return this.localSource;
+ }
+
}
diff --git a/src/main/java/org/openslx/filetransfer/util/HashChecker.java b/src/main/java/org/openslx/filetransfer/util/HashChecker.java
index 273bc7e..2c404db 100644
--- a/src/main/java/org/openslx/filetransfer/util/HashChecker.java
+++ b/src/main/java/org/openslx/filetransfer/util/HashChecker.java
@@ -7,6 +7,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
import org.apache.log4j.Logger;
@@ -24,7 +25,9 @@ public class HashChecker
private final String algorithm;
- private volatile boolean invalid = false;
+ private boolean invalid = false;
+
+ private final int queueCapacity;
public HashChecker( String algorithm ) throws NoSuchAlgorithmException
{
@@ -34,6 +37,7 @@ public class HashChecker
public HashChecker( String algorithm, int queueLen ) throws NoSuchAlgorithmException
{
this.algorithm = algorithm;
+ this.queueCapacity = queueLen;
this.queue = new LinkedBlockingQueue<>( queueLen );
CheckThread thread = new CheckThread( false );
thread.start();
@@ -44,6 +48,7 @@ public class HashChecker
{
synchronized ( threads ) {
threads.remove( thread );
+ LOGGER.debug( "Check threads: " + threads.size() );
if ( thread.extraThread )
return;
invalid = true;
@@ -109,6 +114,7 @@ public class HashChecker
CheckThread thread = new CheckThread( true );
thread.start();
threads.add( thread );
+ LOGGER.debug( "Check threads: " + threads.size() );
} catch ( Exception e ) {
LOGGER.warn( "Could not create additional hash checking thread", e );
}
@@ -127,6 +133,19 @@ public class HashChecker
return true;
}
+ /**
+ * Get number of chunks currently waiting for a worker thread.
+ */
+ public int getQueueFill()
+ {
+ return queue.size();
+ }
+
+ public int getQueueCapacity()
+ {
+ return queueCapacity;
+ }
+
// ############################################################# \\
private class CheckThread extends Thread
@@ -156,16 +175,23 @@ public class HashChecker
HashTask task;
// Wait for work
try {
- task = queue.take();
- if ( task == null )
- continue;
+ if ( extraThread ) {
+ task = queue.poll( 30, TimeUnit.SECONDS );
+ if ( task == null ) {
+ break;
+ }
+ } else {
+ task = queue.take();
+ if ( task == null )
+ continue;
+ }
} catch ( InterruptedException e ) {
LOGGER.info( "Interrupted while waiting for hash task", e );
break;
}
HashResult result = HashResult.NONE;
if ( task.doHash ) {
- // Calculate digest
+ // Calculate digest
md.update( task.data, 0, task.chunk.range.getLength() );
byte[] digest = md.digest();
result = Arrays.equals( digest, task.chunk.getSha1Sum() ) ? HashResult.VALID : HashResult.INVALID;
@@ -175,14 +201,9 @@ public class HashChecker
task.chunk.calculateDnbd3Crc32( task.data );
}
execCallback( task, result );
- if ( extraThread && queue.isEmpty() ) {
- break;
- }
}
- if ( extraThread ) {
- LOGGER.info( "Stopped additional hash checker" );
- } else {
- LOGGER.info( "Stopped MAIN hash checker" );
+ if ( !extraThread ) {
+ LOGGER.warn( "Stopped MAIN hash checker" );
}
threadFailed( this );
}
diff --git a/src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java b/src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java
index 4fe6d88..c2a9443 100644
--- a/src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java
+++ b/src/main/java/org/openslx/filetransfer/util/IncomingTransferBase.java
@@ -9,6 +9,7 @@ import java.nio.ByteBuffer;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ExecutorService;
@@ -19,6 +20,8 @@ import org.openslx.bwlp.thrift.iface.TransferStatus;
import org.openslx.filetransfer.DataReceivedCallback;
import org.openslx.filetransfer.Downloader;
import org.openslx.filetransfer.FileRange;
+import org.openslx.filetransfer.LocalChunkSource;
+import org.openslx.filetransfer.LocalChunkSource.ChunkSource;
import org.openslx.filetransfer.WantRangeCallback;
import org.openslx.filetransfer.util.HashChecker.HashCheckCallback;
import org.openslx.filetransfer.util.HashChecker.HashResult;
@@ -57,17 +60,32 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
*/
private boolean fileWritable = true;
+ /**
+ * Called for getting local sources for certain chunks by checksum
+ */
+ private final LocalChunkSource localChunkSource;
+
+ /**
+ * Non-null if local copying is requested
+ */
+ private final LocalCopyManager localCopyManager;
+
static {
long maxMem = Runtime.getRuntime().maxMemory();
if ( maxMem == Long.MAX_VALUE ) {
- maxMem = 512;
+ LOGGER.warn( "Cannot determine maximum JVM memory -- assuming 1GB -- this might not be safe" );
+ maxMem = 1024;
+ } else {
+ maxMem /= ( 1024 * 1024 );
}
- int hashQueueLen = (int) ( maxMem / 100 );
+ final int maxLen = Math.max( 6, Runtime.getRuntime().availableProcessors() );
+ int hashQueueLen = (int) ( maxMem / 150 );
if ( hashQueueLen < 1 ) {
hashQueueLen = 1;
- } else if ( hashQueueLen > 6 ) {
- hashQueueLen = 6;
+ } else if ( hashQueueLen > maxLen ) {
+ hashQueueLen = maxLen;
}
+ LOGGER.debug( "Queue length: " + hashQueueLen );
HashChecker hc;
try {
hc = new HashChecker( "SHA-1", hashQueueLen );
@@ -79,11 +97,12 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
/*_*/
- public IncomingTransferBase( String transferId, File absFilePath, long fileSize, List<byte[]> blockHashes )
+ public IncomingTransferBase( String transferId, File absFilePath, long fileSize, List<byte[]> blockHashes, LocalChunkSource localChunkSource )
throws FileNotFoundException
{
super( transferId );
this.fileSize = fileSize;
+ this.localChunkSource = localChunkSource;
// Prepare path
tmpFileName = absFilePath;
tmpFileName.getParentFile().mkdirs();
@@ -96,6 +115,13 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
LOGGER.debug( "File " + tmpFileName + " is too long and could not be truncated" );
}
chunks = new ChunkList( fileSize, blockHashes );
+ if ( this.localChunkSource != null ) {
+ this.localCopyManager = new LocalCopyManager( this, this.chunks );
+ this.localCopyManager.start();
+ checkLocalCopyCandidates( blockHashes, 0 );
+ } else {
+ this.localCopyManager = null;
+ }
}
@Override
@@ -116,6 +142,9 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
}
}
potentialFinishTime.set( 0 );
+ if ( localCopyManager != null ) {
+ localCopyManager.interrupt();
+ }
safeClose( tmpFileHandle );
}
@@ -191,14 +220,16 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
LOGGER.debug( this.getId() + ": Rejecting null block hash list" );
return;
}
- chunks.updateSha1Sums( hashList );
+ int firstNew = chunks.updateSha1Sums( hashList );
+ // No hash checker? Neither hashing nor server side dedup will make sense
if ( hashChecker == null )
return;
+ // Check hashes of completed blocks
for ( int cnt = 0; cnt < 3; ++cnt ) {
FileChunk chunk = chunks.getUnhashedComplete();
if ( chunk == null )
break;
- byte[] data;
+ byte[] data = null;
try {
data = loadChunkFromFile( chunk );
} catch ( EOFException e1 ) {
@@ -227,6 +258,33 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
return;
}
}
+ // See if we have any candidates for local copy
+ checkLocalCopyCandidates( hashList, firstNew );
+ }
+
+ private void checkLocalCopyCandidates( List<byte[]> hashList, int firstNew )
+ {
+ if ( localChunkSource == null || hashList == null || hashList.isEmpty() )
+ return;
+ List<byte[]> sums;
+ if ( firstNew == 0 ) {
+ sums = hashList;
+ } else {
+ sums = hashList.subList( firstNew, hashList.size() );
+ }
+ if ( sums == null )
+ return;
+ sums = Collections.unmodifiableList( sums );
+ List<ChunkSource> sources = null;
+ try {
+ sources = localChunkSource.getCloneSources( sums );
+ } catch ( Exception e ) {
+ LOGGER.warn( "Could not get chunk sources", e );
+ }
+ if ( sources != null && !sources.isEmpty() ) {
+ chunks.markLocalCopyCandidates( sources );
+ }
+ localCopyManager.trigger();
}
private byte[] loadChunkFromFile( FileChunk chunk ) throws EOFException
@@ -288,22 +346,14 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
{
boolean needNewBuffer = false;
if ( currentChunk != null ) {
- needNewBuffer = chunkReceived( currentChunk, buffer );
- if ( hashChecker != null && currentChunk.getSha1Sum() != null ) {
- try {
- hashChecker.queue( currentChunk, buffer, IncomingTransferBase.this, HashChecker.BLOCKING | HashChecker.CALC_HASH );
+ try {
+ if ( chunkReceivedInternal( currentChunk, buffer ) ) {
needNewBuffer = true;
- } catch ( InterruptedException e ) {
- chunks.markCompleted( currentChunk, false );
- currentChunk = null;
- Thread.currentThread().interrupt();
- return null;
}
- } else {
- // We have no hash checker or the hash for the current chunk is unknown - flush to disk
- writeFileData( currentChunk.range.startOffset, currentChunk.range.getLength(), buffer );
- chunks.markCompleted( currentChunk, false );
- chunkStatusChanged( currentChunk );
+ } catch ( InterruptedException e3 ) {
+ LOGGER.info( "Downloader was interrupted when trying to hash" );
+ currentChunk = null;
+ return null;
}
if ( needNewBuffer ) {
try {
@@ -357,6 +407,42 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
}
}
+ /**
+ *
+ * @param currentChunk
+ * @param buffer
+ * @return true if buffer is used internally and should not be modified in the future, false if
+ * reuse is safe
+ * @throws InterruptedException
+ */
+ final boolean chunkReceivedInternal( FileChunk currentChunk, byte[] buffer ) throws InterruptedException
+ {
+ boolean needNewBuffer = false;
+ try {
+ needNewBuffer = chunkReceived( currentChunk, buffer );
+ } catch (Exception e) {
+ LOGGER.warn( "Callback chunkReceived caused exception", e );
+ needNewBuffer = true; // To be on the safe side
+ }
+ InterruptedException passEx = null;
+ if ( hashChecker != null && currentChunk.getSha1Sum() != null ) {
+ try {
+ hashChecker.queue( currentChunk, buffer, IncomingTransferBase.this, HashChecker.BLOCKING | HashChecker.CALC_HASH );
+ return true;
+ } catch ( InterruptedException e ) {
+ passEx = e;
+ }
+ }
+ // We have no hash checker, or hasher rejected block,
+ // or the hash for the current chunk is unknown - flush to disk
+ writeFileData( currentChunk.range.startOffset, currentChunk.range.getLength(), buffer );
+ chunks.markCompleted( currentChunk, false );
+ chunkStatusChanged( currentChunk );
+ if ( passEx != null )
+ throw passEx;
+ return needNewBuffer;
+ }
+
public boolean addConnection( final Downloader connection, ExecutorService pool )
{
if ( state == TransferState.FINISHED ) {
@@ -384,6 +470,12 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
// If the download failed and we have a current chunk, put it back into
// the queue, so it will be handled again later...
chunks.markFailed( cbh.currentChunk );
+ // Possibly queue for local copy
+ if ( localCopyManager != null && cbh.currentChunk.sha1sum != null ) {
+ List<byte[]> lst = new ArrayList<>( 1 );
+ lst.add( cbh.currentChunk.sha1sum );
+ checkLocalCopyCandidates( lst, 0 );
+ }
chunkStatusChanged( cbh.currentChunk );
}
LOGGER.debug( "Connection for " + getTmpFileName().getAbsolutePath() + " dropped" );
@@ -399,6 +491,9 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
} else {
// Keep pumping unhashed chunks into the hasher
queueUnhashedChunk( true );
+ if ( localCopyManager != null ) {
+ localCopyManager.trigger();
+ }
}
}
} );
@@ -494,9 +589,15 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
chunks.markFailed( chunk );
chunkStatusChanged( chunk );
break;
+ case NONE:
+ LOGGER.warn( "Got hashCheckDone with result NONE" );
+ break;
}
// A block finished, see if we can queue a new one
queueUnhashedChunk( false );
+ if ( localCopyManager != null ) {
+ localCopyManager.trigger();
+ }
}
/**
@@ -537,12 +638,15 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
}
}
- private synchronized void finishUploadInternal()
+ final synchronized void finishUploadInternal()
{
if ( state == TransferState.FINISHED ) {
return;
}
safeClose( tmpFileHandle );
+ if ( localCopyManager != null ) {
+ localCopyManager.interrupt();
+ }
if ( state != TransferState.WORKING ) {
state = TransferState.ERROR;
} else {
@@ -585,4 +689,16 @@ public abstract class IncomingTransferBase extends AbstractTransfer implements H
return false;
}
+ public boolean isServerSideCopyingEnabled()
+ {
+ return localCopyManager != null && !localCopyManager.isPaused();
+ }
+
+ public void enableServerSideCopying( boolean serverSideCopying )
+ {
+ if ( localCopyManager != null ) {
+ localCopyManager.setPaused( !serverSideCopying );
+ }
+ }
+
}
diff --git a/src/main/java/org/openslx/filetransfer/util/LocalCopyManager.java b/src/main/java/org/openslx/filetransfer/util/LocalCopyManager.java
new file mode 100644
index 0000000..8943524
--- /dev/null
+++ b/src/main/java/org/openslx/filetransfer/util/LocalCopyManager.java
@@ -0,0 +1,208 @@
+package org.openslx.filetransfer.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.log4j.Logger;
+import org.openslx.filetransfer.LocalChunkSource.ChunkSource;
+import org.openslx.filetransfer.LocalChunkSource.SourceFile;
+import org.openslx.util.Util;
+
+public class LocalCopyManager extends Thread
+{
+
+ private static final Logger LOGGER = Logger.getLogger( LocalCopyManager.class );
+
+ private FileChunk currentChunk = null;
+
+ private final ChunkList chunkList;
+
+ private final IncomingTransferBase transfer;
+
+ private final Map<String, RandomAccessFile> sources = new HashMap<>();
+
+ private Semaphore hasWork = new Semaphore( 0 );
+
+ private AtomicInteger copyCount = new AtomicInteger();
+
+ private boolean paused = true;
+
+ public LocalCopyManager( IncomingTransferBase transfer, ChunkList list )
+ {
+ super( "LocalCopyManager" );
+ this.transfer = transfer;
+ this.chunkList = list;
+ }
+
+ /**
+ * Trigger copying of another block if possible
+ */
+ public synchronized void trigger()
+ {
+ if ( this.paused )
+ return;
+ if ( !isAlive() ) {
+ LOGGER.warn( "Cannot be triggered when Thread is not running." );
+ if ( currentChunk != null ) {
+ chunkList.markFailed( currentChunk );
+ currentChunk = null;
+ }
+ return;
+ }
+ if ( currentChunk == null ) {
+ currentChunk = chunkList.getCopyCandidate();
+ hasWork.release();
+ }
+ }
+
+ @Override
+ public void run()
+ {
+ try {
+ while ( !interrupted() ) {
+ while ( currentChunk != null ) {
+ hasWork.drainPermits();
+ copyChunk();
+ }
+ if ( !hasWork.tryAcquire( 10, TimeUnit.SECONDS ) ) {
+ if ( chunkList.isComplete() ) {
+ transfer.finishUploadInternal();
+ break;
+ } else if ( !transfer.isActive() ) {
+ break;
+ } else {
+ trigger();
+ }
+ }
+ }
+ } catch ( InterruptedException | IllegalStateException e ) {
+ interrupt();
+ }
+ synchronized ( this ) {
+ if ( currentChunk != null ) {
+ LOGGER.warn( "Still had a chunk when thread was interrupted." );
+ chunkList.markFailed( currentChunk );
+ currentChunk = null;
+ }
+ }
+ for ( RandomAccessFile file : sources.values() ) {
+ Util.safeClose( file );
+ }
+ LOGGER.debug( "My work here is done. Copied " + copyCount.get() + " chunks from " + sources.size() + " files." );
+ }
+
+ private void copyChunk() throws InterruptedException
+ {
+ ChunkSource source = currentChunk.getSources();
+ if ( source != null ) {
+ // OK
+ for ( ;; ) {
+ // Try every possible source file
+ SourceFile sourceFile = getOpenFile( source, currentChunk.range.getLength() );
+ if ( sourceFile == null ) {
+ // Was marked as having a source file, but now we got null -- most likely
+ // the source file doesn't exist or isn't readable
+ LOGGER.warn( "No open file for local copying!" );
+ break;
+ }
+ // OK
+ RandomAccessFile raf = sources.get( sourceFile.fileName );
+ byte[] buffer;
+ try {
+ raf.seek( sourceFile.offset );
+ // In order not to hinder (fast) upload of unknown blocks, throttle
+ // local copying as long as chunks are missing - do before allocating buffer
+ // so we don't hold allocated unused memory for no reason, but the seek has
+ // been done so we know the file handle is not goofed up
+ if ( chunkList.hasLocallyMissingChunk() ) {
+ int delay;
+ HashChecker hc = transfer.getHashChecker();
+ if ( hc == null ) {
+ delay = 50;
+ } else {
+ delay = ( hc.getQueueFill() * 500 ) / hc.getQueueCapacity();
+ }
+ Thread.sleep( delay );
+ }
+ buffer = new byte[ sourceFile.chunkSize ];
+ raf.readFully( buffer );
+ } catch ( InterruptedException e ) {
+ throw e;
+ } catch ( Exception e ) {
+ LOGGER.warn( "Could not read chunk to replicate from " + sourceFile.fileName, e );
+ buffer = null;
+ if ( e instanceof IOException ) {
+ // Mark file as messed up
+ sources.put( sourceFile.fileName, null );
+ }
+ }
+ if ( buffer != null ) {
+ // All is well, read chunk locally, pass on
+ transfer.chunkReceivedInternal( currentChunk, buffer );
+ synchronized ( this ) {
+ currentChunk = null;
+ }
+ copyCount.incrementAndGet();
+ trigger();
+ return;
+ }
+ // Reaching here means failure
+ // We'll keep looping as long as there are source files available
+ }
+ // End of loop over source files
+ }
+ // FAILED
+ LOGGER.info( "Local copying failed, queueing for normal upload..." );
+ synchronized ( this ) {
+ chunkList.markFailed( currentChunk );
+ currentChunk = null;
+ }
+ }
+
+ private SourceFile getOpenFile( ChunkSource source, int requiredSize )
+ {
+ for ( SourceFile candidate : source.sourceCandidates ) {
+ if ( sources.get( candidate.fileName ) != null )
+ return candidate;
+ }
+ // Have to open
+ for ( SourceFile candidate : source.sourceCandidates ) {
+ if ( sources.containsKey( candidate.fileName ) ) // Maps to null (otherwise upper loop would have returned)
+ continue; // File is broken, don't use
+ if ( candidate.chunkSize != requiredSize )
+ continue;
+ File f = new File( candidate.fileName );
+ if ( !f.exists() ) {
+ sources.put( candidate.fileName, null ); // Mark for future
+ continue;
+ }
+ try {
+ RandomAccessFile raf = new RandomAccessFile( f, "r" );
+ sources.put( candidate.fileName, raf );
+ return candidate;
+ } catch ( Exception e ) {
+ LOGGER.info( "Cannot open " + candidate.fileName, e );
+ sources.put( candidate.fileName, null ); // Mark for future
+ }
+ }
+ // Nothing worked
+ return null;
+ }
+
+ public boolean isPaused()
+ {
+ return paused;
+ }
+
+ public void setPaused( boolean paused )
+ {
+ this.paused = paused;
+ }
+
+}
diff --git a/src/main/java/org/openslx/sat/thrift/version/Feature.java b/src/main/java/org/openslx/sat/thrift/version/Feature.java
index e359e3c..02acae6 100644
--- a/src/main/java/org/openslx/sat/thrift/version/Feature.java
+++ b/src/main/java/org/openslx/sat/thrift/version/Feature.java
@@ -20,4 +20,10 @@ public enum Feature {
* multiple components needs to be compatible to fully support them.
*/
MULTIPLE_HYPERVISORS,
+
+ /**
+ * Server supports copying existing blocks server side.
+ */
+ SERVER_SIDE_COPY,
+
}
diff --git a/src/main/thrift/bwlp.thrift b/src/main/thrift/bwlp.thrift
index 103f8df..4f4f4b1 100644
--- a/src/main/thrift/bwlp.thrift
+++ b/src/main/thrift/bwlp.thrift
@@ -352,6 +352,10 @@ struct TransferStatus {
2: TransferState state,
}
// Per-upload options that can be queried/changed at runtime via setUploadOptions()
struct UploadOptions {
	// Whether the server may fill in chunks by copying matching blocks
	// from files it already stores, instead of receiving them from the client
	1: bool serverSideCopying,
}
+
struct SatelliteConfig {
// Get number of items returned per page (for calls that have a page parameter)
1: i32 pageSize,
@@ -440,9 +444,13 @@ service SatelliteServer {
throws (1:TTransferRejectedException rejection, 2:TAuthorizationException authError, 3:TInvocationException ffff, 4:TNotFoundException sdf),
// Client updates block hashes of an upload
- void updateBlockHashes(1: Token uploadToken, 2: list<binary> blockHashes)
+ void updateBlockHashes(1: Token uploadToken, 2: list<binary> blockHashes, 3: Token userToken)
throws (1:TInvalidTokenException ex1),
+ // Change settings for a specific upload
+ UploadOptions setUploadOptions(1: Token userToken, 2: Token uploadToken, 3: UploadOptions options)
+ throws (1:TAuthorizationException frootloops, 2:TInvalidTokenException imcyborgbutthatsok),
+
// Client cancels an upload
void cancelUpload(1: Token uploadToken)
throws (1:TInvalidTokenException ex1),