forked from Qortal/qortal
Major upgrade of arbitrary data functionality, to support on-chain data for small payloads.
Max size for on-chain data is 239 bytes, due to the 16-byte IV. The data must be a single-file resource, without a .qortal folder.
This commit is contained in:
parent 7deb9328fa
commit a83e332c11
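The 239-byte ceiling follows from the encryption overhead: AES/CBC with PKCS5 padding (the mode used by the tests in this commit) prepends a 16-byte IV and pads the plaintext up to the next 16-byte block, and the result must still fit within the transaction's 256-byte MAX_DATA_SIZE. A minimal sketch of that arithmetic, mirroring AES.getEncryptedFileSize() further down (class and method names here are illustrative only):

public class OnChainSizeLimit {
    // PKCS5 padding always rounds the plaintext up to the next 16-byte block
    // (adding a full extra block when it is already a multiple of 16),
    // and a 16-byte IV is prepended to the ciphertext.
    static long encryptedSize(long plaintextSize) {
        final long blockSize = 16;
        final long ivSize = 16;
        long paddedSize = ((plaintextSize / blockSize) + 1) * blockSize;
        return ivSize + paddedSize;
    }

    public static void main(String[] args) {
        System.out.println(encryptedSize(239)); // 256 -> fits MAX_DATA_SIZE, can go on chain
        System.out.println(encryptedSize(240)); // 272 -> too large, stays off chain as DATA_HASH
    }
}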
@@ -4,6 +4,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.crypto.Crypto;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;

@@ -15,7 +16,6 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.*;
import java.util.stream.Stream;

import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;

@@ -85,6 +85,7 @@ public class ArbitraryDataFile {
return;
}

this.chunks = new ArrayList<>();
this.hash58 = Base58.encode(Crypto.digest(fileContent));
this.signature = signature;
LOGGER.trace(String.format("File digest: %s, size: %d bytes", this.hash58, fileContent.length));

@@ -111,6 +112,41 @@ public class ArbitraryDataFile {
return ArbitraryDataFile.fromHash58(Base58.encode(hash), signature);
}

public static ArbitraryDataFile fromRawData(byte[] data, byte[] signature) throws DataException {
if (data == null) {
return null;
}
return new ArbitraryDataFile(data, signature);
}

public static ArbitraryDataFile fromTransactionData(ArbitraryTransactionData transactionData) throws DataException {
ArbitraryDataFile arbitraryDataFile = null;
byte[] signature = transactionData.getSignature();
byte[] data = transactionData.getData();

if (data == null) {
return null;
}

// Create data file
switch (transactionData.getDataType()) {
case DATA_HASH:
arbitraryDataFile = ArbitraryDataFile.fromHash(data, signature);
break;

case RAW_DATA:
arbitraryDataFile = ArbitraryDataFile.fromRawData(data, signature);
break;
}

// Set metadata hash
if (arbitraryDataFile != null) {
arbitraryDataFile.setMetadataHash(transactionData.getMetadataHash());
}

return arbitraryDataFile;
}

public static ArbitraryDataFile fromPath(Path path, byte[] signature) {
if (path == null) {
return null;
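Most of the call-site changes later in this commit follow the same pattern: the old two-step fromHash/setMetadataHash construction is replaced by the new factory. An illustrative before/after sketch (variable names are just for illustration):

// Before: callers pulled the hash out of the transaction, built the file
// wrapper, then attached the metadata hash separately.
ArbitraryDataFile oldStyle = ArbitraryDataFile.fromHash(transactionData.getData(), transactionData.getSignature());
oldStyle.setMetadataHash(transactionData.getMetadataHash());

// After: one factory call, which also handles RAW_DATA payloads where
// getData() holds the bytes themselves rather than a hash.
ArbitraryDataFile newStyle = ArbitraryDataFile.fromTransactionData(transactionData);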
@@ -260,6 +296,11 @@ public class ArbitraryDataFile {
this.chunks = new ArrayList<>();

if (file != null) {
if (file.exists() && file.length() <= chunkSize) {
// No need to split into chunks if we're already below the chunk size
return 0;
}

try (FileInputStream fileInputStream = new FileInputStream(file);
BufferedInputStream bis = new BufferedInputStream(fileInputStream)) {

@@ -362,11 +362,6 @@ ArbitraryDataReader
throw new DataException(String.format("Transaction data not found for signature %s", this.resourceId));
}

// Load hashes
byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();

// Load secret
byte[] secret = transactionData.getSecret();
if (secret != null) {

@@ -374,16 +369,14 @@
}

// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);
ArbitraryTransactionUtils.checkAndRelocateMiscFiles(transactionData);
arbitraryDataFile.setMetadataHash(metadataHash);

if (!arbitraryDataFile.allFilesExist()) {
if (ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName())) {
throw new DataException(
String.format("Unable to request missing data for file %s because the name is blocked", arbitraryDataFile));
}
else {
} else {
// Ask the arbitrary data manager to fetch data for this transaction
String message;
if (this.canRequestMissingFiles) {

@@ -394,8 +387,7 @@
} else {
message = String.format("Unable to reissue request for missing file %s for signature %s due to rate limit. Please try again later.", arbitraryDataFile, Base58.encode(transactionData.getSignature()));
}
}
else {
} else {
message = String.format("Missing data for file %s", arbitraryDataFile);
}

@@ -405,21 +397,25 @@
}
}

if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
// We have all the chunks but not the complete file, so join them
arbitraryDataFile.join();
// Data hashes need some extra processing
if (transactionData.getDataType() == DataType.DATA_HASH) {
if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
// We have all the chunks but not the complete file, so join them
arbitraryDataFile.join();
}

// If the complete file still doesn't exist then something went wrong
if (!arbitraryDataFile.exists()) {
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
}
// Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
// Delete the invalid file
arbitraryDataFile.delete();
throw new DataException("Unable to validate complete file hash");
}
}

// If the complete file still doesn't exist then something went wrong
if (!arbitraryDataFile.exists()) {
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
}
// Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), digest)) {
// Delete the invalid file
arbitraryDataFile.delete();
throw new DataException("Unable to validate complete file hash");
}
// Ensure the file's size matches the size reported by the transaction (throws a DataException if not)
arbitraryDataFile.validateFileSize(transactionData.getSize());
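The restructured reader logic above only performs chunk joining and complete-file digest validation for DATA_HASH transactions. A minimal illustrative helper (hypothetical, not part of the commit) capturing the distinction:

// Hypothetical helper, for illustration only: RAW_DATA transactions carry the
// (encrypted) payload directly on chain, so there are no chunks to join and no
// separate complete-file digest to verify against.
static boolean needsChunkProcessing(ArbitraryTransactionData transactionData) {
    return transactionData.getDataType() == ArbitraryTransactionData.DataType.DATA_HASH;
}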
@@ -150,11 +150,7 @@ public class ArbitraryDataResource {
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);

for (ArbitraryTransactionData transactionData : transactionDataList) {
byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

// Delete any chunks or complete files from each transaction
arbitraryDataFile.deleteAll(deleteMetadata);

@@ -9,6 +9,7 @@ import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.crypto.AES;
import org.qortal.crypto.Crypto;
import org.qortal.data.PaymentData;
import org.qortal.data.transaction.ArbitraryTransactionData;

@@ -181,6 +182,7 @@ public class ArbitraryDataTransactionBuilder {
for (ModifiedPath path : metadata.getModifiedPaths()) {
if (path.getDiffType() != DiffType.COMPLETE_FILE) {
atLeastOnePatch = true;
break;
}
}
}

@@ -229,10 +231,12 @@ public class ArbitraryDataTransactionBuilder {
random.nextBytes(lastReference);
}

Compression compression = Compression.ZIP;
// Single file resources are handled differently, especially for very small data payloads, as these go on chain
final boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(path, false);
final boolean shouldUseOnChainData = (isSingleFileResource && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE);

// FUTURE? Use zip compression for directories, or no compression for single files
// Compression compression = (path.toFile().isDirectory()) ? Compression.ZIP : Compression.NONE;
// Use zip compression if data isn't going on chain
Compression compression = shouldUseOnChainData ? Compression.NONE : Compression.ZIP;

ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(path, name, service, identifier, method,
compression, title, description, tags, category);

@@ -250,16 +254,21 @@ public class ArbitraryDataTransactionBuilder {
throw new DataException("Arbitrary data file is null");
}

// Get chunks metadata file
// Get metadata file
ArbitraryDataFile metadataFile = arbitraryDataFile.getMetadataFile();
if (metadataFile == null && arbitraryDataFile.chunkCount() > 1) {
throw new DataException(String.format("Chunks metadata data file is null but there are %d chunks", arbitraryDataFile.chunkCount()));
}

String digest58 = arbitraryDataFile.digest58();
if (digest58 == null) {
LOGGER.error("Unable to calculate file digest");
throw new DataException("Unable to calculate file digest");
// Default to using a data hash, with data held off-chain
ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
byte[] data = arbitraryDataFile.digest();

// For small, single-chunk resources, we can store the data directly on chain
if (shouldUseOnChainData && arbitraryDataFile.getBytes().length <= ArbitraryTransaction.MAX_DATA_SIZE && arbitraryDataFile.chunkCount() == 0) {
// Within allowed on-chain data size
dataType = DataType.RAW_DATA;
data = arbitraryDataFile.getBytes();
}

final BaseTransactionData baseTransactionData = new BaseTransactionData(now, Group.NO_GROUP,

@@ -268,22 +277,21 @@ public class ArbitraryDataTransactionBuilder {
final int version = 5;
final int nonce = 0;
byte[] secret = arbitraryDataFile.getSecret();
final ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
final byte[] digest = arbitraryDataFile.digest();

final byte[] metadataHash = (metadataFile != null) ? metadataFile.getHash() : null;
final List<PaymentData> payments = new ArrayList<>();

ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
version, service.value, nonce, size, name, identifier, method,
secret, compression, digest, dataType, metadataHash, payments);
secret, compression, data, dataType, metadataHash, payments);

this.arbitraryTransactionData = transactionData;

} catch (DataException e) {
} catch (DataException | IOException e) {
if (arbitraryDataFile != null) {
arbitraryDataFile.deleteAll(true);
}
throw(e);
throw new DataException(e);
}

}
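Taken together, the builder hunks above implement a simple decision: tiny single-file resources skip compression and go on chain as RAW_DATA, while everything else keeps the existing off-chain DATA_HASH flow. A condensed, illustrative restatement of that logic (not the actual method, which is spread across the hunks above):

// Condensed restatement of the builder's new decision flow, for illustration only.
boolean isSingleFileResource = FilesystemUtils.isSingleFileResource(path, false);
boolean shouldUseOnChainData = isSingleFileResource
        && AES.getEncryptedFileSize(FilesystemUtils.getSingleFileContents(path).length) <= ArbitraryTransaction.MAX_DATA_SIZE;

// No compression when the payload will live on chain; otherwise zip as before
Compression compression = shouldUseOnChainData ? Compression.NONE : Compression.ZIP;

// Default: store only the SHA-256 digest on chain and keep the file off chain
ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
byte[] data = arbitraryDataFile.digest();

// Small, single-chunk resources embed the encrypted bytes directly in the transaction
if (shouldUseOnChainData
        && arbitraryDataFile.getBytes().length <= ArbitraryTransaction.MAX_DATA_SIZE
        && arbitraryDataFile.chunkCount() == 0) {
    dataType = ArbitraryTransactionData.DataType.RAW_DATA;
    data = arbitraryDataFile.getBytes();
}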
@@ -107,10 +107,9 @@ public class ArbitraryDataWriter {
private void preExecute() throws DataException {
this.checkEnabled();

// Enforce compression when uploading a directory
File file = new File(this.filePath.toString());
if (file.isDirectory() && compression == Compression.NONE) {
throw new DataException("Unable to upload a directory without compression");
// Enforce compression when uploading multiple files
if (!FilesystemUtils.isSingleFileResource(this.filePath, false) && compression == Compression.NONE) {
throw new DataException("Unable to publish multiple files without compression");
}

// Create temporary working directory

@@ -168,6 +167,9 @@ public class ArbitraryDataWriter {

if (this.files.size() == 1) {
singleFilePath = Paths.get(this.filePath.toString(), this.files.get(0));

// Update filePath to point to the single file (instead of the directory containing the file)
this.filePath = singleFilePath;
}
}
}

@@ -314,9 +316,6 @@ public class ArbitraryDataWriter {
if (chunkCount > 0) {
LOGGER.info(String.format("Successfully split into %d chunk%s", chunkCount, (chunkCount == 1 ? "" : "s")));
}
else {
throw new DataException("Unable to split file into chunks");
}
}

private void createMetadataFile() throws IOException, DataException {

@@ -258,8 +258,6 @@ public class ArbitraryDataFileListManager {
// Lookup file lists by signature (and optionally hashes)

public boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) {
byte[] digest = arbitraryTransactionData.getData();
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
byte[] signature = arbitraryTransactionData.getSignature();
String signature58 = Base58.encode(signature);

@@ -286,8 +284,7 @@ public class ArbitraryDataFileListManager {

// Find hashes that we are missing
try {
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
missingHashes = arbitraryDataFile.missingHashes();
} catch (DataException e) {
// Leave missingHashes as null, so that all hashes are requested

@@ -460,10 +457,9 @@ public class ArbitraryDataFileListManager {

arbitraryTransactionData = (ArbitraryTransactionData) transactionData;

// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
arbitraryDataFile.setMetadataHash(arbitraryTransactionData.getMetadataHash());

// // Load data file(s)
// ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
//
// // Check all hashes exist
// for (byte[] hash : hashes) {
// //LOGGER.debug("Received hash {}", Base58.encode(hash));

@@ -594,12 +590,8 @@ public class ArbitraryDataFileListManager {
// Check if we're even allowed to serve data for this transaction
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {

byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();

// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

// If the peer didn't supply a hash list, we need to return all hashes for this transaction
if (requestedHashes == null || requestedHashes.isEmpty()) {

@@ -132,9 +132,7 @@ public class ArbitraryDataFileManager extends Thread {
List<byte[]> hashes) throws DataException {

// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
boolean receivedAtLeastOneFile = false;

// Now fetch actual data from this peer

@@ -202,4 +202,12 @@ public class AES {
.decode(cipherText)));
}

public static long getEncryptedFileSize(long inFileSize) {
// To calculate the resulting file size, add 16 (for the IV), then round up to the nearest multiple of 16
final int ivSize = 16;
final int chunkSize = 16;
final int expectedSize = Math.round((inFileSize + ivSize) / chunkSize) * chunkSize + chunkSize;
return expectedSize;
}

}
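The new helper always adds a full 16-byte padding block on top of the 16-byte IV (the division is integral, so the Math.round call effectively acts as a floor). A few sample values, assuming AES/CBC with PKCS5 padding as exercised by the tests further down:

import org.qortal.crypto.AES;

public class EncryptedSizeExamples {
    public static void main(String[] args) {
        // PKCS5 padding always adds at least one byte, so a full extra block
        // appears when the plaintext is already a multiple of 16.
        System.out.println(AES.getEncryptedFileSize(1));   // 32  (16 IV + 16)
        System.out.println(AES.getEncryptedFileSize(16));  // 48  (16 IV + 32)
        System.out.println(AES.getEncryptedFileSize(239)); // 256 (16 IV + 240) - largest on-chain payload
        System.out.println(AES.getEncryptedFileSize(240)); // 272 (16 IV + 256) - too big, goes off chain
    }
}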
@@ -5,9 +5,7 @@ import org.apache.logging.log4j.Logger;
import org.bouncycastle.util.Longs;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceInfo;
import org.qortal.crypto.Crypto;
import org.qortal.data.arbitrary.ArbitraryResourceNameInfo;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import org.qortal.data.transaction.BaseTransactionData;

@@ -15,6 +13,7 @@ import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.ArbitraryRepository;
import org.qortal.repository.DataException;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction.ApprovalStatus;
import org.qortal.utils.Base58;

@@ -27,8 +26,6 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {

private static final Logger LOGGER = LogManager.getLogger(HSQLDBArbitraryRepository.class);

private static final int MAX_RAW_DATA_SIZE = 255; // size of VARBINARY

protected HSQLDBRepository repository;

public HSQLDBArbitraryRepository(HSQLDBRepository repository) {

@@ -55,13 +52,8 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
return true;
}

// Load hashes
byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();

// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

// Check if we already have the complete data file or all chunks
if (arbitraryDataFile.allFilesExist()) {

@@ -84,13 +76,8 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
return transactionData.getData();
}

// Load hashes
byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();

// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

// If we have the complete data file, return it
if (arbitraryDataFile.exists()) {

@@ -105,6 +92,7 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
arbitraryDataFile.join();

// Verify that the combined hash matches the expected hash
byte[] digest = transactionData.getData();
if (!digest.equals(arbitraryDataFile.digest())) {
LOGGER.info(String.format("Hash mismatch for transaction: %s", Base58.encode(signature)));
return null;

@@ -132,11 +120,11 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
}

// Trivial-sized payloads can remain in raw form
if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA && arbitraryTransactionData.getData().length <= MAX_RAW_DATA_SIZE) {
if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA && arbitraryTransactionData.getData().length <= ArbitraryTransaction.MAX_DATA_SIZE) {
return;
}

throw new IllegalStateException(String.format("Supplied data is larger than maximum size (%d bytes). Please use ArbitraryDataWriter.", MAX_RAW_DATA_SIZE));
throw new IllegalStateException(String.format("Supplied data is larger than maximum size (%d bytes). Please use ArbitraryDataWriter.", ArbitraryTransaction.MAX_DATA_SIZE));
}

@Override

@@ -146,14 +134,8 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
return;
}

// Load hashes
byte[] hash = arbitraryTransactionData.getData();
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();

// Load data file(s)
byte[] signature = arbitraryTransactionData.getSignature();
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);

// Delete file, chunks, and metadata
arbitraryDataFile.deleteAll(true);

@@ -33,7 +33,7 @@ public class ArbitraryTransaction extends Transaction {
private ArbitraryTransactionData arbitraryTransactionData;

// Other useful constants
public static final int MAX_DATA_SIZE = 4000;
public static final int MAX_DATA_SIZE = 256;
public static final int MAX_METADATA_LENGTH = 32;
public static final int HASH_LENGTH = TransactionTransformer.SHA256_LENGTH;
public static final int MAX_IDENTIFIER_LENGTH = 64;

@@ -110,13 +110,8 @@ public class ArbitraryTransactionUtils {
return false;
}

byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();

// Load complete file and chunks
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

return arbitraryDataFile.allChunksExist();
}

@@ -126,18 +121,13 @@ public class ArbitraryTransactionUtils {
return false;
}

byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();

if (metadataHash == null) {
if (transactionData.getMetadataHash() == null) {
// This file doesn't have any metadata/chunks, which means none exist
return false;
}

// Load complete file and chunks
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

return arbitraryDataFile.anyChunksExist();
}

@@ -147,12 +137,7 @@ public class ArbitraryTransactionUtils {
return 0;
}

byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();

ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

// Find the folder containing the files
Path parentPath = arbitraryDataFile.getFilePath().getParent();

@@ -180,18 +165,13 @@ public class ArbitraryTransactionUtils {
return 0;
}

byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();

if (metadataHash == null) {
if (transactionData.getMetadataHash() == null) {
// This file doesn't have any metadata, therefore it has a single (complete) chunk
return 1;
}

// Load complete file and chunks
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(transactionData);

return arbitraryDataFile.fileCount();
}

@@ -243,31 +223,24 @@ public class ArbitraryTransactionUtils {
}

public static void deleteCompleteFileAndChunks(ArbitraryTransactionData arbitraryTransactionData) throws DataException {
byte[] completeHash = arbitraryTransactionData.getData();
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
byte[] signature = arbitraryTransactionData.getSignature();

ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
arbitraryDataFile.deleteAll(true);
}

public static void convertFileToChunks(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) throws DataException {
byte[] completeHash = arbitraryTransactionData.getData();
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
byte[] signature = arbitraryTransactionData.getSignature();

// Find the expected chunk hashes
ArbitraryDataFile expectedDataFile = ArbitraryDataFile.fromHash(completeHash, signature);
expectedDataFile.setMetadataHash(metadataHash);
ArbitraryDataFile expectedDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);

if (metadataHash == null || !expectedDataFile.getMetadataFile().exists()) {
if (arbitraryTransactionData.getMetadataHash() == null || !expectedDataFile.getMetadataFile().exists()) {
// We don't have the metadata file, or this transaction doesn't have one - nothing to do
return;
}

byte[] completeHash = arbitraryTransactionData.getData();
byte[] signature = arbitraryTransactionData.getSignature();

// Split the file into chunks
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash, signature);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
int chunkCount = arbitraryDataFile.split(ArbitraryDataFile.CHUNK_SIZE);
if (chunkCount > 1) {
LOGGER.info(String.format("Successfully split %s into %d chunk%s",

@@ -250,6 +250,39 @@ public class FilesystemUtils {
return data;
}

/**
* isSingleFileResource
* Returns true if the path points to a file, or a
* directory containing a single file only.
*
* @param path to file or directory
* @param excludeQortalDirectory - if true, a directory containing a single file and a .qortal directory is considered a single file resource
* @return
* @throws IOException
*/
public static boolean isSingleFileResource(Path path, boolean excludeQortalDirectory) {
// If the path is a file, read the contents directly
if (path.toFile().isFile()) {
return true;
}

// Or if it's a directory, only load file contents if there is a single file inside it
else if (path.toFile().isDirectory()) {
String[] files = path.toFile().list();
if (excludeQortalDirectory) {
files = ArrayUtils.removeElement(files, ".qortal");
}
if (files.length == 1) {
Path filePath = Paths.get(path.toString(), files[0]);
if (filePath.toFile().isFile()) {
return true;
}
}
}

return false;
}

public static byte[] readFromFile(String filePath, long position, int size) throws IOException {
RandomAccessFile file = new RandomAccessFile(filePath, "r");
file.seek(position);
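A small illustrative check of how the excludeQortalDirectory flag behaves for a built resource. The directory layout and variable names below are hypothetical; only the isSingleFileResource signature comes from the hunk above:

// Hypothetical layout: myResource/ contains index.html plus a .qortal state folder.
// With excludeQortalDirectory=true the .qortal entry is ignored, so the directory
// still counts as a single-file resource; with false it does not.
Path builtResource = Paths.get("myResource");
boolean ignoringQortalDir = FilesystemUtils.isSingleFileResource(builtResource, true);  // true
boolean countingQortalDir = FilesystemUtils.isSingleFileResource(builtResource, false); // false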
@@ -111,8 +111,8 @@ public class ArbitraryDataTests extends Common {
fail("Creating transaction should fail due to nonexistent PUT transaction");

} catch (DataException expectedException) {
assertEquals(String.format("Couldn't find PUT transaction for " +
"name %s, service %s and identifier ", name, service), expectedException.getMessage());
assertTrue(expectedException.getMessage().contains(String.format("Couldn't find PUT transaction for " +
"name %s, service %s and identifier ", name, service)));
}

}

@@ -358,7 +358,7 @@ public class ArbitraryDataTests extends Common {
byte[] path1FileDigest = Crypto.digest(path1.toFile());
ArbitraryDataDigest path1DirectoryDigest = new ArbitraryDataDigest(path1.getParent());
path1DirectoryDigest.compute();
ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name, identifier, Method.PUT, service, alice);

// Now build the latest data state for this name
ArbitraryDataReader arbitraryDataReader1 = new ArbitraryDataReader(name, ResourceIdType.NAME, service, identifier);

@@ -0,0 +1,135 @@
package org.qortal.test.arbitrary;

import org.junit.Before;
import org.junit.Test;
import org.qortal.arbitrary.ArbitraryDataDigest;
import org.qortal.crypto.AES;
import org.qortal.crypto.Crypto;
import org.qortal.repository.DataException;
import org.qortal.test.common.Common;
import org.qortal.utils.ZipUtils;

import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Random;

import static org.junit.Assert.*;

public class ArbitraryEncryptionTests extends Common {

@Before
public void beforeTest() throws DataException {
Common.useDefaultSettings();
}

@Test
public void testEncryption() throws IOException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, NoSuchPaddingException, IllegalBlockSizeException, BadPaddingException, InvalidKeyException {
String enclosingFolderName = "data";
Path inputFilePath = Files.createTempFile("inputFile", null);
Path outputDirectory = Files.createTempDirectory("outputDirectory");
Path outputFilePath = Paths.get(outputDirectory.toString(), enclosingFolderName);
inputFilePath.toFile().deleteOnExit();
outputDirectory.toFile().deleteOnExit();

// Write random data to the input file
byte[] data = new byte[10];
new Random().nextBytes(data);
Files.write(inputFilePath, data, StandardOpenOption.CREATE);

assertTrue(Files.exists(inputFilePath));
assertFalse(Files.exists(outputFilePath));

// Encrypt...
String algorithm = "AES/CBC/PKCS5Padding";
SecretKey aesKey = AES.generateKey(256);
AES.encryptFile(algorithm, aesKey, inputFilePath.toString(), outputFilePath.toString());

assertTrue(Files.exists(inputFilePath));
assertTrue(Files.exists(outputFilePath));

// Ensure encrypted file's hash differs from the original
assertFalse(Arrays.equals(Crypto.digest(inputFilePath.toFile()), Crypto.digest(outputFilePath.toFile())));

// Create paths for decrypting
Path decryptedDirectory = Files.createTempDirectory("decryptedDirectory");
Path decryptedFile = Paths.get(decryptedDirectory.toString(), enclosingFolderName, inputFilePath.getFileName().toString());
decryptedDirectory.toFile().deleteOnExit();
assertFalse(Files.exists(decryptedFile));

// Now decrypt...
AES.decryptFile(algorithm, aesKey, outputFilePath.toString(), decryptedFile.toString());

// Ensure resulting file exists
assertTrue(Files.exists(decryptedFile));

// And make sure it matches the original input file
assertTrue(Arrays.equals(Crypto.digest(inputFilePath.toFile()), Crypto.digest(decryptedFile.toFile())));
}

@Test
public void testEncryptionSizeOverhead() throws IOException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, NoSuchPaddingException, IllegalBlockSizeException, BadPaddingException, InvalidKeyException {
for (int size = 1; size < 256; size++) {
String enclosingFolderName = "data";
Path inputFilePath = Files.createTempFile("inputFile", null);
Path outputDirectory = Files.createTempDirectory("outputDirectory");
Path outputFilePath = Paths.get(outputDirectory.toString(), enclosingFolderName);
inputFilePath.toFile().deleteOnExit();
outputDirectory.toFile().deleteOnExit();

// Write random data to the input file
byte[] data = new byte[size];
new Random().nextBytes(data);
Files.write(inputFilePath, data, StandardOpenOption.CREATE);

assertTrue(Files.exists(inputFilePath));
assertFalse(Files.exists(outputFilePath));

// Ensure input file is the same size as the data
assertEquals(size, inputFilePath.toFile().length());

// Encrypt...
String algorithm = "AES/CBC/PKCS5Padding";
SecretKey aesKey = AES.generateKey(256);
AES.encryptFile(algorithm, aesKey, inputFilePath.toString(), outputFilePath.toString());

assertTrue(Files.exists(inputFilePath));
assertTrue(Files.exists(outputFilePath));

final long expectedSize = AES.getEncryptedFileSize(inputFilePath.toFile().length());
System.out.println(String.format("Plaintext size: %d bytes, Ciphertext size: %d bytes", inputFilePath.toFile().length(), outputFilePath.toFile().length()));

// Ensure encryption added a fixed amount of space to the output file
assertEquals(expectedSize, outputFilePath.toFile().length());

// Ensure encrypted file's hash differs from the original
assertFalse(Arrays.equals(Crypto.digest(inputFilePath.toFile()), Crypto.digest(outputFilePath.toFile())));

// Create paths for decrypting
Path decryptedDirectory = Files.createTempDirectory("decryptedDirectory");
Path decryptedFile = Paths.get(decryptedDirectory.toString(), enclosingFolderName, inputFilePath.getFileName().toString());
decryptedDirectory.toFile().deleteOnExit();
assertFalse(Files.exists(decryptedFile));

// Now decrypt...
AES.decryptFile(algorithm, aesKey, outputFilePath.toString(), decryptedFile.toString());

// Ensure resulting file exists
assertTrue(Files.exists(decryptedFile));

// And make sure it matches the original input file
assertTrue(Arrays.equals(Crypto.digest(inputFilePath.toFile()), Crypto.digest(decryptedFile.toFile())));
}
}

}
@@ -91,8 +91,8 @@ public class ArbitraryTransactionMetadataTests extends Common {
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 1000;
int dataLength = 10; // Actual data length will be longer due to encryption
int chunkSize = 10000;
int dataLength = 1000; // Actual data length will be longer due to encryption

String title = "Test title";
String description = "Test description";

@@ -142,8 +142,8 @@ public class ArbitraryTransactionMetadataTests extends Common {
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 1000;
int dataLength = 10; // Actual data length will be longer due to encryption
int chunkSize = 10000;
int dataLength = 1000; // Actual data length will be longer due to encryption

String title = "Test title";
String description = "Test description";

@@ -265,7 +265,7 @@ public class ArbitraryTransactionMetadataTests extends Common {
Category category = Category.CRYPTOCURRENCY;

String expectedTitle = "title Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent feugiat "; // 80 chars
String expectedDescription = "description Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent feugiat pretium massa, non pulvinar mi pretium id. Ut gravida sapien vitae dui posuere tincidunt. Quisque in nibh est. Curabitur at blandit nunc, id aliquet neque. Nulla condimentum eget dolor a egestas. Vestibulum vel tincidunt ex. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Cras congue lacus in risus mattis suscipit. Quisque nisl eros, facilisis a lorem quis, vehicula biben"; // 500 chars
String expectedDescription = "description Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent feugiat pretium massa, non pulvinar mi pretium id. Ut gravida sapien vitae dui posuere tincidunt. Quisque in nibh est. Curabitur at blandit nunc, id aliquet neque"; // 240 chars
List<String> expectedTags = Arrays.asList("tag 1", "tag 2", "tag 4", "tag 5", "tag 6");

// Register the name to Alice

@@ -5,9 +5,13 @@ import org.junit.Before;
import org.junit.Test;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataReader;
import org.qortal.arbitrary.ArbitraryDataTransactionBuilder;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.crypto.Crypto;
import org.qortal.data.PaymentData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.BaseTransactionData;

@@ -24,12 +28,16 @@ import org.qortal.test.common.transaction.TestTransaction;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.RegisterNameTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

@@ -467,4 +475,173 @@ public class ArbitraryTransactionTests extends Common {
secret, compression, digest, dataType, metadataHash, payments);
assertNull(transactionData.getService());
}

@Test
public void testOnChainData() throws DataException, IOException, MissingDataException, IllegalAccessException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Set difficulty to 1 to speed up the tests
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);

PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 1000;
int dataLength = 239; // Max possible size. Becomes 256 bytes after encryption.

// Register the name to Alice
RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
transactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(transactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, transactionData, alice);

// Create PUT transaction
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength, true);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true,
null, null, null, null);

byte[] signature = arbitraryDataFile.getSignature();
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) repository.getTransactionRepository().fromSignature(signature);

// Check that the data is published on chain
assertEquals(ArbitraryTransactionData.DataType.RAW_DATA, arbitraryTransactionData.getDataType());
assertEquals(arbitraryDataFile.getBytes().length, arbitraryTransactionData.getData().length);
assertArrayEquals(arbitraryDataFile.getBytes(), arbitraryTransactionData.getData());

// Check that we have no chunks because the complete file is already less than the chunk size
assertEquals(0, arbitraryDataFile.chunkCount());

// Check that we have one file total - just the complete file (no chunks or metadata)
assertEquals(1, arbitraryDataFile.fileCount());

// Check the metadata isn't present
assertNull(arbitraryDataFile.getMetadata());

// Now build the latest data state for this name
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
arbitraryDataReader.loadSynchronously(true);

// Filename will be "data" because it's been held as raw bytes in the transaction,
// so there is nowhere to store the original filename
File outputFile = Paths.get(arbitraryDataReader.getFilePath().toString(), "data").toFile();

assertArrayEquals(Crypto.digest(outputFile), Crypto.digest(path1.toFile()));
}
}

@Test
public void testOnChainDataWithMetadata() throws DataException, IOException, MissingDataException, IllegalAccessException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Set difficulty to 1 to speed up the tests
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);

PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 1000;
int dataLength = 239; // Max possible size. Becomes 256 bytes after encryption.

String title = "Test title";
String description = "Test description";
List<String> tags = Arrays.asList("Test", "tag", "another tag");
Category category = Category.QORTAL;

// Register the name to Alice
RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
transactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(transactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, transactionData, alice);

// Create PUT transaction
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength, true);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true,
title, description, tags, category);

byte[] signature = arbitraryDataFile.getSignature();
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) repository.getTransactionRepository().fromSignature(signature);

// Check that the data is published on chain
assertEquals(ArbitraryTransactionData.DataType.RAW_DATA, arbitraryTransactionData.getDataType());
assertEquals(arbitraryDataFile.getBytes().length, arbitraryTransactionData.getData().length);
assertArrayEquals(arbitraryDataFile.getBytes(), arbitraryTransactionData.getData());

// Check that we have no chunks because the complete file is already less than the chunk size
assertEquals(0, arbitraryDataFile.chunkCount());

// Check that we have two files total - one for the complete file, and the other for the metadata
assertEquals(2, arbitraryDataFile.fileCount());

// Check the metadata is correct
assertEquals(title, arbitraryDataFile.getMetadata().getTitle());
assertEquals(description, arbitraryDataFile.getMetadata().getDescription());
assertEquals(tags, arbitraryDataFile.getMetadata().getTags());
assertEquals(category, arbitraryDataFile.getMetadata().getCategory());
assertEquals("text/plain", arbitraryDataFile.getMetadata().getMimeType());

// Now build the latest data state for this name
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
arbitraryDataReader.loadSynchronously(true);

// Filename will be "data" because it's been held as raw bytes in the transaction,
// so there is nowhere to store the original filename
File outputFile = Paths.get(arbitraryDataReader.getFilePath().toString(), "data").toFile();

assertArrayEquals(Crypto.digest(outputFile), Crypto.digest(path1.toFile()));
}
}

@Test
public void testOffChainData() throws DataException, IOException, MissingDataException, IllegalAccessException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Set difficulty to 1 to speed up the tests
FieldUtils.writeField(ArbitraryDataManager.getInstance(), "powDifficulty", 1, true);

PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
String publicKey58 = Base58.encode(alice.getPublicKey());
String name = "TEST"; // Can be anything for this test
String identifier = null; // Not used for this test
Service service = Service.ARBITRARY_DATA;
int chunkSize = 1000;
int dataLength = 240; // Min possible size. Becomes 257 bytes after encryption.

// Register the name to Alice
RegisterNameTransactionData transactionData = new RegisterNameTransactionData(TestTransaction.generateBase(alice), name, "");
transactionData.setFee(new RegisterNameTransaction(null, null).getUnitFee(transactionData.getTimestamp()));
TransactionUtils.signAndMint(repository, transactionData, alice);

// Create PUT transaction
Path path1 = ArbitraryUtils.generateRandomDataPath(dataLength, true);
ArbitraryDataFile arbitraryDataFile = ArbitraryUtils.createAndMintTxn(repository, publicKey58, path1, name,
identifier, ArbitraryTransactionData.Method.PUT, service, alice, chunkSize, 0L, true,
null, null, null, null);

byte[] signature = arbitraryDataFile.getSignature();
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) repository.getTransactionRepository().fromSignature(signature);

// Check that the data is published on chain
assertEquals(ArbitraryTransactionData.DataType.DATA_HASH, arbitraryTransactionData.getDataType());
assertEquals(TransactionTransformer.SHA256_LENGTH, arbitraryTransactionData.getData().length);
assertFalse(Arrays.equals(arbitraryDataFile.getBytes(), arbitraryTransactionData.getData()));

// Check that we have no chunks because the complete file is already less than the chunk size
assertEquals(0, arbitraryDataFile.chunkCount());

// Check that we have one file total - just the complete file (no chunks or metadata)
assertEquals(1, arbitraryDataFile.fileCount());

// Check the metadata isn't present
assertNull(arbitraryDataFile.getMetadata());

// Now build the latest data state for this name
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
arbitraryDataReader.loadSynchronously(true);

// File content should match original file
File outputFile = Paths.get(arbitraryDataReader.getFilePath().toString(), "file.txt").toFile();
assertArrayEquals(Crypto.digest(outputFile), Crypto.digest(path1.toFile()));
}
}
}

@@ -51,12 +51,8 @@ public class ArbitraryUtils {
BlockUtils.mintBlock(repository);

// We need a new ArbitraryDataFile instance because the files will have been moved to the signature's folder
byte[] hash = txnBuilder.getArbitraryDataFile().getHash();
byte[] signature = transactionData.getSignature();
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(transactionData.getMetadataHash());

return arbitraryDataFile;
// Or, it may now be using RAW_DATA instead of a hash
return ArbitraryDataFile.fromTransactionData(transactionData);
}

public static ArbitraryDataFile createAndMintTxn(Repository repository, String publicKey58, Path path, String name, String identifier,

@@ -68,6 +64,17 @@ public class ArbitraryUtils {
}

public static Path generateRandomDataPath(int length) throws IOException {
return generateRandomDataPath(length, false);
}

/**
* Generate random data, held in a single file within a directory
* @param length - size of file to create
* @param returnFilePath - if true, the file's path is returned. If false, the outer directory's path is returned.
* @return - path to file or directory, depending on the "returnFilePath" boolean
* @throws IOException
*/
public static Path generateRandomDataPath(int length, boolean returnFilePath) throws IOException {
// Create a file in a random temp directory
Path tempDir = Files.createTempDirectory("generateRandomDataPath");
File file = new File(Paths.get(tempDir.toString(), "file.txt").toString());

@@ -84,6 +91,10 @@ public class ArbitraryUtils {
file1Writer.newLine();
file1Writer.close();

if (returnFilePath) {
return file.toPath();
}

return tempDir;
}