forked from Qortal/qortal
Delete files related to transactions that have a more recent PUT
A PUT creates a new base layer, meaning anything before that point is no longer needed. The cleanup manager now deletes these files automatically. This involved relocating many of the cleanup manager's methods into a shared utility (ArbitraryTransactionUtils) so that the arbitrary data manager can use them too; without that, the files would be fetched from the network again as soon as they were deleted.
This commit is contained in:
parent
8f3620e07b
commit
8fa61e628c
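In outline, the change wires two checks together: detect that a newer PUT exists for the same name/service, then delete the local complete file and chunks for the superseded transaction. A minimal sketch of that flow, assuming an open `Repository` session and the `ArbitraryTransactionUtils` methods added in the new file below (`CleanupSketch` and the `signature` parameter are illustrative, not part of the commit):

```java
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.Repository;
import org.qortal.utils.ArbitraryTransactionUtils;

public class CleanupSketch {
    // Delete local files for a transaction that a newer PUT has made obsolete.
    // The same check, run from ArbitraryDataManager, also stops the deleted
    // files from being re-fetched from the network.
    static void cleanupIfSuperseded(Repository repository, byte[] signature) {
        ArbitraryTransactionData txData =
                ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
        if (txData == null) {
            return; // Not an arbitrary transaction, or a repository problem
        }

        if (ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, txData)) {
            // A newer PUT created a fresh base layer, so this data is no longer needed
            ArbitraryTransactionUtils.deleteCompleteFileAndChunks(txData);
        }
    }
}
```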
@@ -422,6 +422,22 @@ public class ArbitraryDataFile {
 		return true;
 	}
 
+	public boolean anyChunksExist(byte[] chunks) {
+		if (chunks == null) {
+			return false;
+		}
+		ByteBuffer byteBuffer = ByteBuffer.wrap(chunks);
+		while (byteBuffer.remaining() >= TransactionTransformer.SHA256_LENGTH) {
+			byte[] chunkHash = new byte[TransactionTransformer.SHA256_LENGTH];
+			byteBuffer.get(chunkHash);
+			ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash);
+			if (chunk.exists()) {
+				return true;
+			}
+		}
+		return false;
+	}
+
 	public boolean containsChunk(byte[] hash) {
 		for (ArbitraryDataFileChunk chunk : this.chunks) {
 			if (Arrays.equals(hash, chunk.getHash())) {
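The `chunks` argument to `anyChunksExist()` is all of the transaction's chunk hashes concatenated into a single array, 32 bytes (`TransactionTransformer.SHA256_LENGTH`) per digest. A self-contained sketch of the same `ByteBuffer` walk, with the on-disk chunk lookup left out, to make the slicing explicit (`ChunkHashWalk` is illustrative):

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class ChunkHashWalk {
    private static final int SHA256_LENGTH = 32; // TransactionTransformer.SHA256_LENGTH

    // Split a concatenation of SHA-256 digests into individual 32-byte hashes,
    // mirroring the loop in ArbitraryDataFile.anyChunksExist() above.
    static List<byte[]> splitHashes(byte[] chunks) {
        List<byte[]> hashes = new ArrayList<>();
        ByteBuffer byteBuffer = ByteBuffer.wrap(chunks);
        while (byteBuffer.remaining() >= SHA256_LENGTH) {
            byte[] chunkHash = new byte[SHA256_LENGTH];
            byteBuffer.get(chunkHash);
            hashes.add(chunkHash);
        }
        return hashes;
    }

    public static void main(String[] args) {
        byte[] twoHashes = new byte[2 * SHA256_LENGTH]; // e.g. two zero-filled digests
        System.out.println(splitHashes(twoHashes).size()); // prints 2
    }
}
```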
@@ -3,10 +3,8 @@ package org.qortal.controller.arbitrary;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
-import org.qortal.arbitrary.ArbitraryDataFile;
 import org.qortal.controller.Controller;
 import org.qortal.data.transaction.ArbitraryTransactionData;
-import org.qortal.data.transaction.TransactionData;
 import org.qortal.network.Network;
 import org.qortal.network.Peer;
 import org.qortal.repository.DataException;
@@ -14,13 +12,10 @@ import org.qortal.repository.Repository;
 import org.qortal.repository.RepositoryManager;
 import org.qortal.settings.Settings;
 import org.qortal.transaction.Transaction.TransactionType;
+import org.qortal.utils.ArbitraryTransactionUtils;
 import org.qortal.utils.Base58;
 import org.qortal.utils.NTP;
 
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.attribute.BasicFileAttributes;
 import java.util.*;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -43,6 +38,14 @@ public class ArbitraryDataCleanupManager extends Thread {
 	private static long STALE_FILE_TIMEOUT = 60*60*1000; // 1 hour
 
+
+	/*
+	TODO:
+	- Discard all files relating to transactions for a name/service combination before the most recent PUT
+	- Delete old files from _temp
+	- Delete old files not associated with transactions
+	*/
+
 
 	private ArbitraryDataCleanupManager() {
 	}
 
@@ -104,7 +107,7 @@ public class ArbitraryDataCleanupManager extends Thread {
 			}
 
 			// Fetch the transaction data
-			ArbitraryTransactionData arbitraryTransactionData = this.fetchTransactionData(repository, signature);
+			ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
 
 			// Raw data doesn't have any associated files to clean up
 			if (arbitraryTransactionData.getDataType() == ArbitraryTransactionData.DataType.RAW_DATA) {
@@ -112,24 +115,49 @@ public class ArbitraryDataCleanupManager extends Thread {
 			}
 
 			// Check if we have the complete file
-			boolean completeFileExists = this.completeFileExists(arbitraryTransactionData);
+			boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData);
 
-			// Check if we have all the chunks
-			boolean allChunksExist = this.allChunksExist(arbitraryTransactionData);
+			// Check if we have any of the chunks
+			boolean anyChunksExist = ArbitraryTransactionUtils.anyChunksExist(arbitraryTransactionData);
+			boolean transactionHasChunks = (arbitraryTransactionData.getChunkHashes() != null);
 
-			if (completeFileExists && arbitraryTransactionData.getChunkHashes() == null) {
-				// This file doesn't have any chunks because it is too small
-				// We must not delete anything
+			if (!completeFileExists && !anyChunksExist) {
+				// We don't have any files at all for this transaction - nothing to do
 				continue;
 			}
 
+			// We have at least 1 chunk or file for this transaction, so we might need to delete them...
+
+
+			// Check to see if we have had a more recent PUT
+			boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
+			if (hasMoreRecentPutTransaction) {
+				// There is a more recent PUT transaction than the one we are currently processing.
+				// When a PUT is issued, it replaces any layers that would have been there before.
+				// Therefore any data relating to this older transaction is no longer needed.
+				LOGGER.info(String.format("Newer PUT found for %s %s since transaction %s. " +
+						"Deleting all files.", arbitraryTransactionData.getService(),
+						arbitraryTransactionData.getName(), Base58.encode(signature)));
+
+				ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
+			}
+
+			if (completeFileExists && !transactionHasChunks) {
+				// This file doesn't have any chunks because it is too small.
+				// We must not delete anything.
+				continue;
+			}
+
+			// Check if we have all of the chunks
+			boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(arbitraryTransactionData);
+
 			if (completeFileExists && allChunksExist) {
 				// We have the complete file and all the chunks, so we can delete
 				// the complete file if it has reached a certain age.
 				LOGGER.info(String.format("Transaction %s has complete file and all chunks",
 						Base58.encode(arbitraryTransactionData.getSignature())));
 
-				this.deleteCompleteFile(arbitraryTransactionData, now);
+				ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
 			}
 
 			if (completeFileExists && !allChunksExist) {
@@ -137,7 +165,7 @@ public class ArbitraryDataCleanupManager extends Thread {
 				LOGGER.info(String.format("Transaction %s has complete file but no chunks",
 						Base58.encode(arbitraryTransactionData.getSignature())));
 
-				this.createChunks(arbitraryTransactionData, now);
+				ArbitraryTransactionUtils.convertFileToChunks(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
 			}
 		}
 
@@ -155,127 +183,4 @@ public class ArbitraryDataCleanupManager extends Thread {
 		this.interrupt();
 	}
 
-
-	private ArbitraryTransactionData fetchTransactionData(final Repository repository, final byte[] signature) {
-		try {
-			TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
-			if (!(transactionData instanceof ArbitraryTransactionData))
-				return null;
-
-			return (ArbitraryTransactionData) transactionData;
-
-		} catch (DataException e) {
-			LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
-			return null;
-		}
-	}
-
-	private boolean completeFileExists(ArbitraryTransactionData transactionData) {
-		if (transactionData == null) {
-			return false;
-		}
-
-		byte[] digest = transactionData.getData();
-
-		// Load complete file
-		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest);
-		return arbitraryDataFile.exists();
-
-	}
-
-	private boolean allChunksExist(ArbitraryTransactionData transactionData) {
-		if (transactionData == null) {
-			return false;
-		}
-
-		byte[] digest = transactionData.getData();
-		byte[] chunkHashes = transactionData.getChunkHashes();
-
-		if (chunkHashes == null) {
-			// This file doesn't have any chunks
-			return true;
-		}
-
-		// Load complete file and chunks
-		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest);
-		if (chunkHashes != null && chunkHashes.length > 0) {
-			arbitraryDataFile.addChunkHashes(chunkHashes);
-		}
-		return arbitraryDataFile.allChunksExist(chunkHashes);
-
-	}
-
-	private boolean isFileHashRecent(byte[] hash, long now) {
-		try {
-			ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash);
-			if (arbitraryDataFile == null || !arbitraryDataFile.exists()) {
-				// No hash, or file doesn't exist, so it's not recent
-				return false;
-			}
-			Path filePath = arbitraryDataFile.getFilePath();
-
-			BasicFileAttributes attr = Files.readAttributes(filePath, BasicFileAttributes.class);
-			long timeSinceCreated = now - attr.creationTime().toMillis();
-			long timeSinceModified = now - attr.lastModifiedTime().toMillis();
-
-			// Check if the file has been created or modified recently
-			if (timeSinceCreated < STALE_FILE_TIMEOUT) {
-				return true;
-			}
-			if (timeSinceModified < STALE_FILE_TIMEOUT) {
-				return true;
-			}
-
-		} catch (IOException e) {
-			// Can't read file attributes, so assume it's not recent
-		}
-		return false;
-	}
-
-	private void deleteCompleteFile(ArbitraryTransactionData arbitraryTransactionData, long now) {
-		byte[] completeHash = arbitraryTransactionData.getData();
-		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
-
-		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash);
-		arbitraryDataFile.addChunkHashes(chunkHashes);
-
-		if (!this.isFileHashRecent(completeHash, now)) {
-			LOGGER.info("Deleting file {} because it can be rebuilt from chunks " +
-					"if needed", Base58.encode(completeHash));
-
-			arbitraryDataFile.delete();
-		}
-	}
-
-	private void createChunks(ArbitraryTransactionData arbitraryTransactionData, long now) {
-		byte[] completeHash = arbitraryTransactionData.getData();
-		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
-
-		// Split the file into chunks
-		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash);
-		int chunkCount = arbitraryDataFile.split(ArbitraryDataFile.CHUNK_SIZE);
-		if (chunkCount > 1) {
-			LOGGER.info(String.format("Successfully split %s into %d chunk%s",
-					Base58.encode(completeHash), chunkCount, (chunkCount == 1 ? "" : "s")));
-
-			// Verify that the chunk hashes match those in the transaction
-			if (chunkHashes != null && Arrays.equals(chunkHashes, arbitraryDataFile.chunkHashes())) {
-				// Ensure they exist on disk
-				if (arbitraryDataFile.allChunksExist(chunkHashes)) {
-
-					// Now delete the original file if it's not recent
-					if (!this.isFileHashRecent(completeHash, now)) {
-						LOGGER.info("Deleting file {} because it can now be rebuilt from " +
-								"chunks if needed", Base58.encode(completeHash));
-
-						this.deleteCompleteFile(arbitraryTransactionData, now);
-					}
-					else {
-						// File might be in use. It's best to leave it and it it will be cleaned up later.
-					}
-				}
-			}
-		}
-	}
-
 }
@@ -22,6 +22,7 @@ import org.qortal.arbitrary.ArbitraryDataFileChunk;
 import org.qortal.settings.Settings;
 import org.qortal.transaction.ArbitraryTransaction;
 import org.qortal.transaction.Transaction.TransactionType;
+import org.qortal.utils.ArbitraryTransactionUtils;
 import org.qortal.utils.Base58;
 import org.qortal.utils.NTP;
 import org.qortal.utils.Triple;
@@ -143,6 +144,21 @@ public class ArbitraryDataManager extends Thread {
 		final int index = new Random().nextInt(signatures.size());
 		byte[] signature = signatures.get(index);
 
+		if (signature == null) {
+			continue;
+		}
+
+		// Check to see if we have had a more recent PUT
+		ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
+		boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
+		if (hasMoreRecentPutTransaction) {
+			// There is a more recent PUT transaction than the one we are currently processing.
+			// When a PUT is issued, it replaces any layers that would have been there before.
+			// Therefore any data relating to this older transaction is no longer needed and we
+			// shouldn't fetch it from the network.
+			continue;
+		}
+
 		// Ask our connected peers if they have files for this signature
 		// This process automatically then fetches the files themselves if a peer is found
 		fetchArbitraryDataFileList(signature);
237 src/main/java/org/qortal/utils/ArbitraryTransactionUtils.java (new file)
@@ -0,0 +1,237 @@
+package org.qortal.utils;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.arbitrary.ArbitraryDataFile;
+import org.qortal.controller.arbitrary.ArbitraryDataCleanupManager;
+import org.qortal.data.transaction.ArbitraryTransactionData;
+import org.qortal.data.transaction.TransactionData;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+
+public class ArbitraryTransactionUtils {
+
+	private static final Logger LOGGER = LogManager.getLogger(ArbitraryTransactionUtils.class);
+
+	public static ArbitraryTransactionData fetchTransactionData(final Repository repository, final byte[] signature) {
+		try {
+			TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
+			if (!(transactionData instanceof ArbitraryTransactionData))
+				return null;
+
+			return (ArbitraryTransactionData) transactionData;
+
+		} catch (DataException e) {
+			LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
+			return null;
+		}
+	}
+
+	public static ArbitraryTransactionData fetchLatestPut(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
+		if (arbitraryTransactionData == null) {
+			return null;
+		}
+
+		String name = arbitraryTransactionData.getName();
+		ArbitraryTransactionData.Service service = arbitraryTransactionData.getService();
+
+		if (name == null || service == null) {
+			return null;
+		}
+
+		// Get the most recent PUT for this name and service
+		ArbitraryTransactionData latestPut;
+		try {
+			latestPut = repository.getArbitraryRepository()
+					.getLatestTransaction(name, service, ArbitraryTransactionData.Method.PUT);
+		} catch (DataException e) {
+			return null;
+		}
+
+		return latestPut;
+	}
+
+	public static boolean hasMoreRecentPutTransaction(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
+		byte[] signature = arbitraryTransactionData.getSignature();
+		if (signature == null) {
+			// We can't make a sensible decision without a signature
+			// so it's best to assume there is nothing newer
+			return false;
+		}
+
+		ArbitraryTransactionData latestPut = ArbitraryTransactionUtils.fetchLatestPut(repository, arbitraryTransactionData);
+		if (latestPut == null) {
+			return false;
+		}
+
+		// If the latest PUT transaction has a newer timestamp, it will override the existing transaction
+		// Any data relating to the older transaction is no longer needed
+		boolean hasNewerPut = (latestPut.getTimestamp() > arbitraryTransactionData.getTimestamp());
+		return hasNewerPut;
+	}
+
+	public static boolean completeFileExists(ArbitraryTransactionData transactionData) {
+		if (transactionData == null) {
+			return false;
+		}
+
+		byte[] digest = transactionData.getData();
+
+		// Load complete file
+		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest);
+		return arbitraryDataFile.exists();
+
+	}
+
+	public static boolean allChunksExist(ArbitraryTransactionData transactionData) {
+		if (transactionData == null) {
+			return false;
+		}
+
+		byte[] digest = transactionData.getData();
+		byte[] chunkHashes = transactionData.getChunkHashes();
+
+		if (chunkHashes == null) {
+			// This file doesn't have any chunks, which is the same as us having them all
+			return true;
+		}
+
+		// Load complete file and chunks
+		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest);
+		if (chunkHashes != null && chunkHashes.length > 0) {
+			arbitraryDataFile.addChunkHashes(chunkHashes);
+		}
+		return arbitraryDataFile.allChunksExist(chunkHashes);
+	}
+
+	public static boolean anyChunksExist(ArbitraryTransactionData transactionData) {
+		if (transactionData == null) {
+			return false;
+		}
+
+		byte[] digest = transactionData.getData();
+		byte[] chunkHashes = transactionData.getChunkHashes();
+
+		if (chunkHashes == null) {
+			// This file doesn't have any chunks, which means none exist
+			return false;
+		}
+
+		// Load complete file and chunks
+		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest);
+		if (chunkHashes != null && chunkHashes.length > 0) {
+			arbitraryDataFile.addChunkHashes(chunkHashes);
+		}
+		return arbitraryDataFile.anyChunksExist(chunkHashes);
+	}
+
+	public static int ourChunkCount(ArbitraryTransactionData transactionData) {
+		if (transactionData == null) {
+			return 0;
+		}
+
+		byte[] digest = transactionData.getData();
+		byte[] chunkHashes = transactionData.getChunkHashes();
+
+		if (chunkHashes == null) {
+			// This file doesn't have any chunks
+			return 0;
+		}
+
+		// Load complete file and chunks
+		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest);
+		if (chunkHashes != null && chunkHashes.length > 0) {
+			arbitraryDataFile.addChunkHashes(chunkHashes);
+		}
+		return arbitraryDataFile.chunkCount();
+	}
+
+	public static boolean isFileHashRecent(byte[] hash, long now, long cleanupAfter) {
+		try {
+			ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash);
+			if (arbitraryDataFile == null || !arbitraryDataFile.exists()) {
+				// No hash, or file doesn't exist, so it's not recent
+				return false;
+			}
+			Path filePath = arbitraryDataFile.getFilePath();
+
+			BasicFileAttributes attr = Files.readAttributes(filePath, BasicFileAttributes.class);
+			long timeSinceCreated = now - attr.creationTime().toMillis();
+			long timeSinceModified = now - attr.lastModifiedTime().toMillis();
+
+			// Check if the file has been created or modified recently
+			if (timeSinceCreated < cleanupAfter) {
+				return true;
+			}
+			if (timeSinceModified < cleanupAfter) {
+				return true;
+			}
+
+		} catch (IOException e) {
+			// Can't read file attributes, so assume it's not recent
+		}
+		return false;
+	}
+
+	public static void deleteCompleteFile(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) {
+		byte[] completeHash = arbitraryTransactionData.getData();
+		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
+
+		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash);
+		arbitraryDataFile.addChunkHashes(chunkHashes);
+
+		if (!ArbitraryTransactionUtils.isFileHashRecent(completeHash, now, cleanupAfter)) {
+			LOGGER.info("Deleting file {} because it can be rebuilt from chunks " +
+					"if needed", Base58.encode(completeHash));
+
+			arbitraryDataFile.delete();
+		}
+	}
+
+	public static void deleteCompleteFileAndChunks(ArbitraryTransactionData arbitraryTransactionData) {
+		byte[] completeHash = arbitraryTransactionData.getData();
+		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
+
+		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash);
+		arbitraryDataFile.addChunkHashes(chunkHashes);
+		arbitraryDataFile.deleteAll();
+	}
+
+	public static void convertFileToChunks(ArbitraryTransactionData arbitraryTransactionData, long now, long cleanupAfter) {
+		byte[] completeHash = arbitraryTransactionData.getData();
+		byte[] chunkHashes = arbitraryTransactionData.getChunkHashes();
+
+		// Split the file into chunks
+		ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash);
+		int chunkCount = arbitraryDataFile.split(ArbitraryDataFile.CHUNK_SIZE);
+		if (chunkCount > 1) {
+			LOGGER.info(String.format("Successfully split %s into %d chunk%s",
+					Base58.encode(completeHash), chunkCount, (chunkCount == 1 ? "" : "s")));
+
+			// Verify that the chunk hashes match those in the transaction
+			if (chunkHashes != null && Arrays.equals(chunkHashes, arbitraryDataFile.chunkHashes())) {
+				// Ensure they exist on disk
+				if (arbitraryDataFile.allChunksExist(chunkHashes)) {
+
+					// Now delete the original file if it's not recent
+					if (!ArbitraryTransactionUtils.isFileHashRecent(completeHash, now, cleanupAfter)) {
+						LOGGER.info("Deleting file {} because it can now be rebuilt from " +
+								"chunks if needed", Base58.encode(completeHash));
+
+						ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, cleanupAfter);
+					}
+					else {
+						// File might be in use. It's best to leave it and it will be cleaned up later.
+					}
+				}
+			}
+		}
+	}
+
+}
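Taken together, the utility lets both managers apply one policy: keep chunks in preference to the complete file once it has gone stale, never delete the only copy of an unchunked file, and drop everything once a newer PUT lands. A hedged usage sketch, assuming NTP time is available and `txData` came from `fetchTransactionData()` (`thinOutLocalCopies` is illustrative, not part of the commit):

```java
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.NTP;

public class LifecycleSketch {
    // Hypothetical helper: applies the same keep-chunks-over-complete-file
    // policy the cleanup manager uses above.
    static void thinOutLocalCopies(ArbitraryTransactionData txData) {
        Long now = NTP.getTime();
        if (now == null) {
            return; // No NTP-synced time yet; don't make age-based decisions
        }
        long staleTimeout = 60 * 60 * 1000L; // same 1-hour window as STALE_FILE_TIMEOUT

        boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(txData);
        boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(txData);
        boolean hasChunks = (txData.getChunkHashes() != null);

        if (completeFileExists && !hasChunks) {
            return; // Too small to be chunked; the complete file is the only copy
        }
        if (completeFileExists && allChunksExist) {
            // Both forms on disk: drop the complete file once it's stale,
            // since it can be rebuilt from chunks if needed.
            ArbitraryTransactionUtils.deleteCompleteFile(txData, now, staleTimeout);
        }
        else if (completeFileExists) {
            // Only the complete file: split into chunks, then delete the
            // original when stale.
            ArbitraryTransactionUtils.convertFileToChunks(txData, now, staleTimeout);
        }
    }
}
```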