diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java index aaeaec2d..4c98fa4c 100644 --- a/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java +++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java @@ -315,16 +315,15 @@ public class ArbitraryDataFile { LOGGER.debug("Deleted file {}", this.filePath); return true; } catch (IOException e) { - LOGGER.warn("Couldn't delete DataFileChunk at path {}", this.filePath); + LOGGER.warn("Couldn't delete file at path {}", this.filePath); } } } return false; } - public boolean deleteAll() { - // Delete the complete file - boolean success = this.delete(); + public boolean deleteAllChunks() { + boolean success = false; // Delete the individual chunks if (this.chunks != null && this.chunks.size() > 0) { @@ -339,6 +338,16 @@ public class ArbitraryDataFile { return success; } + public boolean deleteAll() { + // Delete the complete file + boolean fileDeleted = this.delete(); + + // Delete the individual chunks + boolean chunksDeleted = this.deleteAllChunks(); + + return fileDeleted && chunksDeleted; + } + protected void cleanupFilesystem() { // Iterate through two levels of parent directories, and delete if empty for (int i=0; i<2; i++) { diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index 5655db0c..6bab9d42 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -14,6 +14,7 @@ import org.qortal.block.Block; import org.qortal.block.BlockChain; import org.qortal.block.BlockChain.BlockTimingByHeight; import org.qortal.controller.Synchronizer.SynchronizationResult; +import org.qortal.controller.arbitrary.ArbitraryDataCleanupManager; import org.qortal.controller.arbitrary.ArbitraryDataManager; import org.qortal.controller.tradebot.TradeBot; import org.qortal.data.account.MintingAccountData; @@ -446,9 +447,10 
@@ public class Controller extends Thread { LOGGER.info("Starting trade-bot"); TradeBot.getInstance(); - // Arbitrary transaction data manager - LOGGER.info("Starting arbitrary-transaction data manager"); + // Arbitrary data controllers + LOGGER.info("Starting arbitrary-transaction controllers"); ArbitraryDataManager.getInstance().start(); + ArbitraryDataCleanupManager.getInstance().start(); // Auto-update service? if (Settings.getInstance().isAutoUpdateEnabled()) { @@ -934,9 +936,10 @@ public class Controller extends Thread { AutoUpdate.getInstance().shutdown(); } - // Arbitrary transaction data manager - LOGGER.info("Shutting down arbitrary-transaction data manager"); + // Arbitrary data controllers + LOGGER.info("Shutting down arbitrary-transaction controllers"); ArbitraryDataManager.getInstance().shutdown(); + ArbitraryDataCleanupManager.getInstance().shutdown(); if (blockMinter != null) { LOGGER.info("Shutting down block minter"); diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java new file mode 100644 index 00000000..c85cc964 --- /dev/null +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java @@ -0,0 +1,281 @@ +package org.qortal.controller.arbitrary; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.api.resource.TransactionsResource.ConfirmationStatus; +import org.qortal.arbitrary.ArbitraryDataFile; +import org.qortal.controller.Controller; +import org.qortal.data.transaction.ArbitraryTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.network.Network; +import org.qortal.network.Peer; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.settings.Settings; +import org.qortal.transaction.Transaction.TransactionType; 
+import org.qortal.utils.Base58; +import org.qortal.utils.NTP; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public class ArbitraryDataCleanupManager extends Thread { + + private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataCleanupManager.class); + private static final List ARBITRARY_TX_TYPE = Arrays.asList(TransactionType.ARBITRARY); + + private static ArbitraryDataCleanupManager instance; + + private volatile boolean isStopping = false; + + /** + * The amount of time that must pass before a file is treated as stale / not recent. + * We can safely delete files created/accessed longer ago than this, if we have a means of + * rebuilding them. The main purpose of this is to avoid deleting files that are currently + * being used by other parts of the system. + */ + private static long STALE_FILE_TIMEOUT = 60*60*1000; // 1 hour + + + private ArbitraryDataCleanupManager() { + } + + public static ArbitraryDataCleanupManager getInstance() { + if (instance == null) + instance = new ArbitraryDataCleanupManager(); + + return instance; + } + + @Override + public void run() { + Thread.currentThread().setName("Arbitrary Data Cleanup Manager"); + + // Use a fixed thread pool to execute the arbitrary data build actions (currently just a single thread) + // This can be expanded to have multiple threads processing the build queue when needed + ExecutorService arbitraryDataBuildExecutor = Executors.newFixedThreadPool(1); + arbitraryDataBuildExecutor.execute(new ArbitraryDataBuildManager()); + + // Paginate queries when fetching arbitrary transactions + final int limit = 100; + int offset = 0; + + try { + while (!isStopping) { + Thread.sleep(30000); + + if (NTP.getTime() == null) { + // Don't attempt to make decisions if we haven't synced our time yet + continue; + 
} + + List peers = Network.getInstance().getHandshakedPeers(); + + // Disregard peers that have "misbehaved" recently + peers.removeIf(Controller.hasMisbehaved); + + // Don't fetch data if we don't have enough up-to-date peers + if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) { + continue; + } + + // Any arbitrary transactions we want to fetch data for? + try (final Repository repository = RepositoryManager.getRepository()) { + List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, ConfirmationStatus.BOTH, limit, offset, true); + // LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit); + if (signatures == null || signatures.isEmpty()) { + offset = 0; + continue; + } + offset += limit; + Long now = NTP.getTime(); + + // Loop through the signatures in this batch + for (int i=0; i 0) { + arbitraryDataFile.addChunkHashes(chunkHashes); + } + return arbitraryDataFile.allChunksExist(chunkHashes); + + } + + private boolean isFileHashRecent(byte[] hash, long now) { + try { + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash); + if (arbitraryDataFile == null || !arbitraryDataFile.exists()) { + // No hash, or file doesn't exist, so it's not recent + return false; + } + Path filePath = arbitraryDataFile.getFilePath(); + + BasicFileAttributes attr = Files.readAttributes(filePath, BasicFileAttributes.class); + long timeSinceCreated = now - attr.creationTime().toMillis(); + long timeSinceModified = now - attr.lastModifiedTime().toMillis(); + + // Check if the file has been created or modified recently + if (timeSinceCreated < STALE_FILE_TIMEOUT) { + return true; + } + if (timeSinceModified < STALE_FILE_TIMEOUT) { + return true; + } + + } catch (IOException e) { + // Can't read file attributes, so assume it's not recent + } + return false; + } + + private void deleteCompleteFile(ArbitraryTransactionData 
arbitraryTransactionData, long now) { + byte[] completeHash = arbitraryTransactionData.getData(); + byte[] chunkHashes = arbitraryTransactionData.getChunkHashes(); + + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash); + arbitraryDataFile.addChunkHashes(chunkHashes); + + if (!this.isFileHashRecent(completeHash, now)) { + LOGGER.info("Deleting file {} because it can be rebuilt from chunks " + + "if needed", Base58.encode(completeHash)); + + arbitraryDataFile.delete(); + } + } + + private void createChunks(ArbitraryTransactionData arbitraryTransactionData, long now) { + byte[] completeHash = arbitraryTransactionData.getData(); + byte[] chunkHashes = arbitraryTransactionData.getChunkHashes(); + + // Split the file into chunks + ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(completeHash); + int chunkCount = arbitraryDataFile.split(ArbitraryDataFile.CHUNK_SIZE); + if (chunkCount > 1) { + LOGGER.info(String.format("Successfully split %s into %d chunk%s", + Base58.encode(completeHash), chunkCount, (chunkCount == 1 ? "" : "s"))); + + // Verify that the chunk hashes match those in the transaction + if (chunkHashes != null && Arrays.equals(chunkHashes, arbitraryDataFile.chunkHashes())) { + // Ensure they exist on disk + if (arbitraryDataFile.allChunksExist(chunkHashes)) { + + // Now delete the original file if it's not recent + if (!this.isFileHashRecent(completeHash, now)) { + LOGGER.info("Deleting file {} because it can now be rebuilt from " + + "chunks if needed", Base58.encode(completeHash)); + + this.deleteCompleteFile(arbitraryTransactionData, now); + } + else { + // File might be in use. It's best to leave it and it will be cleaned up later. + } + } + } + } + } + +}