Merge branch 'rebuild-archive'

CalDescent
2023-03-10 11:50:09 +00:00
17 changed files with 1001 additions and 741 deletions

View File: AdminResource.java

@@ -45,6 +45,7 @@ import org.qortal.block.BlockChain;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.controller.Synchronizer.SynchronizationResult;
import org.qortal.controller.repository.BlockArchiveRebuilder;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.network.Network;
@@ -734,6 +735,64 @@ public class AdminResource {
}
}
@POST
@Path("/repository/archive/rebuild")
@Operation(
summary = "Rebuild archive",
description = "Rebuilds archive files, using the specified serialization version",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "number", example = "2"
)
)
),
responses = {
@ApiResponse(
description = "\"true\"",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String rebuildArchive(@HeaderParam(Security.API_KEY_HEADER) String apiKey, Integer serializationVersion) {
Security.checkApiCallAllowed(request);
// Default serialization version to value specified in settings
if (serializationVersion == null) {
serializationVersion = Settings.getInstance().getDefaultArchiveVersion();
}
try {
// We don't actually need to lock the blockchain here, but we'll do it anyway so that
// the node can focus on rebuilding rather than synchronizing / minting.
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
blockchainLock.lockInterruptibly();
try {
BlockArchiveRebuilder blockArchiveRebuilder = new BlockArchiveRebuilder(serializationVersion);
blockArchiveRebuilder.start();
return "true";
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
} finally {
blockchainLock.unlock();
}
} catch (InterruptedException e) {
// We couldn't lock blockchain to perform rebuild
return "false";
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
}
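For reference, the new endpoint can be exercised with a plain HTTP POST whose body is the requested serialization version. A minimal client sketch using Java's built-in HttpClient — assuming the resource is mounted at /admin, the node's default API port is 12391, and Security.API_KEY_HEADER resolves to X-API-KEY (none of which are shown in this diff):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RebuildArchiveClient {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12391/admin/repository/archive/rebuild"))
                .header("X-API-KEY", "<api-key>")   // assumed header name; key value is node-specific
                .header("Content-Type", "text/plain")
                .POST(HttpRequest.BodyPublishers.ofString("2")) // serialization version to rebuild with
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // "true" when the rebuild ran; "false" when the blockchain lock couldn't be acquired
        System.out.println(response.body());
    }
}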
@DELETE
@Path("/repository")
@Operation(

View File: BlocksResource.java

@@ -48,6 +48,7 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.Triple;
@Path("/blocks")
@Tag(name = "Blocks")
@@ -165,10 +166,13 @@ public class BlocksResource {
}
// Not found, so try the block archive
- byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
- if (bytes != null) {
-     if (version != 1) {
-         throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Archived blocks require version 1");
+ Triple<byte[], Integer, Integer> serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, false, repository);
+ if (serializedBlock != null) {
+     byte[] bytes = serializedBlock.getA();
+     Integer serializationVersion = serializedBlock.getB();
+     if (version != serializationVersion) {
+         // TODO: we could quite easily reserialize the block with the requested version
+         throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Block is not stored using requested serialization version.");
}
return Base58.encode(bytes);
}

View File: org/qortal/block/Block.java

@@ -657,6 +657,10 @@ public class Block {
return this.atStates;
}
public byte[] getAtStatesHash() {
return this.atStatesHash;
}
/**
* Return expanded info on block's online accounts.
* <p>

View File: org/qortal/controller/Controller.java

@@ -400,12 +400,8 @@ public class Controller extends Thread {
RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
RepositoryManager.setRepositoryFactory(repositoryFactory);
RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
- try (final Repository repository = RepositoryManager.getRepository()) {
-     RepositoryManager.archive(repository);
-     RepositoryManager.prune(repository);
- }
- } catch (DataException e) {
+ }
+ catch (DataException e) {
// If exception has no cause then repository is in use by some other process.
if (e.getCause() == null) {
LOGGER.info("Repository in use by another process?");
@@ -1379,9 +1375,24 @@ public class Controller extends Thread {
// If we have no block data, we should check the archive in case it's there
if (blockData == null) {
if (Settings.getInstance().isArchiveEnabled()) {
- byte[] bytes = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
- if (bytes != null) {
-     CachedBlockMessage blockMessage = new CachedBlockMessage(bytes);
+ Triple<byte[], Integer, Integer> serializedBlock = BlockArchiveReader.getInstance().fetchSerializedBlockBytesForSignature(signature, true, repository);
+ if (serializedBlock != null) {
+     byte[] bytes = serializedBlock.getA();
+     Integer serializationVersion = serializedBlock.getB();
+     Message blockMessage;
+     switch (serializationVersion) {
+         case 1:
+             blockMessage = new CachedBlockMessage(bytes);
+             break;
+         case 2:
+             blockMessage = new CachedBlockV2Message(bytes);
+             break;
+         default:
+             return;
+     }
blockMessage.setId(message.getId());
// This call also causes the other needed data to be pulled in from repository

View File: org/qortal/controller/repository/BlockArchiveRebuilder.java

@@ -0,0 +1,121 @@
package org.qortal.controller.repository;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.repository.*;
import org.qortal.settings.Settings;
import org.qortal.transform.TransformationException;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
public class BlockArchiveRebuilder {
private static final Logger LOGGER = LogManager.getLogger(BlockArchiveRebuilder.class);
private final int serializationVersion;
public BlockArchiveRebuilder(int serializationVersion) {
this.serializationVersion = serializationVersion;
}
public void start() throws DataException, IOException {
if (!Settings.getInstance().isArchiveEnabled() || Settings.getInstance().isLite()) {
return;
}
// New archive path is in a different location from original archive path, to avoid conflicts.
// It will be moved later, once the process is complete.
final Path newArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive-rebuild");
final Path originalArchivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive");
// Delete archive-rebuild if it exists from a previous attempt
FileUtils.deleteDirectory(newArchivePath.toFile());
try (final Repository repository = RepositoryManager.getRepository()) {
int startHeight = 1; // We need to rebuild the entire archive
LOGGER.info("Rebuilding block archive from height {}...", startHeight);
while (!Controller.isStopping()) {
repository.discardChanges();
Thread.sleep(1000L);
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
// Rebuild archive
try {
final int maximumArchiveHeight = BlockArchiveReader.getInstance().getHeightOfLastArchivedBlock();
if (startHeight >= maximumArchiveHeight) {
// We've finished.
// Delete existing archive and move the newly built one into its place
FileUtils.deleteDirectory(originalArchivePath.toFile());
FileUtils.moveDirectory(newArchivePath.toFile(), originalArchivePath.toFile());
BlockArchiveReader.getInstance().invalidateFileListCache();
LOGGER.info("Block archive successfully rebuilt");
return;
}
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, serializationVersion, newArchivePath, repository);
// Set data source to BLOCK_ARCHIVE as we are rebuilding
writer.setDataSource(BlockArchiveWriter.BlockArchiveDataSource.BLOCK_ARCHIVE);
// We can't enforce the 100MB file size target, as the final file needs to contain all blocks
// that exist in the current archive. Otherwise, the final blocks in the archive will be lost.
writer.setShouldEnforceFileSizeTarget(false);
// We want to log the rebuild progress
writer.setShouldLogProgress(true);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight += writer.getWrittenCount();
repository.saveChanges();
break;
case STOPPING:
return;
case NOT_ENOUGH_BLOCKS:
// This shouldn't happen, as we're not enforcing minimum file sizes
repository.discardChanges();
throw new DataException("Unable to rebuild archive due to unexpected NOT_ENOUGH_BLOCKS response.");
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed.
LOGGER.info("Error: block not found when rebuilding archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
repository.discardChanges();
throw new DataException("Unable to rebuild archive because a block is missing.");
}
} catch (IOException | TransformationException e) {
LOGGER.info("Caught exception when rebuilding block archive", e);
throw new DataException("Unable to rebuild block archive");
}
}
} catch (InterruptedException e) {
// Do nothing
} finally {
// Delete archive-rebuild if it still exists, as that means something went wrong
FileUtils.deleteDirectory(newArchivePath.toFile());
}
}
}

View File: org/qortal/network/message/CachedBlockV2Message.java

@@ -0,0 +1,43 @@
package org.qortal.network.message;
import com.google.common.primitives.Ints;
import org.qortal.block.Block;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
// This is an OUTGOING-only Message which more readily lends itself to being cached
public class CachedBlockV2Message extends Message implements Cloneable {
public CachedBlockV2Message(Block block) throws TransformationException {
super(MessageType.BLOCK_V2);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
try {
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
bytes.write(BlockTransformer.toBytes(block));
} catch (IOException e) {
throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream");
}
this.dataBytes = bytes.toByteArray();
this.checksumBytes = Message.generateChecksum(this.dataBytes);
}
public CachedBlockV2Message(byte[] cachedBytes) {
super(MessageType.BLOCK_V2);
this.dataBytes = cachedBytes;
this.checksumBytes = Message.generateChecksum(this.dataBytes);
}
public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) {
throw new UnsupportedOperationException("CachedBlockV2Message is for outgoing messages only");
}
}
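Because the cached payload is just a 4-byte big-endian height followed by the serialized block bytes (see the constructor above), it can be decoded without any Message framing. An illustrative sketch, not part of the commit:

import java.nio.ByteBuffer;

public class CachedPayloadInspector {
    // Decodes the layout written by CachedBlockV2Message: height int, then block bytes
    public static void inspect(byte[] dataBytes) {
        ByteBuffer buffer = ByteBuffer.wrap(dataBytes);
        int height = buffer.getInt();                  // written via Ints.toByteArray(height)
        byte[] blockBytes = new byte[buffer.remaining()];
        buffer.get(blockBytes);                        // serialized block as produced by BlockTransformer
        System.out.printf("Cached block at height %d, %d serialized bytes%n", height, blockBytes.length);
    }
}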

View File: org/qortal/repository/BlockArchiveReader.java

@@ -3,10 +3,7 @@ package org.qortal.repository;
import com.google.common.primitives.Ints;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockArchiveData;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.settings.Settings;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformation;
@@ -67,20 +64,51 @@ public class BlockArchiveReader {
this.fileListCache = Map.copyOf(map);
}
public Integer fetchSerializationVersionForHeight(int height) {
if (this.fileListCache == null) {
this.fetchFileList();
}
Triple<byte[], Integer, Integer> serializedBlock = this.fetchSerializedBlockBytesForHeight(height);
if (serializedBlock == null) {
return null;
}
Integer serializationVersion = serializedBlock.getB();
return serializationVersion;
}
public BlockTransformation fetchBlockAtHeight(int height) {
if (this.fileListCache == null) {
this.fetchFileList();
}
- byte[] serializedBytes = this.fetchSerializedBlockBytesForHeight(height);
- if (serializedBytes == null) {
+ Triple<byte[], Integer, Integer> serializedBlock = this.fetchSerializedBlockBytesForHeight(height);
+ if (serializedBlock == null) {
      return null;
  }
+ byte[] serializedBytes = serializedBlock.getA();
+ Integer serializationVersion = serializedBlock.getB();
+ if (serializedBytes == null || serializationVersion == null) {
+     return null;
+ }
ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes);
BlockTransformation blockInfo = null;
try {
- blockInfo = BlockTransformer.fromByteBuffer(byteBuffer);
+ switch (serializationVersion) {
+     case 1:
+         blockInfo = BlockTransformer.fromByteBuffer(byteBuffer);
+         break;
+     case 2:
+         blockInfo = BlockTransformer.fromByteBufferV2(byteBuffer);
+         break;
+     default:
+         // Invalid serialization version
+         return null;
+ }
if (blockInfo != null && blockInfo.getBlockData() != null) {
// Block height is stored outside of the main serialized bytes, so it
// won't be set automatically.
@@ -168,15 +196,20 @@ public class BlockArchiveReader {
return null;
}
- public byte[] fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) {
+ public Triple<byte[], Integer, Integer> fetchSerializedBlockBytesForSignature(byte[] signature, boolean includeHeightPrefix, Repository repository) {
if (this.fileListCache == null) {
this.fetchFileList();
}
Integer height = this.fetchHeightForSignature(signature, repository);
if (height != null) {
- byte[] blockBytes = this.fetchSerializedBlockBytesForHeight(height);
- if (blockBytes == null) {
+ Triple<byte[], Integer, Integer> serializedBlock = this.fetchSerializedBlockBytesForHeight(height);
+ if (serializedBlock == null) {
      return null;
  }
+ byte[] blockBytes = serializedBlock.getA();
+ Integer version = serializedBlock.getB();
+ if (blockBytes == null || version == null) {
+     return null;
+ }
@@ -187,18 +220,18 @@ public class BlockArchiveReader {
try {
bytes.write(Ints.toByteArray(height));
bytes.write(blockBytes);
- return bytes.toByteArray();
+ return new Triple<>(bytes.toByteArray(), version, height);
} catch (IOException e) {
return null;
}
}
- return blockBytes;
+ return new Triple<>(blockBytes, version, height);
}
return null;
}
- public byte[] fetchSerializedBlockBytesForHeight(int height) {
+ public Triple<byte[], Integer, Integer> fetchSerializedBlockBytesForHeight(int height) {
String filename = this.getFilenameForHeight(height);
if (filename == null) {
// We don't have this block in the archive
@@ -221,7 +254,7 @@ public class BlockArchiveReader {
// End of fixed length header
// Make sure the version is one we recognize
- if (version != 1) {
+ if (version != 1 && version != 2) {
LOGGER.info("Error: unknown version in file {}: {}", filename, version);
return null;
}
@@ -258,7 +291,7 @@ public class BlockArchiveReader {
byte[] blockBytes = new byte[blockLength];
file.read(blockBytes);
- return blockBytes;
+ return new Triple<>(blockBytes, version, height);
} catch (FileNotFoundException e) {
LOGGER.info("File {} not found: {}", filename, e.getMessage());
@@ -279,6 +312,30 @@ public class BlockArchiveReader {
}
}
public int getHeightOfLastArchivedBlock() {
if (this.fileListCache == null) {
this.fetchFileList();
}
int maxEndHeight = 0;
Iterator it = this.fileListCache.entrySet().iterator();
while (it.hasNext()) {
Map.Entry pair = (Map.Entry) it.next();
if (pair == null || pair.getKey() == null || pair.getValue() == null) {
continue;
}
Triple<Integer, Integer, Integer> heightInfo = (Triple<Integer, Integer, Integer>) pair.getValue();
Integer endHeight = heightInfo.getB();
if (endHeight != null && endHeight > maxEndHeight) {
maxEndHeight = endHeight;
}
}
return maxEndHeight;
}
public void invalidateFileListCache() {
this.fileListCache = null;
}
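Callers now receive a Triple of (serialized bytes, serialization version, height) instead of a bare byte array. A minimal consumer sketch, assuming org.qortal.utils.Triple exposes getC() for its third component in the same way getA() and getB() are used above:

import org.qortal.repository.BlockArchiveReader;
import org.qortal.utils.Triple;

public class ArchiveReadExample {
    public static void printBlockInfo(int height) {
        Triple<byte[], Integer, Integer> serialized =
                BlockArchiveReader.getInstance().fetchSerializedBlockBytesForHeight(height);
        if (serialized == null) {
            System.out.println("Block not in archive");
            return;
        }
        byte[] bytes = serialized.getA();         // raw serialized block
        Integer version = serialized.getB();      // serialization version (1 or 2)
        Integer readHeight = serialized.getC();   // height the bytes correspond to
        System.out.printf("Block %d: %d bytes, serialization v%d%n", readHeight, bytes.length, version);
    }
}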

View File: org/qortal/repository/BlockArchiveWriter.java

@@ -6,10 +6,13 @@ import org.apache.logging.log4j.Logger;
import org.qortal.block.Block;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.data.at.ATStateData;
import org.qortal.data.block.BlockArchiveData;
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.settings.Settings;
import org.qortal.transform.TransformationException;
import org.qortal.transform.block.BlockTransformation;
import org.qortal.transform.block.BlockTransformer;
import java.io.ByteArrayOutputStream;
@@ -18,6 +21,7 @@ import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
public class BlockArchiveWriter {
@@ -28,25 +32,71 @@ public class BlockArchiveWriter {
BLOCK_NOT_FOUND
}
public enum BlockArchiveDataSource {
BLOCK_REPOSITORY, // To build an archive from the Blocks table
BLOCK_ARCHIVE // To build a new archive from an existing archive
}
private static final Logger LOGGER = LogManager.getLogger(BlockArchiveWriter.class);
public static final long DEFAULT_FILE_SIZE_TARGET = 100 * 1024 * 1024; // 100MiB
private int startHeight;
private final int endHeight;
private final Integer serializationVersion;
private final Path archivePath;
private final Repository repository;
private long fileSizeTarget = DEFAULT_FILE_SIZE_TARGET;
private boolean shouldEnforceFileSizeTarget = true;
// Default data source to BLOCK_REPOSITORY; can optionally be overridden
private BlockArchiveDataSource dataSource = BlockArchiveDataSource.BLOCK_REPOSITORY;
private boolean shouldLogProgress = false;
private int writtenCount;
private int lastWrittenHeight;
private Path outputPath;
- public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) {
/**
 * Instantiate a BlockArchiveWriter using a custom archive path
 * @param startHeight
 * @param endHeight
 * @param serializationVersion
 * @param archivePath
 * @param repository
 */
public BlockArchiveWriter(int startHeight, int endHeight, Integer serializationVersion, Path archivePath, Repository repository) {
this.startHeight = startHeight;
this.endHeight = endHeight;
this.archivePath = archivePath.toAbsolutePath();
this.repository = repository;
if (serializationVersion == null) {
// When serialization version isn't specified, fetch it from the existing archive
serializationVersion = this.findSerializationVersion();
}
this.serializationVersion = serializationVersion;
}
/**
* Instantiate a BlockArchiveWriter using the default archive path and version
* @param startHeight
* @param endHeight
* @param repository
*/
public BlockArchiveWriter(int startHeight, int endHeight, Repository repository) {
this(startHeight, endHeight, null, Paths.get(Settings.getInstance().getRepositoryPath(), "archive"), repository);
}
private int findSerializationVersion() {
// Attempt to fetch the serialization version from the existing archive
Integer block2SerializationVersion = BlockArchiveReader.getInstance().fetchSerializationVersionForHeight(2);
if (block2SerializationVersion != null) {
return block2SerializationVersion;
}
// Default to version specified in settings
return Settings.getInstance().getDefaultArchiveVersion();
}
public static int getMaxArchiveHeight(Repository repository) throws DataException {
@@ -72,8 +122,7 @@ public class BlockArchiveWriter {
public BlockArchiveWriteResult write() throws DataException, IOException, TransformationException, InterruptedException {
// Create the archive folder if it doesn't exist
- // This is a subfolder of the db directory, to make bootstrapping easier
- Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
+ // This is generally a subfolder of the db directory, to make bootstrapping easier
try {
Files.createDirectories(archivePath);
} catch (IOException e) {
@@ -95,13 +144,13 @@ public class BlockArchiveWriter {
LOGGER.info(String.format("Fetching blocks from height %d...", startHeight));
int i = 0;
- while (headerBytes.size() + bytes.size() < this.fileSizeTarget) {
+ while (headerBytes.size() + bytes.size() < this.fileSizeTarget
+         || this.shouldEnforceFileSizeTarget == false) {
if (Controller.isStopping()) {
return BlockArchiveWriteResult.STOPPING;
}
if (Synchronizer.getInstance().isSynchronizing()) {
Thread.sleep(1000L);
continue;
}
@@ -112,7 +161,28 @@ public class BlockArchiveWriter {
//LOGGER.info("Fetching block {}...", currentHeight);
- BlockData blockData = repository.getBlockRepository().fromHeight(currentHeight);
+ BlockData blockData = null;
List<TransactionData> transactions = null;
List<ATStateData> atStates = null;
byte[] atStatesHash = null;
switch (this.dataSource) {
case BLOCK_ARCHIVE:
BlockTransformation archivedBlock = BlockArchiveReader.getInstance().fetchBlockAtHeight(currentHeight);
if (archivedBlock != null) {
blockData = archivedBlock.getBlockData();
transactions = archivedBlock.getTransactions();
atStates = archivedBlock.getAtStates();
atStatesHash = archivedBlock.getAtStatesHash();
}
break;
case BLOCK_REPOSITORY:
default:
blockData = repository.getBlockRepository().fromHeight(currentHeight);
break;
}
if (blockData == null) {
return BlockArchiveWriteResult.BLOCK_NOT_FOUND;
}
@@ -122,18 +192,50 @@ public class BlockArchiveWriter {
repository.getBlockArchiveRepository().save(blockArchiveData);
repository.saveChanges();
// Build the block
Block block;
if (atStatesHash != null) {
block = new Block(repository, blockData, transactions, atStatesHash);
}
else if (atStates != null) {
block = new Block(repository, blockData, transactions, atStates);
}
else {
block = new Block(repository, blockData);
}
// Write the block data to some byte buffers
- Block block = new Block(repository, blockData);
int blockIndex = bytes.size();
// Write block index to header
headerBytes.write(Ints.toByteArray(blockIndex));
// Write block height
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
- byte[] blockBytes = BlockTransformer.toBytes(block);
// Get serialized block bytes
byte[] blockBytes;
switch (serializationVersion) {
case 1:
blockBytes = BlockTransformer.toBytes(block);
break;
case 2:
blockBytes = BlockTransformer.toBytesV2(block);
break;
default:
throw new DataException("Invalid serialization version");
}
// Write block length
bytes.write(Ints.toByteArray(blockBytes.length));
// Write block bytes
bytes.write(blockBytes);
// Log every 1000 blocks
if (this.shouldLogProgress && i % 1000 == 0) {
LOGGER.info("Archived up to block height {}. Size of current file: {} bytes", currentHeight, (headerBytes.size() + bytes.size()));
}
i++;
}
@@ -147,11 +249,10 @@ public class BlockArchiveWriter {
// We have enough blocks to create a new file
int endHeight = startHeight + i - 1;
- int version = 1;
String filePath = String.format("%s/%d-%d.dat", archivePath.toString(), startHeight, endHeight);
FileOutputStream fileOutputStream = new FileOutputStream(filePath);
// Write version number
- fileOutputStream.write(Ints.toByteArray(version));
+ fileOutputStream.write(Ints.toByteArray(serializationVersion));
// Write start height
fileOutputStream.write(Ints.toByteArray(startHeight));
// Write end height
@@ -199,4 +300,12 @@ public class BlockArchiveWriter {
this.shouldEnforceFileSizeTarget = shouldEnforceFileSizeTarget;
}
public void setDataSource(BlockArchiveDataSource dataSource) {
this.dataSource = dataSource;
}
public void setShouldLogProgress(boolean shouldLogProgress) {
this.shouldLogProgress = shouldLogProgress;
}
}
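Each archive file now begins with the serialization version rather than a hardcoded 1. A sketch reading back the three leading integers the writer emits above; the full fixed-length header contains further fields not visible in this hunk, and the db/archive location and <start>-<end>.dat file name are assumptions based on the format string above:

import java.io.DataInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ArchiveHeaderInspector {
    public static void main(String[] args) throws IOException {
        Path file = Paths.get("db", "archive", "2-10242.dat"); // hypothetical file
        try (DataInputStream in = new DataInputStream(Files.newInputStream(file))) {
            int version = in.readInt();       // 1 or 2 after this commit (big-endian, like Ints.toByteArray)
            int startHeight = in.readInt();
            int endHeight = in.readInt();
            System.out.printf("Archive file v%d covering blocks %d to %d%n", version, startHeight, endHeight);
        }
    }
}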

View File: org/qortal/repository/RepositoryManager.java

@@ -2,11 +2,6 @@ package org.qortal.repository;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.gui.SplashFrame;
import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving;
import org.qortal.repository.hsqldb.HSQLDBDatabasePruning;
import org.qortal.repository.hsqldb.HSQLDBRepository;
import org.qortal.settings.Settings;
import java.sql.SQLException;
import java.util.concurrent.TimeoutException;
@@ -61,62 +56,6 @@ public abstract class RepositoryManager {
}
}
public static boolean archive(Repository repository) {
if (Settings.getInstance().isLite()) {
// Lite nodes have no blockchain
return false;
}
// Bulk archive the database the first time we use archive mode
if (Settings.getInstance().isArchiveEnabled()) {
if (RepositoryManager.canArchiveOrPrune()) {
try {
return HSQLDBDatabaseArchiving.buildBlockArchive(repository, BlockArchiveWriter.DEFAULT_FILE_SIZE_TARGET);
} catch (DataException e) {
LOGGER.info("Unable to build block archive. The database may have been left in an inconsistent state.");
}
}
else {
LOGGER.info("Unable to build block archive due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
LOGGER.info("To bootstrap, stop the core and delete the db folder, then start the core again.");
SplashFrame.getInstance().updateStatus("Missing index. Bootstrapping is recommended.");
}
}
return false;
}
public static boolean prune(Repository repository) {
if (Settings.getInstance().isLite()) {
// Lite nodes have no blockchain
return false;
}
// Bulk prune the database the first time we use top-only or block archive mode
if (Settings.getInstance().isTopOnly() ||
Settings.getInstance().isArchiveEnabled()) {
if (RepositoryManager.canArchiveOrPrune()) {
try {
boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates((HSQLDBRepository) repository);
boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks((HSQLDBRepository) repository);
// Perform repository maintenance to shrink the db size down
if (prunedATStates && prunedBlocks) {
HSQLDBDatabasePruning.performMaintenance(repository);
return true;
}
} catch (SQLException | DataException e) {
LOGGER.info("Unable to bulk prune AT states. The database may have been left in an inconsistent state.");
}
}
else {
LOGGER.info("Unable to prune blocks due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
}
}
return false;
}
public static void setRequestedCheckpoint(Boolean quick) {
quickCheckpointRequested = quick;
}

View File: org/qortal/repository/hsqldb/HSQLDBDatabaseArchiving.java

@@ -1,88 +0,0 @@
package org.qortal.repository.hsqldb;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.gui.SplashFrame;
import org.qortal.repository.BlockArchiveWriter;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.transform.TransformationException;
import java.io.IOException;
/**
*
* When switching to an archiving node, we need to archive most of the database contents.
* This involves copying its data into flat files.
* If we do this entirely as a background process, it is very slow and can interfere with syncing.
* However, if we take the approach of doing this in bulk, before starting up the rest of the
* processes, this makes it much faster and less invasive.
*
* From that point, the original background archiving process will run, but can be dialled right down
* so as not to interfere with syncing.
*
*/
public class HSQLDBDatabaseArchiving {
private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabaseArchiving.class);
public static boolean buildBlockArchive(Repository repository, long fileSizeTarget) throws DataException {
// Only build the archive if we haven't already got one that is up to date
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
if (upToDate) {
// Already archived
return false;
}
LOGGER.info("Building block archive - this process could take a while...");
SplashFrame.getInstance().updateStatus("Building block archive...");
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
int startHeight = 0;
while (!Controller.isStopping()) {
try {
BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
writer.setFileSizeTarget(fileSizeTarget);
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
switch (result) {
case OK:
// Increment block archive height
startHeight = writer.getLastWrittenHeight() + 1;
repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
repository.saveChanges();
break;
case STOPPING:
return false;
case NOT_ENOUGH_BLOCKS:
// We've reached the limit of the blocks we can archive
// Return from the whole method
return true;
case BLOCK_NOT_FOUND:
// We tried to archive a block that didn't exist. This is a major failure and likely means
// that a bootstrap or re-sync is needed. Return from the method
LOGGER.info("Error: block not found when building archive. If this error persists, " +
"a bootstrap or re-sync may be needed.");
return false;
}
} catch (IOException | TransformationException | InterruptedException e) {
LOGGER.info("Caught exception when creating block cache", e);
return false;
}
}
// If we got this far then something went wrong (most likely the app is stopping)
return false;
}
}

View File: org/qortal/repository/hsqldb/HSQLDBDatabasePruning.java

@@ -1,332 +0,0 @@
package org.qortal.repository.hsqldb;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
import org.qortal.data.block.BlockData;
import org.qortal.gui.SplashFrame;
import org.qortal.repository.BlockArchiveWriter;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.settings.Settings;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.concurrent.TimeoutException;
/**
*
* When switching from a full node to a pruning node, we need to delete most of the database contents.
* If we do this entirely as a background process, it is very slow and can interfere with syncing.
* However, if we take the approach of transferring only the necessary rows to a new table and then
* deleting the original table, this makes the process much faster. It was taking several days to
* delete the AT states in the background, but only a couple of minutes to copy them to a new table.
*
* The trade off is that we have to go through a form of "reshape" when starting the app for the first
* time after enabling pruning mode. But given that this is an opt-in mode, I don't think it will be
* a problem.
*
* Once the pruning is complete, it automatically performs a CHECKPOINT DEFRAG in order to
* shrink the database file size down to a fraction of what it was before.
*
* From this point, the original background process will run, but can be dialled right down so as not
* to interfere with syncing.
*
*/
public class HSQLDBDatabasePruning {
private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabasePruning.class);
public static boolean pruneATStates(HSQLDBRepository repository) throws SQLException, DataException {
// Only bulk prune AT states if we have never done so before
int pruneHeight = repository.getATRepository().getAtPruneHeight();
if (pruneHeight > 0) {
// Already pruned AT states
return false;
}
if (Settings.getInstance().isArchiveEnabled()) {
// Only proceed if we can see that the archiver has already finished
// This way, if the archiver failed for any reason, we can prune once it has had
// some opportunities to try again
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
if (!upToDate) {
return false;
}
}
LOGGER.info("Starting bulk prune of AT states - this process could take a while... " +
"(approx. 2 mins on high spec, or upwards of 30 mins in some cases)");
SplashFrame.getInstance().updateStatus("Pruning database (takes up to 30 mins)...");
// Create new AT-states table to hold smaller dataset
repository.executeCheckedUpdate("DROP TABLE IF EXISTS ATStatesNew");
repository.executeCheckedUpdate("CREATE TABLE ATStatesNew ("
+ "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, "
+ "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, "
+ "PRIMARY KEY (AT_address, height), "
+ "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)");
repository.executeCheckedUpdate("SET TABLE ATStatesNew NEW SPACE");
repository.executeCheckedUpdate("CHECKPOINT");
// Add a height index
LOGGER.info("Adding index to AT states table...");
repository.executeCheckedUpdate("CREATE INDEX IF NOT EXISTS ATStatesNewHeightIndex ON ATStatesNew (height)");
repository.executeCheckedUpdate("CHECKPOINT");
// Find our latest block
BlockData latestBlock = repository.getBlockRepository().getLastBlock();
if (latestBlock == null) {
LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
return false;
}
// Calculate some constants for later use
final int blockchainHeight = latestBlock.getHeight();
int maximumBlockToTrim = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
if (Settings.getInstance().isArchiveEnabled()) {
// Archive mode - don't prune anything that hasn't been archived yet
maximumBlockToTrim = Math.min(maximumBlockToTrim, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
}
final int endHeight = blockchainHeight;
final int blockStep = 10000;
// It's essential that we rebuild the latest AT states here, as we are using this data in the next query.
// Failing to do this will result in important AT states being deleted, rendering the database unusable.
repository.getATRepository().rebuildLatestAtStates(endHeight);
// Loop through all the LatestATStates and copy them to the new table
LOGGER.info("Copying AT states...");
for (int height = 0; height < endHeight; height += blockStep) {
final int batchEndHeight = height + blockStep - 1;
//LOGGER.info(String.format("Copying AT states between %d and %d...", height, batchEndHeight));
String sql = "SELECT height, AT_address FROM LatestATStates WHERE height BETWEEN ? AND ?";
try (ResultSet latestAtStatesResultSet = repository.checkedExecute(sql, height, batchEndHeight)) {
if (latestAtStatesResultSet != null) {
do {
int latestAtHeight = latestAtStatesResultSet.getInt(1);
String latestAtAddress = latestAtStatesResultSet.getString(2);
// Copy this latest ATState to the new table
//LOGGER.info(String.format("Copying AT %s at height %d...", latestAtAddress, latestAtHeight));
try {
String updateSql = "INSERT INTO ATStatesNew ("
+ "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
+ "FROM ATStates "
+ "WHERE height = ? AND AT_address = ?)";
repository.executeCheckedUpdate(updateSql, latestAtHeight, latestAtAddress);
} catch (SQLException e) {
repository.examineException(e);
throw new DataException("Unable to copy ATStates", e);
}
// If this batch includes blocks after the maximum block to trim, we will need to copy
// each of its AT states above maximumBlockToTrim as they are considered "recent". We
// need to do this for _all_ AT states in these blocks, regardless of their latest state.
if (batchEndHeight >= maximumBlockToTrim) {
// Now copy this AT's states for each recent block they are present in
for (int i = maximumBlockToTrim; i < endHeight; i++) {
if (latestAtHeight < i) {
// This AT finished before this block so there is nothing to copy
continue;
}
//LOGGER.info(String.format("Copying recent AT %s at height %d...", latestAtAddress, i));
try {
// Copy each LatestATState to the new table
String updateSql = "INSERT IGNORE INTO ATStatesNew ("
+ "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
+ "FROM ATStates "
+ "WHERE height = ? AND AT_address = ?)";
repository.executeCheckedUpdate(updateSql, i, latestAtAddress);
} catch (SQLException e) {
repository.examineException(e);
throw new DataException("Unable to copy ATStates", e);
}
}
}
repository.saveChanges();
} while (latestAtStatesResultSet.next());
}
} catch (SQLException e) {
throw new DataException("Unable to copy AT states", e);
}
}
// Finally, drop the original table and rename
LOGGER.info("Deleting old AT states...");
repository.executeCheckedUpdate("DROP TABLE ATStates");
repository.executeCheckedUpdate("ALTER TABLE ATStatesNew RENAME TO ATStates");
repository.executeCheckedUpdate("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex");
repository.executeCheckedUpdate("CHECKPOINT");
// Update the prune height
int nextPruneHeight = maximumBlockToTrim + 1;
repository.getATRepository().setAtPruneHeight(nextPruneHeight);
repository.saveChanges();
repository.executeCheckedUpdate("CHECKPOINT");
// Now prune/trim the ATStatesData, as this currently goes back over a month
return HSQLDBDatabasePruning.pruneATStateData(repository);
}
/*
* Bulk prune ATStatesData to catch up with the now pruned ATStates table
* This uses the existing AT States trimming code but with a much higher end block
*/
private static boolean pruneATStateData(Repository repository) throws DataException {
if (Settings.getInstance().isArchiveEnabled()) {
// Don't prune ATStatesData in archive mode
return true;
}
BlockData latestBlock = repository.getBlockRepository().getLastBlock();
if (latestBlock == null) {
LOGGER.info("Unable to determine blockchain height, necessary for bulk ATStatesData pruning");
return false;
}
final int blockchainHeight = latestBlock.getHeight();
int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
// ATStateData is already trimmed - so carry on from where we left off in the past
int pruneStartHeight = repository.getATRepository().getAtTrimHeight();
LOGGER.info("Starting bulk prune of AT states data - this process could take a while... (approx. 3 mins on high spec)");
while (pruneStartHeight < upperPrunableHeight) {
// Prune all AT state data up until our latest minus pruneBlockLimit (or our archive height)
if (Controller.isStopping()) {
return false;
}
// Override batch size in the settings because this is a one-off process
final int batchSize = 1000;
final int rowLimitPerBatch = 50000;
int upperBatchHeight = pruneStartHeight + batchSize;
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
LOGGER.trace(String.format("Pruning AT states data between %d and %d...", pruneStartHeight, upperPruneHeight));
int numATStatesPruned = repository.getATRepository().trimAtStates(pruneStartHeight, upperPruneHeight, rowLimitPerBatch);
repository.saveChanges();
if (numATStatesPruned > 0) {
LOGGER.trace(String.format("Pruned %d AT states data rows between blocks %d and %d",
numATStatesPruned, pruneStartHeight, upperPruneHeight));
} else {
repository.getATRepository().setAtTrimHeight(upperBatchHeight);
// No need to rebuild the latest AT states as we aren't currently synchronizing
repository.saveChanges();
LOGGER.debug(String.format("Bumping AT states trim height to %d", upperBatchHeight));
// Can we move onto next batch?
if (upperPrunableHeight > upperBatchHeight) {
pruneStartHeight = upperBatchHeight;
}
else {
// We've finished pruning
break;
}
}
}
return true;
}
public static boolean pruneBlocks(Repository repository) throws SQLException, DataException {
// Only bulk prune blocks if we have never done so before
int pruneHeight = repository.getBlockRepository().getBlockPruneHeight();
if (pruneHeight > 0) {
// Already pruned blocks
return false;
}
if (Settings.getInstance().isArchiveEnabled()) {
// Only proceed if we can see that the archiver has already finished
// This way, if the archiver failed for any reason, we can prune once it has had
// some opportunities to try again
boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
if (!upToDate) {
return false;
}
}
BlockData latestBlock = repository.getBlockRepository().getLastBlock();
if (latestBlock == null) {
LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
return false;
}
final int blockchainHeight = latestBlock.getHeight();
int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
int pruneStartHeight = 0;
if (Settings.getInstance().isArchiveEnabled()) {
// Archive mode - don't prune anything that hasn't been archived yet
upperPrunableHeight = Math.min(upperPrunableHeight, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
}
LOGGER.info("Starting bulk prune of blocks - this process could take a while... (approx. 5 mins on high spec)");
while (pruneStartHeight < upperPrunableHeight) {
// Prune all blocks up until our latest minus pruneBlockLimit
int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);
LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
if (numBlocksPruned > 0) {
LOGGER.info(String.format("Pruned %d block%s between %d and %d",
numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
pruneStartHeight, upperPruneHeight));
} else {
final int nextPruneHeight = upperPruneHeight + 1;
repository.getBlockRepository().setBlockPruneHeight(nextPruneHeight);
repository.saveChanges();
LOGGER.debug(String.format("Bumping block base prune height to %d", nextPruneHeight));
// Can we move onto next batch?
if (upperPrunableHeight > nextPruneHeight) {
pruneStartHeight = nextPruneHeight;
}
else {
// We've finished pruning
break;
}
}
}
return true;
}
public static void performMaintenance(Repository repository) throws SQLException, DataException {
try {
SplashFrame.getInstance().updateStatus("Performing maintenance...");
// Timeout if the database isn't ready for backing up after 5 minutes
// Nothing else should be using the db at this point, so a timeout shouldn't happen
long timeout = 5 * 60 * 1000L;
repository.performPeriodicMaintenance(timeout);
} catch (TimeoutException e) {
LOGGER.info("Attempt to perform maintenance failed due to timeout: {}", e.getMessage());
}
}
}

View File: org/qortal/settings/Settings.java

@@ -178,6 +178,8 @@ public class Settings {
private boolean archiveEnabled = true;
/** How often to attempt archiving (ms). */
private long archiveInterval = 7171L; // milliseconds
/** Serialization version to use when building an archive */
private int defaultArchiveVersion = 1;
/** Whether to automatically bootstrap instead of syncing from genesis */
@@ -927,6 +929,10 @@ public class Settings {
return this.archiveInterval;
}
public int getDefaultArchiveVersion() {
return this.defaultArchiveVersion;
}
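Operators can presumably override the default in the node's settings.json, since Settings fields are mapped by name; this assumes defaultArchiveVersion is exposed there like the neighbouring archive settings:

{
  "defaultArchiveVersion": 2
}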
public boolean getBootstrap() {
return this.bootstrap;

View File: org/qortal/transform/block/BlockTransformer.java

@@ -312,16 +312,24 @@ public class BlockTransformer extends Transformer {
ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength);
long atFees = 0;
- for (ATStateData atStateData : block.getATStates()) {
-     // Skip initial states generated by DEPLOY_AT transactions in the same block
-     if (atStateData.isInitial())
-         continue;
-     atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
-     atHashBytes.write(atStateData.getStateHash());
-     atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
-     atFees += atStateData.getFees();
- }
+ if (block.getAtStatesHash() != null) {
+     // We already have the AT states hash
+     atFees = blockData.getATFees();
+     atHashBytes.write(block.getAtStatesHash());
+ }
+ else {
+     // We need to build the AT states hash
+     for (ATStateData atStateData : block.getATStates()) {
+         // Skip initial states generated by DEPLOY_AT transactions in the same block
+         if (atStateData.isInitial())
+             continue;
+         atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
+         atHashBytes.write(atStateData.getStateHash());
+         atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
+         atFees += atStateData.getFees();
+     }
+ }
bytes.write(Ints.toByteArray(blockData.getATCount()));

View File: BlockArchiveUtils.java

@@ -21,6 +21,16 @@ public class BlockArchiveUtils {
* into the HSQLDB, in order to make it SQL-compatible
* again.
* <p>
* This is only fully compatible with archives that use
* serialization version 1. For version 2 (or above),
* we are unable to import individual AT states as we
* only have a single combined hash, so the use cases
* for this are greatly limited.
* <p>
* A version 1 archive should ultimately be rebuildable
* via a resync or reindex from genesis, allowing
* access to this feature once again.
* <p>
* Note: calls discardChanges() and saveChanges(), so
* make sure that you commit any existing repository
* changes before calling this method.
@@ -61,9 +71,18 @@ public class BlockArchiveUtils {
repository.getBlockRepository().save(blockInfo.getBlockData());
// Save AT state data hashes
- for (ATStateData atStateData : blockInfo.getAtStates()) {
-     atStateData.setHeight(blockInfo.getBlockData().getHeight());
-     repository.getATRepository().save(atStateData);
- }
+ if (blockInfo.getAtStates() != null) {
+     for (ATStateData atStateData : blockInfo.getAtStates()) {
+         atStateData.setHeight(blockInfo.getBlockData().getHeight());
+         repository.getATRepository().save(atStateData);
+     }
+ }
+ else {
+     // We don't have AT state hashes, so we are only importing a partial state.
+     // This can still be useful to allow orphaning to very old blocks, when we
+     // need to access other chainstate info (such as balances) at an earlier block.
+     // In order to do this, the orphan process must be temporarily adjusted to avoid
+     // orphaning AT states, as it will otherwise fail due to having no previous state.
+ }
} catch (DataException e) {