forked from Qortal/qortal
Pass a repository instance into the bulk archiving and pruning methods.
This is a better approach than opening a new session for each operation, and it makes it easier to write unit tests.
parent ce5bc80347
commit 0d17f02191
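
For context on the testability point above: because archive() and prune() now accept a Repository argument, a caller (or a test) can open a single session and hand it to both bulk operations instead of each method opening and closing its own session internally. Below is a minimal sketch of what such a test might look like; the test class name, the JUnit-style harness, and any test-specific repository setup are hypothetical and not part of this commit.

import org.junit.Test;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;

// Hypothetical test sketch (not part of this commit). Assumes a repository factory
// has already been configured elsewhere, e.g. pointing at a throwaway HSQLDB database.
public class BulkArchivePruneTests {

    @Test
    public void testBulkArchiveAndPruneShareOneSession() throws Exception {
        try (final Repository repository = RepositoryManager.getRepository()) {
            // Both bulk operations run against the same session supplied by the test,
            // mirroring the new call pattern in Controller.
            RepositoryManager.archive(repository);
            RepositoryManager.prune(repository);
        }
    }
}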
@@ -2,8 +2,11 @@ package org.qortal.controller;
 import java.awt.TrayIcon.MessageType;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.security.SecureRandom;
 import java.security.Security;
 import java.time.LocalDateTime;
@@ -409,8 +412,11 @@ public class Controller extends Thread {
         try {
             RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
             RepositoryManager.setRepositoryFactory(repositoryFactory);
-            RepositoryManager.archive();
-            RepositoryManager.prune();
+            try (final Repository repository = RepositoryManager.getRepository()) {
+                RepositoryManager.archive(repository);
+                RepositoryManager.prune(repository);
+            }
         } catch (DataException e) {
             // If exception has no cause then repository is in use by some other process.
             if (e.getCause() == null) {
@@ -4,6 +4,7 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.qortal.repository.hsqldb.HSQLDBDatabaseArchiving;
 import org.qortal.repository.hsqldb.HSQLDBDatabasePruning;
+import org.qortal.repository.hsqldb.HSQLDBRepository;
 import org.qortal.settings.Settings;

 import java.sql.SQLException;
@@ -58,12 +59,12 @@ public abstract class RepositoryManager {
         }
     }

-    public static boolean archive() {
+    public static boolean archive(Repository repository) {
         // Bulk archive the database the first time we use archive mode
         if (Settings.getInstance().isArchiveEnabled()) {
             if (RepositoryManager.canArchiveOrPrune()) {
                 try {
-                    return HSQLDBDatabaseArchiving.buildBlockArchive();
+                    return HSQLDBDatabaseArchiving.buildBlockArchive(repository);

                 } catch (DataException e) {
                     LOGGER.info("Unable to build block archive. The database may have been left in an inconsistent state.");
@@ -76,18 +77,18 @@ public abstract class RepositoryManager {
         return false;
     }

-    public static boolean prune() {
+    public static boolean prune(Repository repository) {
         // Bulk prune the database the first time we use pruning mode
         if (Settings.getInstance().isPruningEnabled() ||
             Settings.getInstance().isArchiveEnabled()) {
             if (RepositoryManager.canArchiveOrPrune()) {
                 try {
-                    boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates();
-                    boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks();
+                    boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates((HSQLDBRepository) repository);
+                    boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks((HSQLDBRepository) repository);

                     // Perform repository maintenance to shrink the db size down
                     if (prunedATStates && prunedBlocks) {
-                        HSQLDBDatabasePruning.performMaintenance();
+                        HSQLDBDatabasePruning.performMaintenance(repository);
                         return true;
                     }

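
A note on the (HSQLDBRepository) casts above: the bulk pruning helpers live in the HSQLDB-specific layer, so the generic Repository handed to prune() is downcast before being passed along. That assumes the active repository really is HSQLDB-backed, which holds here because Controller sets up an HSQLDBRepositoryFactory. A guarded variant, purely illustrative and not part of this commit (it assumes the imports shown above and a hypothetical helper name), might look like this:

// Illustrative only, not part of this commit: a guarded version of the casts used in prune(),
// failing fast if a non-HSQLDB Repository implementation is ever passed in.
private static boolean pruneWithGuardedCast(Repository repository) throws DataException, SQLException {
    if (!(repository instanceof HSQLDBRepository))
        throw new DataException("Bulk pruning is only implemented for HSQLDB repositories");

    HSQLDBRepository hsqldbRepository = (HSQLDBRepository) repository;
    boolean prunedATStates = HSQLDBDatabasePruning.pruneATStates(hsqldbRepository);
    boolean prunedBlocks = HSQLDBDatabasePruning.pruneBlocks(hsqldbRepository);
    return prunedATStates && prunedBlocks;
}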
@@ -5,6 +5,7 @@ import org.apache.logging.log4j.Logger;
 import org.qortal.controller.Controller;
 import org.qortal.repository.BlockArchiveWriter;
 import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
 import org.qortal.repository.RepositoryManager;
 import org.qortal.transform.TransformationException;

@@ -29,53 +30,51 @@ public class HSQLDBDatabaseArchiving {
     private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabaseArchiving.class);


-    public static boolean buildBlockArchive() throws DataException {
-        try (final HSQLDBRepository repository = (HSQLDBRepository)RepositoryManager.getRepository()) {
+    public static boolean buildBlockArchive(Repository repository) throws DataException {

         // Only build the archive if we have never done so before
         int archiveHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
         if (archiveHeight > 0) {
             // Already archived
             return false;
         }

         LOGGER.info("Building block archive - this process could take a while... (approx. 15 mins on high spec)");

         final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
         int startHeight = 0;

         while (!Controller.isStopping()) {
             try {
                 BlockArchiveWriter writer = new BlockArchiveWriter(startHeight, maximumArchiveHeight, repository);
                 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
                 switch (result) {
                     case OK:
                         // Increment block archive height
                         startHeight += writer.getWrittenCount();
                         repository.getBlockArchiveRepository().setBlockArchiveHeight(startHeight);
                         repository.saveChanges();
                         break;

                     case STOPPING:
                         return false;

                     case NOT_ENOUGH_BLOCKS:
                         // We've reached the limit of the blocks we can archive
                         // Return from the whole method
                         return true;

                     case BLOCK_NOT_FOUND:
                         // We tried to archive a block that didn't exist. This is a major failure and likely means
                         // that a bootstrap or re-sync is needed. Return rom the method
                         LOGGER.info("Error: block not found when building archive. If this error persists, " +
                                 "a bootstrap or re-sync may be needed.");
                         return false;
                 }

             } catch (IOException | TransformationException | InterruptedException e) {
                 LOGGER.info("Caught exception when creating block cache", e);
                 return false;
             }
         }
-        }
     }

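
One consequence of removing the internal try-with-resources in buildBlockArchive() is that the caller now owns the session: it must keep the repository open while the archive is built and close it afterwards. A rough sketch of a direct invocation under that contract follows; the wrapper method is hypothetical and not taken from this commit, and it assumes the org.qortal.repository imports shown above.

// Hypothetical caller sketch (not part of this commit): the repository is opened and
// closed by the caller, while buildBlockArchive() only uses the session it is given.
private static boolean buildArchiveWithOwnSession() {
    try (final Repository repository = RepositoryManager.getRepository()) {
        // Returns true once the archive is fully built; false if it was skipped or interrupted.
        return HSQLDBDatabaseArchiving.buildBlockArchive(repository);
    } catch (DataException e) {
        // buildBlockArchive() no longer opens or closes any session of its own,
        // so repository errors surface here at the call site.
        return false;
    }
}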
@@ -6,6 +6,7 @@ import org.qortal.controller.Controller;
 import org.qortal.data.block.BlockData;
 import org.qortal.repository.BlockArchiveWriter;
 import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
 import org.qortal.repository.RepositoryManager;
 import org.qortal.settings.Settings;

@@ -38,286 +39,278 @@ public class HSQLDBDatabasePruning {
     private static final Logger LOGGER = LogManager.getLogger(HSQLDBDatabasePruning.class);


-    public static boolean pruneATStates() throws SQLException, DataException {
-        try (final HSQLDBRepository repository = (HSQLDBRepository)RepositoryManager.getRepository()) {
+    public static boolean pruneATStates(HSQLDBRepository repository) throws SQLException, DataException {

         // Only bulk prune AT states if we have never done so before
         int pruneHeight = repository.getATRepository().getAtPruneHeight();
         if (pruneHeight > 0) {
             // Already pruned AT states
             return false;
         }

         if (Settings.getInstance().isArchiveEnabled()) {
             // Only proceed if we can see that the archiver has already finished
             // This way, if the archiver failed for any reason, we can prune once it has had
             // some opportunities to try again
             boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
             if (!upToDate) {
                 return false;
             }
         }

         LOGGER.info("Starting bulk prune of AT states - this process could take a while... " +
                 "(approx. 2 mins on high spec, or upwards of 30 mins in some cases)");

         // Create new AT-states table to hold smaller dataset
         repository.executeCheckedUpdate("DROP TABLE IF EXISTS ATStatesNew");
         repository.executeCheckedUpdate("CREATE TABLE ATStatesNew ("
                 + "AT_address QortalAddress, height INTEGER NOT NULL, state_hash ATStateHash NOT NULL, "
                 + "fees QortalAmount NOT NULL, is_initial BOOLEAN NOT NULL, sleep_until_message_timestamp BIGINT, "
                 + "PRIMARY KEY (AT_address, height), "
                 + "FOREIGN KEY (AT_address) REFERENCES ATs (AT_address) ON DELETE CASCADE)");
         repository.executeCheckedUpdate("SET TABLE ATStatesNew NEW SPACE");
         repository.executeCheckedUpdate("CHECKPOINT");

         // Add a height index
         LOGGER.info("Adding index to AT states table...");
         repository.executeCheckedUpdate("CREATE INDEX IF NOT EXISTS ATStatesNewHeightIndex ON ATStatesNew (height)");
         repository.executeCheckedUpdate("CHECKPOINT");

         // Find our latest block
         BlockData latestBlock = repository.getBlockRepository().getLastBlock();
         if (latestBlock == null) {
             LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
             return false;
         }

         // Calculate some constants for later use
         final int blockchainHeight = latestBlock.getHeight();
         int maximumBlockToTrim = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
         if (Settings.getInstance().isArchiveEnabled()) {
             // Archive mode - don't prune anything that hasn't been archived yet
             maximumBlockToTrim = Math.min(maximumBlockToTrim, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
         }
         final int startHeight = maximumBlockToTrim;
         final int endHeight = blockchainHeight;
         final int blockStep = 10000;

         // It's essential that we rebuild the latest AT states here, as we are using this data in the next query.
         // Failing to do this will result in important AT states being deleted, rendering the database unusable.
         repository.getATRepository().rebuildLatestAtStates();

         // Loop through all the LatestATStates and copy them to the new table
         LOGGER.info("Copying AT states...");
         for (int height = 0; height < endHeight; height += blockStep) {
             //LOGGER.info(String.format("Copying AT states between %d and %d...", height, height + blockStep - 1));

             String sql = "SELECT height, AT_address FROM LatestATStates WHERE height BETWEEN ? AND ?";
             try (ResultSet latestAtStatesResultSet = repository.checkedExecute(sql, height, height + blockStep - 1)) {
                 if (latestAtStatesResultSet != null) {
                     do {
                         int latestAtHeight = latestAtStatesResultSet.getInt(1);
                         String latestAtAddress = latestAtStatesResultSet.getString(2);

                         // Copy this latest ATState to the new table
                         //LOGGER.info(String.format("Copying AT %s at height %d...", latestAtAddress, latestAtHeight));
                         try {
                             String updateSql = "INSERT INTO ATStatesNew ("
                                     + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
                                     + "FROM ATStates "
                                     + "WHERE height = ? AND AT_address = ?)";
                             repository.executeCheckedUpdate(updateSql, latestAtHeight, latestAtAddress);
                         } catch (SQLException e) {
                             repository.examineException(e);
                             throw new DataException("Unable to copy ATStates", e);
                         }

                         if (height >= startHeight) {
                             // Now copy this AT's states for each recent block they is present in
                             for (int i = startHeight; i < endHeight; i++) {
                                 if (latestAtHeight < i) {
                                     // This AT finished before this block so there is nothing to copy
                                     continue;
                                 }

                                 //LOGGER.info(String.format("Copying recent AT %s at height %d...", latestAtAddress, i));
                                 try {
                                     // Copy each LatestATState to the new table
                                     String updateSql = "INSERT IGNORE INTO ATStatesNew ("
                                             + "SELECT AT_address, height, state_hash, fees, is_initial, sleep_until_message_timestamp "
                                             + "FROM ATStates "
                                             + "WHERE height = ? AND AT_address = ?)";
                                     repository.executeCheckedUpdate(updateSql, i, latestAtAddress);
                                 } catch (SQLException e) {
                                     repository.examineException(e);
                                     throw new DataException("Unable to copy ATStates", e);
                                 }
                             }
                         }

                     } while (latestAtStatesResultSet.next());
                 }
             } catch (SQLException e) {
                 throw new DataException("Unable to copy AT states", e);
             }
         }

         repository.saveChanges();

         // Finally, drop the original table and rename
         LOGGER.info("Deleting old AT states...");
         repository.executeCheckedUpdate("DROP TABLE ATStates");
         repository.executeCheckedUpdate("ALTER TABLE ATStatesNew RENAME TO ATStates");
         repository.executeCheckedUpdate("ALTER INDEX ATStatesNewHeightIndex RENAME TO ATStatesHeightIndex");
         repository.executeCheckedUpdate("CHECKPOINT");

         // Update the prune height
         repository.getATRepository().setAtPruneHeight(maximumBlockToTrim);
         repository.saveChanges();

         repository.executeCheckedUpdate("CHECKPOINT");

         // Now prune/trim the ATStatesData, as this currently goes back over a month
-            return HSQLDBDatabasePruning.pruneATStateData();
-        }
+        return HSQLDBDatabasePruning.pruneATStateData(repository);
     }

     /*
      * Bulk prune ATStatesData to catch up with the now pruned ATStates table
      * This uses the existing AT States trimming code but with a much higher end block
      */
-    private static boolean pruneATStateData() throws SQLException, DataException {
-        try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
+    private static boolean pruneATStateData(Repository repository) throws DataException {

         if (Settings.getInstance().isArchiveEnabled()) {
             // Don't prune ATStatesData in archive mode
             return true;
         }

         BlockData latestBlock = repository.getBlockRepository().getLastBlock();
         if (latestBlock == null) {
             LOGGER.info("Unable to determine blockchain height, necessary for bulk ATStatesData pruning");
             return false;
         }
         final int blockchainHeight = latestBlock.getHeight();
         int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
         // ATStateData is already trimmed - so carry on from where we left off in the past
         int pruneStartHeight = repository.getATRepository().getAtTrimHeight();

         LOGGER.info("Starting bulk prune of AT states data - this process could take a while... (approx. 3 mins on high spec)");

         while (pruneStartHeight < upperPrunableHeight) {
             // Prune all AT state data up until our latest minus pruneBlockLimit (or our archive height)

             if (Controller.isStopping()) {
                 return false;
             }

             // Override batch size in the settings because this is a one-off process
             final int batchSize = 1000;
             final int rowLimitPerBatch = 50000;
             int upperBatchHeight = pruneStartHeight + batchSize;
             int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);

             LOGGER.trace(String.format("Pruning AT states data between %d and %d...", pruneStartHeight, upperPruneHeight));

             int numATStatesPruned = repository.getATRepository().trimAtStates(pruneStartHeight, upperPruneHeight, rowLimitPerBatch);
             repository.saveChanges();

             if (numATStatesPruned > 0) {
                 LOGGER.trace(String.format("Pruned %d AT states data rows between blocks %d and %d",
                         numATStatesPruned, pruneStartHeight, upperPruneHeight));
             } else {
                 repository.getATRepository().setAtTrimHeight(upperBatchHeight);
                 // No need to rebuild the latest AT states as we aren't currently synchronizing
                 repository.saveChanges();
                 LOGGER.debug(String.format("Bumping AT states trim height to %d", upperBatchHeight));

                 // Can we move onto next batch?
                 if (upperPrunableHeight > upperBatchHeight) {
                     pruneStartHeight = upperBatchHeight;
                 }
                 else {
                     // We've finished pruning
                     break;
                 }
             }
         }

         return true;
-        }
     }

-    public static boolean pruneBlocks() throws SQLException, DataException {
-        try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
+    public static boolean pruneBlocks(Repository repository) throws SQLException, DataException {

         // Only bulk prune AT states if we have never done so before
         int pruneHeight = repository.getBlockRepository().getBlockPruneHeight();
         if (pruneHeight > 0) {
             // Already pruned blocks
             return false;
         }

         if (Settings.getInstance().isArchiveEnabled()) {
             // Only proceed if we can see that the archiver has already finished
             // This way, if the archiver failed for any reason, we can prune once it has had
             // some opportunities to try again
             boolean upToDate = BlockArchiveWriter.isArchiverUpToDate(repository);
             if (!upToDate) {
                 return false;
             }
         }

         BlockData latestBlock = repository.getBlockRepository().getLastBlock();
         if (latestBlock == null) {
             LOGGER.info("Unable to determine blockchain height, necessary for bulk block pruning");
             return false;
         }
         final int blockchainHeight = latestBlock.getHeight();
         int upperPrunableHeight = blockchainHeight - Settings.getInstance().getPruneBlockLimit();
         int pruneStartHeight = 0;

         if (Settings.getInstance().isArchiveEnabled()) {
             // Archive mode - don't prune anything that hasn't been archived yet
             upperPrunableHeight = Math.min(upperPrunableHeight, repository.getBlockArchiveRepository().getBlockArchiveHeight() - 1);
         }

         LOGGER.info("Starting bulk prune of blocks - this process could take a while... (approx. 5 mins on high spec)");

         while (pruneStartHeight < upperPrunableHeight) {
             // Prune all blocks up until our latest minus pruneBlockLimit

             int upperBatchHeight = pruneStartHeight + Settings.getInstance().getBlockPruneBatchSize();
             int upperPruneHeight = Math.min(upperBatchHeight, upperPrunableHeight);

             LOGGER.info(String.format("Pruning blocks between %d and %d...", pruneStartHeight, upperPruneHeight));

             int numBlocksPruned = repository.getBlockRepository().pruneBlocks(pruneStartHeight, upperPruneHeight);
             repository.saveChanges();

             if (numBlocksPruned > 0) {
                 LOGGER.info(String.format("Pruned %d block%s between %d and %d",
                         numBlocksPruned, (numBlocksPruned != 1 ? "s" : ""),
                         pruneStartHeight, upperPruneHeight));
             } else {
                 repository.getBlockRepository().setBlockPruneHeight(upperBatchHeight);
                 repository.saveChanges();
                 LOGGER.debug(String.format("Bumping block base prune height to %d", upperBatchHeight));

                 // Can we move onto next batch?
                 if (upperPrunableHeight > upperBatchHeight) {
                     pruneStartHeight = upperBatchHeight;
                 }
                 else {
                     // We've finished pruning
                     break;
                 }
             }
         }

         return true;
-        }
     }

-    public static void performMaintenance() throws SQLException, DataException {
-        try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
+    public static void performMaintenance(Repository repository) throws SQLException, DataException {
         repository.performPeriodicMaintenance();
-        }
     }

 }