forked from Qortal/qortal
Merge pull request #239 from kennycud/master
Restructuring database connections for better garbage collection. This resolves a long-standing memory leak in multiple places, which became easier to pinpoint once crashed threads were made to restart themselves. Thanks so much to @kennycud for this improvement!
Commit 2347118e59
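A minimal sketch (not part of the commit itself) of the connection-lifecycle pattern the change applies: instead of one Repository held for a worker thread's whole lifetime, each loop iteration opens a short-lived session that try-with-resources releases, so the session and everything it references can be garbage collected. `Repository`, `RepositoryManager` and `DataException` are the Qortal types used throughout the diff below; the `running` flag and the loop body are stand-ins.

    // Sketch only: short-lived repository session per iteration instead of one long-lived session.
    while (running) {
        try (final Repository repository = RepositoryManager.getRepository()) {
            // ... one iteration of work against this session ...

            repository.discardChanges(); // release locks/uncommitted state before the session closes
        } catch (DataException e) {
            // log and loop again with a fresh session, rather than keeping a broken one open
        }
    }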
BlockMinter.java
@@ -97,364 +97,375 @@ public class BlockMinter extends Thread {
		final boolean isSingleNodeTestnet = Settings.getInstance().isSingleNodeTestnet();

		// Flags for tracking change in whether minting is possible,
		// so we can notify Controller, and further update SysTray, etc.
		boolean isMintingPossible = false;
		boolean wasMintingPossible = isMintingPossible;
		try {
			while (running) {
				if (isMintingPossible != wasMintingPossible)
					Controller.getInstance().onMintingPossibleChange(isMintingPossible);

				// recreate repository for new loop iteration
				try (final Repository repository = RepositoryManager.getRepository()) {

					wasMintingPossible = isMintingPossible;

					// Going to need this a lot...
					BlockRepository blockRepository = repository.getBlockRepository();

					// Sleep for a while.
					// It's faster on single node testnets, to allow lots of blocks to be minted quickly.
					Thread.sleep(isSingleNodeTestnet ? 50 : 1000);

					isMintingPossible = false;

					final Long now = NTP.getTime();
					if (now == null)
						continue;

					final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
					if (minLatestBlockTimestamp == null)
						continue;

					List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
					// No minting accounts?
					if (mintingAccountsData.isEmpty())
						continue;

					// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
					// Note that minting accounts are actually reward-shares in Qortal
					Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
					while (madi.hasNext()) {
						MintingAccountData mintingAccountData = madi.next();

						RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
						if (rewardShareData == null) {
							// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
							madi.remove();
							continue;
						}

						Account mintingAccount = new Account(repository, rewardShareData.getMinter());
						if (!mintingAccount.canMint(true)) {
							// Minting-account component of reward-share can no longer mint - disregard
							madi.remove();
							continue;
						}

						// Optional (non-validated) prevention of block submissions below a defined level.
						// This is an unvalidated version of Blockchain.minAccountLevelToMint
						// and exists only to reduce block candidates by default.
						int level = mintingAccount.getEffectiveMintingLevel();
						if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
							madi.remove();
						}
					}

					// Needs a mutable copy of the unmodifiableList
					List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
					BlockData lastBlockData = blockRepository.getLastBlock();

					// Disregard peers that have "misbehaved" recently
					peers.removeIf(Controller.hasMisbehaved);

					// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
					// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
					if (!Synchronizer.getInstance().getRecoveryMode())
						peers.removeIf(Controller.hasNoRecentBlock);

					// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
					if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
						continue;

					// If we are stuck on an invalid block, we should allow an alternative to be minted
					boolean recoverInvalidBlock = false;
					if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
						// We've had at least one invalid block
						long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
						long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
						if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
							if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
								// Last valid block was more than 10 mins ago, but we've had an invalid block since then
								// Assume that the chain has stalled because there is no alternative valid candidate
								// Enter recovery mode to allow alternative, valid candidates to be minted
								recoverInvalidBlock = true;
							}
						}
					}

					// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
					if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
						if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
							continue;

					// There are enough peers with a recent block and our latest block is recent
					// so go ahead and mint a block if possible.
					isMintingPossible = true;

					// Check blockchain hasn't changed
					if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
						previousBlockData = lastBlockData;
						newBlocks.clear();

						// Reduce log timeout
						logTimeout = 10 * 1000L;

						// Last low weight block is no longer valid
						parentSignatureForLastLowWeightBlock = null;
					}

					// Discard accounts we have already built blocks with
					mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));

					// Do we need to build any potential new blocks?
					List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());

					// We might need to sit the next block out, if one of our minting accounts signed the previous one
					// Skip this check for single node testnets, since they definitely need to mint every block
					byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
					boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
					if (mintedLastBlock && !isSingleNodeTestnet) {
						LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
						continue;
					}

					if (parentSignatureForLastLowWeightBlock != null) {
						// The last iteration found a higher weight block in the network, so sleep for a while
						// to allow is to sync the higher weight chain. We are sleeping here rather than when
						// detected as we don't want to hold the blockchain lock open.
						LOGGER.info("Sleeping for 10 seconds...");
						Thread.sleep(10 * 1000L);
					}

					for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
						// First block does the AT heavy-lifting
						if (newBlocks.isEmpty()) {
							Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
							if (newBlock == null) {
								// For some reason we can't mint right now
								moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
								continue;
							}

							newBlocks.add(newBlock);
						} else {
							// The blocks for other minters require less effort...
							Block newBlock = newBlocks.get(0).remint(mintingAccount);
							if (newBlock == null) {
								// For some reason we can't mint right now
								moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
								continue;
							}

							newBlocks.add(newBlock);
						}
					}

					// No potential block candidates?
					if (newBlocks.isEmpty())
						continue;

					// Make sure we're the only thread modifying the blockchain
					ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
					if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
						LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
						continue;
					}

					boolean newBlockMinted = false;
					Block newBlock = null;

					try {
						// reset the repository, to the repository recreated for this loop iteration
						for (Block block : newBlocks) block.setRepository(repository);

						// Free up any repository locks
						repository.discardChanges();

						// Now that we have blockchain lock, do final check that chain hasn't changed
						BlockData latestBlockData = blockRepository.getLastBlock();
						if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
							continue;

						List<Block> goodBlocks = new ArrayList<>();
						boolean wasInvalidBlockDiscarded = false;
						Iterator<Block> newBlocksIterator = newBlocks.iterator();

						while (newBlocksIterator.hasNext()) {
							Block testBlock = newBlocksIterator.next();

							// Is new block's timestamp valid yet?
							// We do a separate check as some timestamp checks are skipped for testchains
							if (testBlock.isTimestampValid() != ValidationResult.OK)
								continue;

							testBlock.preProcess();

							// Is new block valid yet? (Before adding unconfirmed transactions)
							ValidationResult result = testBlock.isValid();
							if (result != ValidationResult.OK) {
								moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));

								newBlocksIterator.remove();
								wasInvalidBlockDiscarded = true;
								/*
								 * Bail out fast so that we loop around from the top again.
								 * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
								 * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
								 * Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
								 */
								break;
							}

							goodBlocks.add(testBlock);
						}

						if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
							continue;

						// Pick best block
						final int parentHeight = previousBlockData.getHeight();
						final byte[] parentBlockSignature = previousBlockData.getSignature();

						BigInteger bestWeight = null;

						for (int bi = 0; bi < goodBlocks.size(); ++bi) {
							BlockData blockData = goodBlocks.get(bi).getBlockData();

							BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
							int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
							blockSummaryData.setMinterLevel(minterLevel);

							BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);

							if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
								newBlock = goodBlocks.get(bi);
								bestWeight = blockWeight;
							}
						}

						try {
							if (this.higherWeightChainExists(repository, bestWeight)) {

								// Check if the base block has updated since the last time we were here
								if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
										!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
									// We've switched to a different chain, so reset the timer
									timeOfLastLowWeightBlock = NTP.getTime();
								}
								parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();

								// If less than 30 seconds has passed since first detection the higher weight chain,
								// we should skip our block submission to give us the opportunity to sync to the better chain
								if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
									LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
									LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
									continue;
								} else {
									// More than 30 seconds have passed, so we should submit our block candidate anyway.
									LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
								}
							} else {
								LOGGER.debug("No higher weight chain found in peers");
							}
						} catch (DataException e) {
							LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
						}

						// Discard any uncommitted changes as a result of the higher weight chain detection
						repository.discardChanges();

						// Clear variables that track low weight blocks
						parentSignatureForLastLowWeightBlock = null;
						timeOfLastLowWeightBlock = null;

						Long unconfirmedStartTime = NTP.getTime();

						// Add unconfirmed transactions
						addUnconfirmedTransactions(repository, newBlock);

						LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime() - unconfirmedStartTime)));

						// Sign to create block's signature
						newBlock.sign();

						// Is newBlock still valid?
						ValidationResult validationResult = newBlock.isValid();
						if (validationResult != ValidationResult.OK) {
							// No longer valid? Report and discard
							LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));

							// Rebuild block candidates, just to be sure
							newBlocks.clear();
							continue;
						}

						// Add to blockchain - something else will notice and broadcast new block to network
						try {
							newBlock.process();

							repository.saveChanges();

							LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));

							RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());

							if (rewardShareData != null) {
								LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
										newBlock.getBlockData().getHeight(),
										Base58.encode(newBlock.getBlockData().getSignature()),
										Base58.encode(newBlock.getParent().getSignature()),
										rewardShareData.getMinter(),
										rewardShareData.getRecipient()));
							} else {
								LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
										newBlock.getBlockData().getHeight(),
										Base58.encode(newBlock.getBlockData().getSignature()),
										Base58.encode(newBlock.getParent().getSignature()),
										newBlock.getMinter().getAddress()));
							}

							// Notify network after we're released blockchain lock
							newBlockMinted = true;

							// Notify Controller
							repository.discardChanges(); // clear transaction status to prevent deadlocks
							Controller.getInstance().onNewBlock(newBlock.getBlockData());
						} catch (DataException e) {
							// Unable to process block - report and discard
							LOGGER.error("Unable to process newly minted block?", e);
							newBlocks.clear();
						} catch (ArithmeticException e) {
							// Unable to process block - report and discard
							LOGGER.error("Unable to process newly minted block?", e);
							newBlocks.clear();
						}
					} finally {
						blockchainLock.unlock();
					}

					if (newBlockMinted) {
						// Broadcast our new chain to network
						Network.getInstance().broadcastOurChain();
					}

				} catch (InterruptedException e) {
					// We've been interrupted - time to exit
					return;
				}
			}
		} catch (DataException e) {
			LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
		} catch (Exception e) {
			LOGGER.error(e.getMessage(), e);
		}
	}
Controller.java
@@ -13,6 +13,7 @@ import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.controller.arbitrary.*;
import org.qortal.controller.hsqldb.HSQLDBBalanceRecorder;
import org.qortal.controller.hsqldb.HSQLDBDataCacheManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.repository.PruneManager;
@@ -36,7 +37,6 @@ import org.qortal.network.Peer;
import org.qortal.network.PeerAddress;
import org.qortal.network.message.*;
import org.qortal.repository.*;
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
@@ -73,6 +73,8 @@ import java.util.stream.Collectors;

public class Controller extends Thread {

	public static HSQLDBRepositoryFactory REPOSITORY_FACTORY;

	static {
		// This must go before any calls to LogManager/Logger
		System.setProperty("log4j2.formatMsgNoLookups", "true");
@@ -403,23 +405,38 @@ public class Controller extends Thread {

		LOGGER.info("Starting repository");
		try {
			REPOSITORY_FACTORY = new HSQLDBRepositoryFactory(getRepositoryUrl());
			RepositoryManager.setRepositoryFactory(REPOSITORY_FACTORY);
			RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);

			try (final Repository repository = RepositoryManager.getRepository()) {
				// RepositoryManager.rebuildTransactionSequences(repository);
				ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, false);
			}

			if( Settings.getInstance().isDbCacheEnabled() ) {
				LOGGER.info("Db Cache Starting ...");
				HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager();
				hsqldbDataCacheManager.start();
			}
			else {
				LOGGER.info("Db Cache Disabled");
			}

			if( Settings.getInstance().isBalanceRecorderEnabled() ) {
				Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();

				if( recorder.isPresent() ) {
					LOGGER.info("Balance Recorder Starting ...");
					recorder.get().start();
				}
				else {
					LOGGER.info("Balance Recorder won't start.");
				}
			}
			else {
				LOGGER.info("Balance Recorder Disabled");
			}
		} catch (DataException e) {
			// If exception has no cause or message then repository is in use by some other process.
			if (e.getCause() == null && e.getMessage() == null) {
@@ -639,10 +656,8 @@ public class Controller extends Thread {
		boolean canBootstrap = Settings.getInstance().getBootstrap();
		boolean needsArchiveRebuild = false;
		int checkHeight = 0;

		try (final Repository repository = RepositoryManager.getRepository()){
			needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null);
			checkHeight = repository.getBlockRepository().getBlockchainHeight();
		} catch (DataException e) {
HSQLDBBalanceRecorder.java (new file)
@@ -0,0 +1,117 @@
package org.qortal.controller.hsqldb;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
import org.qortal.settings.Settings;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

public class HSQLDBBalanceRecorder extends Thread{

	private static final Logger LOGGER = LogManager.getLogger(HSQLDBBalanceRecorder.class);

	private static HSQLDBBalanceRecorder SINGLETON = null;

	private ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight = new ConcurrentHashMap<>();

	private ConcurrentHashMap<String, List<AccountBalanceData>> balancesByAddress = new ConcurrentHashMap<>();

	private int priorityRequested;
	private int frequency;
	private int capacity;

	private HSQLDBBalanceRecorder( int priorityRequested, int frequency, int capacity) {

		super("Balance Recorder");

		this.priorityRequested = priorityRequested;
		this.frequency = frequency;
		this.capacity = capacity;
	}

	public static Optional<HSQLDBBalanceRecorder> getInstance() {

		if( SINGLETON == null ) {

			SINGLETON
				= new HSQLDBBalanceRecorder(
					Settings.getInstance().getBalanceRecorderPriority(),
					Settings.getInstance().getBalanceRecorderFrequency(),
					Settings.getInstance().getBalanceRecorderCapacity()
				);

		}
		else if( SINGLETON == null ) {

			return Optional.empty();
		}

		return Optional.of(SINGLETON);
	}

	@Override
	public void run() {

		Thread.currentThread().setName("Balance Recorder");

		HSQLDBCacheUtils.startRecordingBalances(this.balancesByHeight, this.balancesByAddress, this.priorityRequested, this.frequency, this.capacity);
	}

	public List<AccountBalanceData> getLatestRecordings(int limit, long offset) {
		ArrayList<AccountBalanceData> data;

		Optional<Integer> lastHeight = getLastHeight();

		if(lastHeight.isPresent() ) {
			List<AccountBalanceData> latest = this.balancesByHeight.get(lastHeight.get());

			if( latest != null ) {
				data = new ArrayList<>(latest.size());
				data.addAll(
					latest.stream()
						.sorted(Comparator.comparingDouble(AccountBalanceData::getBalance).reversed())
						.skip(offset)
						.limit(limit)
						.collect(Collectors.toList())
				);
			}
			else {
				data = new ArrayList<>(0);
			}
		}
		else {
			data = new ArrayList<>(0);
		}

		return data;
	}

	private Optional<Integer> getLastHeight() {
		return this.balancesByHeight.keySet().stream().sorted(Comparator.reverseOrder()).findFirst();
	}

	public List<Integer> getBlocksRecorded() {

		return this.balancesByHeight.keySet().stream().collect(Collectors.toList());
	}

	public List<AccountBalanceData> getAccountBalanceRecordings(String address) {
		return this.balancesByAddress.get(address);
	}

	@Override
	public String toString() {
		return "HSQLDBBalanceRecorder{" +
				"priorityRequested=" + priorityRequested +
				", frequency=" + frequency +
				", capacity=" + capacity +
				'}';
	}
}
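A hedged usage sketch (not part of the commit): how a caller might start the recorder and read back its most recent snapshot, using only the public methods defined in the file above. The surrounding calling context is assumed.

    Optional<HSQLDBBalanceRecorder> maybeRecorder = HSQLDBBalanceRecorder.getInstance();

    if (maybeRecorder.isPresent()) {
        HSQLDBBalanceRecorder recorder = maybeRecorder.get();
        recorder.start(); // runs startRecordingBalances(...) on the "Balance Recorder" thread

        // later: top 20 balances from the most recently recorded height
        List<AccountBalanceData> top = recorder.getLatestRecordings(20, 0);

        // heights captured so far
        List<Integer> heights = recorder.getBlocksRecorded();
    }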
HSQLDBDataCacheManager.java
@@ -8,11 +8,7 @@ import org.qortal.settings.Settings;

public class HSQLDBDataCacheManager extends Thread{

	public HSQLDBDataCacheManager() {}

	@Override
	public void run() {
@@ -20,8 +16,7 @@ public class HSQLDBDataCacheManager extends Thread{

		HSQLDBCacheUtils.startCaching(
				Settings.getInstance().getDbCacheThreadPriority(),
				Settings.getInstance().getDbCacheFrequency()
		);
	}
}
AtStatesPruner.java
@@ -39,15 +39,24 @@ public class AtStatesPruner implements Runnable {
			}
		}

		int pruneStartHeight;
		int maxLatestAtStatesHeight;

		try (final Repository repository = RepositoryManager.getRepository()) {
			pruneStartHeight = repository.getATRepository().getAtPruneHeight();
			maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);

			repository.discardChanges();
			repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
			repository.saveChanges();
		} catch (Exception e) {
			LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			return;
		}

		while (!Controller.isStopping()) {
			try (final Repository repository = RepositoryManager.getRepository()) {
				try {
					repository.discardChanges();

@@ -102,28 +111,25 @@ public class AtStatesPruner implements Runnable {

						final int finalPruneStartHeight = pruneStartHeight;
						LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
					} else {
						// We've pruned up to the upper prunable height
						// Back off for a while to save CPU for syncing
						repository.discardChanges();
						Thread.sleep(5 * 60 * 1000L);
					}
				} catch (InterruptedException e) {
					if (Controller.isStopping()) {
						LOGGER.info("AT States Pruning Shutting Down");
					} else {
						LOGGER.warn("AT States Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
					}
				} catch (Exception e) {
					LOGGER.warn("AT States Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
				}
			} catch (Exception e) {
				LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			}
		}
	}
AtStatesTrimmer.java
@@ -26,15 +26,23 @@ public class AtStatesTrimmer implements Runnable {
			return;
		}

		int trimStartHeight;
		int maxLatestAtStatesHeight;

		try (final Repository repository = RepositoryManager.getRepository()) {
			trimStartHeight = repository.getATRepository().getAtTrimHeight();
			maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);

			repository.discardChanges();
			repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
			repository.saveChanges();
		} catch (Exception e) {
			LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			return;
		}

		while (!Controller.isStopping()) {
			try (final Repository repository = RepositoryManager.getRepository()) {
				try {
					repository.discardChanges();

@@ -92,9 +100,9 @@ public class AtStatesTrimmer implements Runnable {
				} catch (Exception e) {
					LOGGER.warn("AT States Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
				}
			} catch (Exception e) {
				LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			}
		}
BlockArchiver.java
@@ -30,11 +30,13 @@ public class BlockArchiver implements Runnable {
			return;
		}

		int startHeight;

		try (final Repository repository = RepositoryManager.getRepository()) {
			// Don't even start building until initial rush has ended
			Thread.sleep(INITIAL_SLEEP_PERIOD);

			startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();

			// Don't attempt to archive if we have no ATStatesHeightIndex, as it will be too slow
			boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@@ -43,10 +45,16 @@ public class BlockArchiver implements Runnable {
				repository.discardChanges();
				return;
			}
		} catch (Exception e) {
			LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			return;
		}

		LOGGER.info("Starting block archiver from height {}...", startHeight);

		while (!Controller.isStopping()) {
			try (final Repository repository = RepositoryManager.getRepository()) {
				try {
					repository.discardChanges();

@@ -107,20 +115,17 @@ public class BlockArchiver implements Runnable {
						LOGGER.info("Caught exception when creating block cache", e);
					}
				} catch (InterruptedException e) {
					if (Controller.isStopping()) {
						LOGGER.info("Block Archiving Shutting Down");
					} else {
						LOGGER.warn("Block Archiving interrupted. Trying again. Report this error immediately to the developers.", e);
					}
				} catch (Exception e) {
					LOGGER.warn("Block Archiving stopped working. Trying again. Report this error immediately to the developers.", e);
				}
			} catch (Exception e) {
				LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			}
		}
	}
BlockPruner.java
@@ -39,8 +39,10 @@ public class BlockPruner implements Runnable {
			}
		}

		int pruneStartHeight;

		try (final Repository repository = RepositoryManager.getRepository()) {
			pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();

			// Don't attempt to prune if we have no ATStatesHeightIndex, as it will be too slow
			boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@@ -48,8 +50,15 @@ public class BlockPruner implements Runnable {
				LOGGER.info("Unable to start block pruner due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
				return;
			}
		} catch (Exception e) {
			LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			return;
		}

		while (!Controller.isStopping()) {

			try (final Repository repository = RepositoryManager.getRepository()) {

				try {
					repository.discardChanges();

@@ -122,10 +131,9 @@ public class BlockPruner implements Runnable {
				} catch (Exception e) {
					LOGGER.warn("Block Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
				}
			} catch (Exception e) {
				LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			}
		}
	}
OnlineAccountsSignaturesTrimmer.java
@@ -28,13 +28,21 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
			return;
		}

		int trimStartHeight;

		try (final Repository repository = RepositoryManager.getRepository()) {
			// Don't even start trimming until initial rush has ended
			Thread.sleep(INITIAL_SLEEP_PERIOD);

			trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
		} catch (Exception e) {
			LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			return;
		}

		while (!Controller.isStopping()) {
			try (final Repository repository = RepositoryManager.getRepository()) {
				try {
					repository.discardChanges();

@@ -88,10 +96,9 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
				} catch (Exception e) {
					LOGGER.warn("Online Accounts Signatures Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
				}
			} catch (Exception e) {
				LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
			}
		}
HSQLDBCacheUtils.java
@@ -5,10 +5,13 @@ import org.apache.logging.log4j.Logger;
import org.qortal.api.SearchMode;
import org.qortal.arbitrary.misc.Category;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.arbitrary.ArbitraryResourceCache;
import org.qortal.data.arbitrary.ArbitraryResourceData;
import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.repository.DataException;

import java.sql.ResultSet;
import java.sql.SQLException;
@@ -48,6 +51,11 @@ public class HSQLDBCacheUtils {
		}
	};
	private static final String DEFAULT_IDENTIFIER = "default";
	private static final int ZERO = 0;
	public static final String DB_CACHE_TIMER = "DB Cache Timer";
	public static final String DB_CACHE_TIMER_TASK = "DB Cache Timer Task";
	public static final String BALANCE_RECORDER_TIMER = "Balance Recorder Timer";
	public static final String BALANCE_RECORDER_TIMER_TASK = "Balance Recorder Timer Task";

	/**
	 *
@@ -351,13 +359,124 @@ public class HSQLDBCacheUtils {
	 * Start Caching
	 *
	 * @param priorityRequested the thread priority to fill cache in
	 * @param frequency the frequency to fill the cache (in seconds)
	 *
	 * @return the data cache
	 */
	public static void startCaching(int priorityRequested, int frequency) {

		Timer timer = buildTimer(DB_CACHE_TIMER, priorityRequested);

		TimerTask task = new TimerTask() {
			@Override
			public void run() {

				Thread.currentThread().setName(DB_CACHE_TIMER_TASK);

				try (final HSQLDBRepository respository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
					fillCache(ArbitraryResourceCache.getInstance(), respository);
				}
				catch( DataException e ) {
					LOGGER.error(e.getMessage(), e);
				}
			}
		};

		// delay 1 second
		timer.scheduleAtFixedRate(task, 1000, frequency * 1000);
	}

	/**
	 * Start Recording Balances
	 *
	 * @param queue the queue to add to, remove oldest data if necssary
	 * @param repository the db repsoitory
	 * @param priorityRequested the requested thread priority
	 * @param frequency the recording frequencies, in minutes
	 */
	public static void startRecordingBalances(
			final ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight,
			final ConcurrentHashMap<String, List<AccountBalanceData>> balancesByAddress,
			int priorityRequested,
			int frequency,
			int capacity) {

		Timer timer = buildTimer(BALANCE_RECORDER_TIMER, priorityRequested);

		TimerTask task = new TimerTask() {
			@Override
			public void run() {

				Thread.currentThread().setName(BALANCE_RECORDER_TIMER_TASK);

				try (final HSQLDBRepository repository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
					while (balancesByHeight.size() > capacity + 1) {
						Optional<Integer> firstHeight = balancesByHeight.keySet().stream().sorted().findFirst();

						if (firstHeight.isPresent()) balancesByHeight.remove(firstHeight.get());
					}

					// get current balances
					List<AccountBalanceData> accountBalances = getAccountBalances(repository);

					// get anyone of the balances
					Optional<AccountBalanceData> data = accountBalances.stream().findAny();

					// if there are any balances, then record them
					if (data.isPresent()) {
						// map all new balances to the current height
						balancesByHeight.put(data.get().getHeight(), accountBalances);

						// for each new balance, map to address
						for (AccountBalanceData accountBalance : accountBalances) {

							// get recorded balances for this address
							List<AccountBalanceData> establishedBalances
									= balancesByAddress.getOrDefault(accountBalance.getAddress(), new ArrayList<>(0));

							// start a new list of recordings for this address, add the new balance and add the established
							// balances
							List<AccountBalanceData> balances = new ArrayList<>(establishedBalances.size() + 1);
							balances.add(accountBalance);
							balances.addAll(establishedBalances);

							// reset tha balances for this address
							balancesByAddress.put(accountBalance.getAddress(), balances);

							// TODO: reduce account balances to capacity
						}

						// reduce height balances to capacity
						while( balancesByHeight.size() > capacity ) {
							Optional<Integer> lowestHeight
									= balancesByHeight.entrySet().stream()
									.min(Comparator.comparingInt(Map.Entry::getKey))
									.map(Map.Entry::getKey);

							if (lowestHeight.isPresent()) balancesByHeight.entrySet().remove(lowestHeight);
						}
					}
				} catch (DataException e) {
					LOGGER.error(e.getMessage(), e);
				}
			}
		};

		// wait 5 minutes
		timer.scheduleAtFixedRate(task, 300_000, frequency * 60_000);
	}

	/**
	 * Build Timer
	 *
	 * Build a timer for scheduling a timer task.
	 *
	 * @param name the name for the thread running the timer task
	 * @param priorityRequested the priority for the thread running the timer task
	 *
	 * @return a timer for scheduling a timer task
	 */
	private static Timer buildTimer( final String name, int priorityRequested) {
		// ensure priority is in between 1-10
		final int priority = Math.max(0, Math.min(10, priorityRequested));

@@ -365,7 +484,7 @@ public class HSQLDBCacheUtils {
		Timer timer = new Timer(true) { // 'true' to make the Timer daemon
			@Override
			public void schedule(TimerTask task, long delay) {
				Thread thread = new Thread(task, name) {
					@Override
					public void run() {
						this.setPriority(priority);
@@ -376,17 +495,7 @@ public class HSQLDBCacheUtils {
				thread.start();
			}
		};

		return timer;
	}

	/**
@@ -541,4 +650,43 @@ public class HSQLDBCacheUtils {

		return resources;
	}

	public static List<AccountBalanceData> getAccountBalances(HSQLDBRepository repository) {

		StringBuilder sql = new StringBuilder();

		sql.append("SELECT account, balance, height ");
		sql.append("FROM ACCOUNTBALANCES as balances ");
		sql.append("JOIN (SELECT height FROM BLOCKS ORDER BY height DESC LIMIT 1) AS max_height ON true ");
		sql.append("WHERE asset_id=0");

		List<AccountBalanceData> data = new ArrayList<>();

		LOGGER.info( "Getting account balances ...");

		try {
			Statement statement = repository.connection.createStatement();

			ResultSet resultSet = statement.executeQuery(sql.toString());

			if (resultSet == null || !resultSet.next())
				return new ArrayList<>(0);

			do {
				String account = resultSet.getString(1);
				long balance = resultSet.getLong(2);
				int height = resultSet.getInt(3);

				data.add(new AccountBalanceData(account, ZERO, balance, height));
			} while (resultSet.next());
		} catch (SQLException e) {
			LOGGER.warn(e.getMessage());
		} catch (Exception e) {
			LOGGER.error(e.getMessage(), e);
		}

		LOGGER.info("Retrieved account balances: count = " + data.size());

		return data;
	}
}
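The capacity handling in startRecordingBalances keeps only the most recent heights in balancesByHeight. Below is a small standalone sketch of that bounded-history idea with illustrative names and a plain keyed removal of the lowest height; it is an assumption-level illustration, not the commit's exact code.

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;
    import java.util.concurrent.ConcurrentHashMap;

    public class BoundedHeightHistorySketch {
        public static void main(String[] args) {
            int capacity = 3; // analogous to balanceRecorderCapacity
            ConcurrentHashMap<Integer, List<String>> byHeight = new ConcurrentHashMap<>();

            for (int height = 1; height <= 6; height++) {
                byHeight.put(height, List.of("snapshot@" + height));

                // drop the lowest height while over the limit, keeping only recent history
                while (byHeight.size() > capacity) {
                    Optional<Integer> lowest = byHeight.keySet().stream().min(Comparator.naturalOrder());
                    lowest.ifPresent(byHeight::remove);
                }
            }

            // prints heights 4, 5 and 6 (iteration order of the map is unspecified)
            System.out.println(byHeight.keySet());
        }
    }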
Settings.java
@@ -441,6 +441,14 @@ public class Settings {
	 */
	private long archivingPause = 3000;

	private boolean balanceRecorderEnabled = false;

	private int balanceRecorderPriority = 1;

	private int balanceRecorderFrequency = 2*60*1000;

	private int balanceRecorderCapacity = 1000;

	// Domain mapping
	public static class ThreadLimit {
		private String messageType;
@@ -1230,4 +1238,20 @@ public class Settings {
	public long getArchivingPause() {
		return archivingPause;
	}

	public int getBalanceRecorderPriority() {
		return balanceRecorderPriority;
	}

	public int getBalanceRecorderFrequency() {
		return balanceRecorderFrequency;
	}

	public int getBalanceRecorderCapacity() {
		return balanceRecorderCapacity;
	}

	public boolean isBalanceRecorderEnabled() {
		return balanceRecorderEnabled;
	}
}