forked from Qortal/qortal

Compare commits: master...revert-209 (1 commit)
Commit: 8366c3db1e

pom.xml — 12 changes
@@ -3,7 +3,7 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>org.qortal</groupId>
 <artifactId>qortal</artifactId>
-<version>4.6.6</version>
+<version>4.6.0</version>
 <packaging>jar</packaging>
 <properties>
 <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
@@ -16,7 +16,7 @@
 <ciyam-at.version>1.4.2</ciyam-at.version>
 <commons-net.version>3.8.0</commons-net.version>
 <commons-text.version>1.12.0</commons-text.version>
-<commons-io.version>2.18.0</commons-io.version>
+<commons-io.version>2.17.0</commons-io.version>
 <commons-compress.version>1.27.1</commons-compress.version>
 <commons-lang3.version>3.17.0</commons-lang3.version>
 <dagger.version>1.2.2</dagger.version>
@@ -26,9 +26,9 @@
 <guava.version>33.3.1-jre</guava.version>
 <hamcrest-library.version>2.2</hamcrest-library.version>
 <homoglyph.version>1.2.1</homoglyph.version>
-<hsqldb.version>2.7.4</hsqldb.version>
+<hsqldb.version>2.5.1</hsqldb.version>
 <icu4j.version>76.1</icu4j.version>
-<java-diff-utils.version>4.15</java-diff-utils.version>
+<java-diff-utils.version>4.12</java-diff-utils.version>
 <javax.servlet-api.version>4.0.1</javax.servlet-api.version>
 <jaxb-runtime.version>2.3.9</jaxb-runtime.version>
 <jersey.version>2.42</jersey.version>
@@ -45,7 +45,7 @@
 <maven-dependency-plugin.version>3.6.1</maven-dependency-plugin.version>
 <maven-jar-plugin.version>3.4.2</maven-jar-plugin.version>
 <maven-package-info-plugin.version>1.1.0</maven-package-info-plugin.version>
-<maven-plugin.version>2.18.0</maven-plugin.version>
+<maven-plugin.version>2.17.1</maven-plugin.version>
 <maven-reproducible-build-plugin.version>0.17</maven-reproducible-build-plugin.version>
 <maven-resources-plugin.version>3.3.1</maven-resources-plugin.version>
 <maven-shade-plugin.version>3.6.0</maven-shade-plugin.version>
@@ -55,7 +55,7 @@
 <simplemagic.version>1.17</simplemagic.version>
 <slf4j.version>1.7.36</slf4j.version>
 <swagger-api.version>2.0.10</swagger-api.version>
-<swagger-ui.version>5.18.2</swagger-ui.version>
+<swagger-ui.version>5.17.14</swagger-ui.version>
 <upnp.version>1.2</upnp.version>
 <xz.version>1.10</xz.version>
 </properties>
@@ -198,76 +198,66 @@ public class Account {

 /** Returns whether account can be considered a "minting account".
 * <p>
-* To be considered a "minting account", the account needs to pass some of these tests:<br>
+* To be considered a "minting account", the account needs to pass all of these tests:<br>
 * <ul>
 * <li>account's level is at least <tt>minAccountLevelToMint</tt> from blockchain config</li>
-* <li>account's address has registered a name</li>
-* <li>account's address is a member of the minter group</li>
+* <li>account's address have registered a name</li>
+* <li>account's address is member of minter group</li>
 * </ul>
 *
-* @param isGroupValidated true if this account has already been validated for MINTER Group membership
 * @return true if account can be considered "minting account"
 * @throws DataException
 */
-public boolean canMint(boolean isGroupValidated) throws DataException {
+public boolean canMint() throws DataException {
 AccountData accountData = this.repository.getAccountRepository().getAccount(this.address);
 NameRepository nameRepository = this.repository.getNameRepository();
 GroupRepository groupRepository = this.repository.getGroupRepository();
-String myAddress = accountData.getAddress();

 int blockchainHeight = this.repository.getBlockRepository().getBlockchainHeight();
+int nameCheckHeight = BlockChain.getInstance().getOnlyMintWithNameHeight();
 int levelToMint = BlockChain.getInstance().getMinAccountLevelToMint();
 int level = accountData.getLevel();
 int groupIdToMint = BlockChain.getInstance().getMintingGroupId();
-int nameCheckHeight = BlockChain.getInstance().getOnlyMintWithNameHeight();
 int groupCheckHeight = BlockChain.getInstance().getGroupMemberCheckHeight();
-int removeNameCheckHeight = BlockChain.getInstance().getRemoveOnlyMintWithNameHeight();

-// Can only mint if:
-// Account's level is at least minAccountLevelToMint from blockchain config
-if (blockchainHeight < nameCheckHeight) {
-if (Account.isFounder(accountData.getFlags())) {
-return accountData.getBlocksMintedPenalty() == 0;
-} else {
-return level >= levelToMint;
-}
-}
+String myAddress = accountData.getAddress();
+List<NameData> myName = nameRepository.getNamesByOwner(myAddress);
+boolean isMember = groupRepository.memberExists(groupIdToMint, myAddress);

-// Can only mint on onlyMintWithNameHeight from blockchain config if:
-// Account's level is at least minAccountLevelToMint from blockchain config
-// Account's address has registered a name
-if (blockchainHeight >= nameCheckHeight && blockchainHeight < groupCheckHeight) {
-List<NameData> myName = nameRepository.getNamesByOwner(myAddress);
-if (Account.isFounder(accountData.getFlags())) {
-return accountData.getBlocksMintedPenalty() == 0 && !myName.isEmpty();
-} else {
-return level >= levelToMint && !myName.isEmpty();
-}
-}
+if (accountData == null)
+return false;

-// Can only mint on groupMemberCheckHeight from blockchain config if:
-// Account's level is at least minAccountLevelToMint from blockchain config
-// Account's address has registered a name
-// Account's address is a member of the minter group
-if (blockchainHeight >= groupCheckHeight && blockchainHeight < removeNameCheckHeight) {
-List<NameData> myName = nameRepository.getNamesByOwner(myAddress);
-if (Account.isFounder(accountData.getFlags())) {
-return accountData.getBlocksMintedPenalty() == 0 && !myName.isEmpty() && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
-} else {
-return level >= levelToMint && !myName.isEmpty() && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
-}
-}
+// Can only mint if level is at least minAccountLevelToMint< from blockchain config
+if (blockchainHeight < nameCheckHeight && level >= levelToMint)
+return true;

-// Can only mint on removeOnlyMintWithNameHeight from blockchain config if:
-// Account's level is at least minAccountLevelToMint from blockchain config
-// Account's address is a member of the minter group
-if (blockchainHeight >= removeNameCheckHeight) {
-if (Account.isFounder(accountData.getFlags())) {
-return accountData.getBlocksMintedPenalty() == 0 && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
-} else {
-return level >= levelToMint && (isGroupValidated || groupRepository.memberExists(groupIdToMint, myAddress));
-}
-}
+// Can only mint if have registered a name
+if (blockchainHeight >= nameCheckHeight && blockchainHeight < groupCheckHeight && level >= levelToMint && !myName.isEmpty())
+return true;
+// Can only mint if have registered a name and is member of minter group id
+if (blockchainHeight >= groupCheckHeight && level >= levelToMint && !myName.isEmpty() && isMember)
+return true;
+// Founders needs to pass same tests like minters
+if (blockchainHeight < nameCheckHeight &&
+Account.isFounder(accountData.getFlags()) &&
+accountData.getBlocksMintedPenalty() == 0)
+return true;

+if (blockchainHeight >= nameCheckHeight &&
+blockchainHeight < groupCheckHeight &&
+Account.isFounder(accountData.getFlags()) &&
+accountData.getBlocksMintedPenalty() == 0 &&
+!myName.isEmpty())
+return true;

+if (blockchainHeight >= groupCheckHeight &&
+Account.isFounder(accountData.getFlags()) &&
+accountData.getBlocksMintedPenalty() == 0 &&
+!myName.isEmpty() &&
+isMember)
+return true;

 return false;
 }
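For readability, the revert-209 side of the hunk above reassembles into the following single method. This is a best-effort reconstruction from the right-hand column of the compare view with indentation added, not authoritative source; consult the actual file in the branch.

public boolean canMint() throws DataException {
    AccountData accountData = this.repository.getAccountRepository().getAccount(this.address);
    NameRepository nameRepository = this.repository.getNameRepository();
    GroupRepository groupRepository = this.repository.getGroupRepository();

    int blockchainHeight = this.repository.getBlockRepository().getBlockchainHeight();
    int nameCheckHeight = BlockChain.getInstance().getOnlyMintWithNameHeight();
    int levelToMint = BlockChain.getInstance().getMinAccountLevelToMint();
    int level = accountData.getLevel();
    int groupIdToMint = BlockChain.getInstance().getMintingGroupId();
    int groupCheckHeight = BlockChain.getInstance().getGroupMemberCheckHeight();

    String myAddress = accountData.getAddress();
    List<NameData> myName = nameRepository.getNamesByOwner(myAddress);
    boolean isMember = groupRepository.memberExists(groupIdToMint, myAddress);

    // Note: as in the diff, accountData is already dereferenced above before this null check
    if (accountData == null)
        return false;

    // Below onlyMintWithNameHeight: level alone decides
    if (blockchainHeight < nameCheckHeight && level >= levelToMint)
        return true;

    // Between onlyMintWithNameHeight and groupMemberCheckHeight: level plus a registered name
    if (blockchainHeight >= nameCheckHeight && blockchainHeight < groupCheckHeight && level >= levelToMint && !myName.isEmpty())
        return true;

    // From groupMemberCheckHeight: level, registered name and minter-group membership
    if (blockchainHeight >= groupCheckHeight && level >= levelToMint && !myName.isEmpty() && isMember)
        return true;

    // Founders pass the same name/group tests, with a zero blocks-minted penalty instead of the level check
    if (blockchainHeight < nameCheckHeight
            && Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0)
        return true;

    if (blockchainHeight >= nameCheckHeight && blockchainHeight < groupCheckHeight
            && Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0
            && !myName.isEmpty())
        return true;

    if (blockchainHeight >= groupCheckHeight
            && Account.isFounder(accountData.getFlags()) && accountData.getBlocksMintedPenalty() == 0
            && !myName.isEmpty() && isMember)
        return true;

    return false;
}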
@@ -282,6 +272,7 @@ public class Account {
 return this.repository.getAccountRepository().getBlocksMintedPenaltyCount(this.address);
 }


 /** Returns whether account can build reward-shares.
 * <p>
 * To be able to create reward-shares, the account needs to pass at least one of these tests:<br>
@@ -295,7 +286,6 @@ public class Account {
 */
 public boolean canRewardShare() throws DataException {
 AccountData accountData = this.repository.getAccountRepository().getAccount(this.address);

 if (accountData == null)
 return false;

@@ -348,27 +338,9 @@ public class Account {
 return accountData.getLevel();
 }

-/**
-* Returns reward-share minting address, or unknown if reward-share does not exist.
-*
-* @param repository
-* @param rewardSharePublicKey
-* @return address or unknown
-* @throws DataException
-*/
-public static String getRewardShareMintingAddress(Repository repository, byte[] rewardSharePublicKey) throws DataException {
-// Find actual minter address
-RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey);

-if (rewardShareData == null)
-return "Unknown";

-return rewardShareData.getMinter();
-}

 /**
 * Returns 'effective' minting level, or zero if reward-share does not exist.
 *
 * @param repository
 * @param rewardSharePublicKey
 * @return 0+
@@ -383,7 +355,6 @@ public class Account {
 Account rewardShareMinter = new Account(repository, rewardShareData.getMinter());
 return rewardShareMinter.getEffectiveMintingLevel();
 }

 /**
 * Returns 'effective' minting level, with a fix for the zero level.
 * <p>
@@ -1,13 +1,7 @@
 package org.qortal.api.model;

-import org.qortal.account.Account;
-import org.qortal.repository.DataException;
-import org.qortal.repository.RepositoryManager;
-import org.qortal.repository.Repository;

 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;

 // All properties to be converted to JSON via JAXB
 @XmlAccessorType(XmlAccessType.FIELD)
@@ -53,31 +47,4 @@ public class ApiOnlineAccount {
 return this.recipientAddress;
 }

-public int getMinterLevelFromPublicKey() {
-try (final Repository repository = RepositoryManager.getRepository()) {
-return Account.getRewardShareEffectiveMintingLevel(repository, this.rewardSharePublicKey);
-} catch (DataException e) {
-return 0;
-}
-}

-public boolean getIsMember() {
-try (final Repository repository = RepositoryManager.getRepository()) {
-return repository.getGroupRepository().memberExists(694, getMinterAddress());
-} catch (DataException e) {
-return false;
-}
-}

-// JAXB special

-@XmlElement(name = "minterLevel")
-protected int getMinterLevel() {
-return getMinterLevelFromPublicKey();
-}

-@XmlElement(name = "isMinterMember")
-protected boolean getMinterMember() {
-return getIsMember();
-}
 }
@@ -9,7 +9,6 @@ import java.math.BigInteger;
 public class BlockMintingInfo {

 public byte[] minterPublicKey;
-public String minterAddress;
 public int minterLevel;
 public int onlineAccountsCount;
 public BigDecimal maxDistance;
@@ -20,4 +19,5 @@ public class BlockMintingInfo {

 public BlockMintingInfo() {
 }

 }
@@ -542,7 +542,6 @@ public class BlocksResource {
 }
 }

-String minterAddress = Account.getRewardShareMintingAddress(repository, blockData.getMinterPublicKey());
 int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
 if (minterLevel == 0)
 // This may be unavailable when requesting a trimmed block
@@ -555,7 +554,6 @@ public class BlocksResource {

 BlockMintingInfo blockMintingInfo = new BlockMintingInfo();
 blockMintingInfo.minterPublicKey = blockData.getMinterPublicKey();
-blockMintingInfo.minterAddress = minterAddress;
 blockMintingInfo.minterLevel = minterLevel;
 blockMintingInfo.onlineAccountsCount = blockData.getOnlineAccountsCount();
 blockMintingInfo.maxDistance = new BigDecimal(block.MAX_DISTANCE);
@@ -889,4 +887,5 @@ public class BlocksResource {
 throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
 }
 }

 }
@@ -234,21 +234,17 @@ public class ChatResource {
 }
 )
 @ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
-public ActiveChats getActiveChats(
-@PathParam("address") String address,
-@QueryParam("encoding") Encoding encoding,
-@QueryParam("haschatreference") Boolean hasChatReference
-) {
+public ActiveChats getActiveChats(@PathParam("address") String address, @QueryParam("encoding") Encoding encoding) {
 if (address == null || !Crypto.isValidAddress(address))
 throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);

 try (final Repository repository = RepositoryManager.getRepository()) {
-return repository.getChatRepository().getActiveChats(address, encoding, hasChatReference);
+return repository.getChatRepository().getActiveChats(address, encoding);
 } catch (DataException e) {
 throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
 }
 }

 @POST
 @Operation(
 summary = "Build raw, unsigned, CHAT transaction",
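Reassembled from the right-hand column, the reverted getActiveChats() handler above is simply the two-parameter form again; the haschatreference query parameter is no longer accepted. Sketch only, indentation added:

public ActiveChats getActiveChats(@PathParam("address") String address, @QueryParam("encoding") Encoding encoding) {
    if (address == null || !Crypto.isValidAddress(address))
        throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);

    try (final Repository repository = RepositoryManager.getRepository()) {
        // ChatRepository.getActiveChats() is back to the two-argument signature
        return repository.getChatRepository().getActiveChats(address, encoding);
    } catch (DataException e) {
        throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
    }
}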
@@ -459,7 +459,7 @@ public class AdminResource {

 // Qortal: check reward-share's minting account is still allowed to mint
 Account rewardShareMintingAccount = new Account(repository, rewardShareData.getMinter());
-if (!rewardShareMintingAccount.canMint(false))
+if (!rewardShareMintingAccount.canMint())
 throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.CANNOT_MINT);

 MintingAccountData mintingAccountData = new MintingAccountData(mintingAccount.getPrivateKey(), mintingAccount.getPublicKey());
@@ -77,9 +77,7 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
 }

 try (final Repository repository = RepositoryManager.getRepository()) {
-Boolean hasChatReference = getHasChatReference(session);
+ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session));

-ActiveChats activeChats = repository.getChatRepository().getActiveChats(ourAddress, getTargetEncoding(session), hasChatReference);

 StringWriter stringWriter = new StringWriter();

@@ -105,20 +103,4 @@ public class ActiveChatsWebSocket extends ApiWebSocket {
 return Encoding.valueOf(encoding);
 }

-private Boolean getHasChatReference(Session session) {
-Map<String, List<String>> queryParams = session.getUpgradeRequest().getParameterMap();
-List<String> hasChatReferenceList = queryParams.get("haschatreference");

-// Return null if not specified
-if (hasChatReferenceList != null && hasChatReferenceList.size() == 1) {
-String value = hasChatReferenceList.get(0).toLowerCase();
-if (value.equals("true")) {
-return true;
-} else if (value.equals("false")) {
-return false;
-}
-}
-return null; // Ignored if not present
-}

 }
@@ -168,7 +168,7 @@ public class ArbitraryDataRenderer {
 byte[] data = Files.readAllBytes(filePath); // TODO: limit file size that can be read into memory
 HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, includeResourceIdInPrefix, data, qdnContext, service, identifier, theme, usingCustomRouting);
 htmlParser.addAdditionalHeaderTags();
-response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; font-src 'self' data:; media-src 'self' data: blob:; img-src 'self' data: blob:; connect-src 'self' wss:;");
+response.addHeader("Content-Security-Policy", "default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' data: blob:; img-src 'self' data: blob:;");
 response.setContentType(context.getMimeType(filename));
 response.setContentLength(htmlParser.getData().length);
 response.getOutputStream().write(htmlParser.getData());
@@ -25,7 +25,10 @@ import org.qortal.data.block.BlockSummaryData;
 import org.qortal.data.block.BlockTransactionData;
 import org.qortal.data.network.OnlineAccountData;
 import org.qortal.data.transaction.TransactionData;
-import org.qortal.repository.*;
+import org.qortal.repository.ATRepository;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.TransactionRepository;
 import org.qortal.settings.Settings;
 import org.qortal.transaction.AtTransaction;
 import org.qortal.transaction.Transaction;
@@ -141,13 +144,10 @@ public class Block {
 private final Account mintingAccount;
 private final AccountData mintingAccountData;
 private final boolean isMinterFounder;
-private final boolean isMinterMember;

 private final Account recipientAccount;
 private final AccountData recipientAccountData;

-final BlockChain blockChain = BlockChain.getInstance();

 ExpandedAccount(Repository repository, RewardShareData rewardShareData) throws DataException {
 this.rewardShareData = rewardShareData;
 this.sharePercent = this.rewardShareData.getSharePercent();
@@ -157,7 +157,6 @@ public class Block {
 this.isMinterFounder = Account.isFounder(mintingAccountData.getFlags());

 this.isRecipientAlsoMinter = this.rewardShareData.getRecipient().equals(this.mintingAccount.getAddress());
-this.isMinterMember = repository.getGroupRepository().memberExists(BlockChain.getInstance().getMintingGroupId(), this.mintingAccount.getAddress());

 if (this.isRecipientAlsoMinter) {
 // Self-share: minter is also recipient
@@ -182,7 +181,7 @@ public class Block {
 * <p>
 * This is a method, not a final variable, because account's level can change between construction and call,
 * e.g. during Block.process() where account levels are bumped right before Block.distributeBlockReward().
 *
 * @return account-level share "bin" from blockchain config, or null if founder / none found
 */
 public AccountLevelShareBin getShareBin(int blockHeight) {
@@ -193,12 +192,8 @@ public class Block {
 if (accountLevel <= 0)
 return null; // level 0 isn't included in any share bins

-if (blockHeight >= blockChain.getFixBatchRewardHeight()) {
-if (!this.isMinterMember)
-return null; // not member of minter group isn't included in any share bins
-}

 // Select the correct set of share bins based on block height
+final BlockChain blockChain = BlockChain.getInstance();
 final AccountLevelShareBin[] shareBinsByLevel = (blockHeight >= blockChain.getSharesByLevelV2Height()) ?
 blockChain.getShareBinsByAccountLevelV2() : blockChain.getShareBinsByAccountLevelV1();

@@ -267,7 +262,7 @@ public class Block {
 * Constructs new Block without loading transactions and AT states.
 * <p>
 * Transactions and AT states are loaded on first call to getTransactions() or getATStates() respectively.
 *
 * @param repository
 * @param blockData
 */
@@ -338,7 +333,7 @@ public class Block {

 /**
 * Constructs new Block with empty transaction list, using passed minter account.
 *
 * @param repository
 * @param blockData
 * @param minter
@@ -356,7 +351,7 @@ public class Block {
 * This constructor typically used when minting a new block.
 * <p>
 * Note that CIYAM ATs will be executed and AT-Transactions prepended to this block, along with AT state data and fees.
 *
 * @param repository
 * @param parentBlockData
 * @param minter
@@ -382,7 +377,7 @@ public class Block {
 byte[] encodedOnlineAccounts = new byte[0];
 int onlineAccountsCount = 0;
 byte[] onlineAccountsSignatures = null;

 if (isBatchRewardDistributionBlock(height)) {
 // Batch reward distribution block - copy online accounts from recent block with highest online accounts count

@@ -414,21 +409,6 @@ public class Block {
 });
 }

-// After feature trigger, remove any online accounts that are not minter group member
-if (height >= BlockChain.getInstance().getGroupMemberCheckHeight()) {
-onlineAccounts.removeIf(a -> {
-try {
-int groupId = BlockChain.getInstance().getMintingGroupId();
-String address = Account.getRewardShareMintingAddress(repository, a.getPublicKey());
-boolean isMinterGroupMember = repository.getGroupRepository().memberExists(groupId, address);
-return !isMinterGroupMember;
-} catch (DataException e) {
-// Something went wrong, so remove the account
-return true;
-}
-});
-}

 if (onlineAccounts.isEmpty()) {
 LOGGER.debug("No online accounts - not even our own?");
 return null;
@@ -532,7 +512,7 @@ public class Block {
 * Mints new block using this block as template, but with different minting account.
 * <p>
 * NOTE: uses the same transactions list, AT states, etc.
 *
 * @param minter
 * @return
 * @throws DataException
@@ -618,7 +598,7 @@ public class Block {

 /**
 * Return composite block signature (minterSignature + transactionsSignature).
 *
 * @return byte[], or null if either component signature is null.
 */
 public byte[] getSignature() {
@@ -633,7 +613,7 @@ public class Block {
 * <p>
 * We're starting with version 4 as a nod to being newer than successor Qora,
 * whose latest block version was 3.
 *
 * @return 1, 2, 3 or 4
 */
 public int getNextBlockVersion() {
@@ -647,7 +627,7 @@ public class Block {
 * Return block's transactions.
 * <p>
 * If the block was loaded from repository then it's possible this method will call the repository to fetch the transactions if not done already.
 *
 * @return
 * @throws DataException
 */
@@ -681,7 +661,7 @@ public class Block {
 * If the block was loaded from repository then it's possible this method will call the repository to fetch the AT states if not done already.
 * <p>
 * <b>Note:</b> AT states fetched from repository only contain summary info, not actual data like serialized state data or AT creation timestamps!
 *
 * @return
 * @throws DataException
 */
@@ -717,7 +697,7 @@ public class Block {
 * <p>
 * Typically called as part of Block.process() or Block.orphan()
 * so ideally after any calls to Block.isValid().
 *
 * @throws DataException
 */
 public List<ExpandedAccount> getExpandedAccounts() throws DataException {
@@ -735,20 +715,10 @@ public class Block {

 List<ExpandedAccount> expandedAccounts = new ArrayList<>();

-for (RewardShareData rewardShare : this.cachedOnlineRewardShares) {
-int groupId = BlockChain.getInstance().getMintingGroupId();
-String address = rewardShare.getMinter();
-boolean isMinterGroupMember = repository.getGroupRepository().memberExists(groupId, address);
+for (RewardShareData rewardShare : this.cachedOnlineRewardShares)
+expandedAccounts.add(new ExpandedAccount(repository, rewardShare));

-if (this.getBlockData().getHeight() < BlockChain.getInstance().getFixBatchRewardHeight())
-expandedAccounts.add(new ExpandedAccount(repository, rewardShare));

-if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight() && isMinterGroupMember)
-expandedAccounts.add(new ExpandedAccount(repository, rewardShare));
-}

 this.cachedExpandedAccounts = expandedAccounts;
-LOGGER.trace(() -> String.format("Online reward-shares after expanded accounts %s", this.cachedOnlineRewardShares));

 return this.cachedExpandedAccounts;
 }
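After the revert, the loop in getExpandedAccounts() no longer filters by minter-group membership or the fixBatchRewardHeight trigger; every cached online reward-share is expanded. Reassembled from the right-hand column (sketch, indentation added):

for (RewardShareData rewardShare : this.cachedOnlineRewardShares)
    expandedAccounts.add(new ExpandedAccount(repository, rewardShare));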
@@ -757,7 +727,7 @@ public class Block {

 /**
 * Load parent block's data from repository via this block's reference.
 *
 * @return parent's BlockData, or null if no parent found
 * @throws DataException
 */
@@ -771,7 +741,7 @@ public class Block {

 /**
 * Load child block's data from repository via this block's signature.
 *
 * @return child's BlockData, or null if no parent found
 * @throws DataException
 */
@@ -791,7 +761,7 @@ public class Block {
 * Used when constructing a new block during minting.
 * <p>
 * Requires block's {@code minter} being a {@code PrivateKeyAccount} so block's transactions signature can be recalculated.
 *
 * @param transactionData
 * @return true if transaction successfully added to block, false otherwise
 * @throws IllegalStateException
@@ -844,7 +814,7 @@ public class Block {
 * Used when constructing a new block during minting.
 * <p>
 * Requires block's {@code minter} being a {@code PrivateKeyAccount} so block's transactions signature can be recalculated.
 *
 * @param transactionData
 * @throws IllegalStateException
 * if block's {@code minter} is not a {@code PrivateKeyAccount}.
@@ -889,7 +859,7 @@ public class Block {
 * previous block's minter signature + minter's public key + (encoded) online-accounts data
 * <p>
 * (Previous block's minter signature is extracted from this block's reference).
 *
 * @throws IllegalStateException
 * if block's {@code minter} is not a {@code PrivateKeyAccount}.
 * @throws RuntimeException
@@ -906,7 +876,7 @@ public class Block {
 * Recalculate block's transactions signature.
 * <p>
 * Requires block's {@code minter} being a {@code PrivateKeyAccount}.
 *
 * @throws IllegalStateException
 * if block's {@code minter} is not a {@code PrivateKeyAccount}.
 * @throws RuntimeException
@@ -1028,7 +998,7 @@ public class Block {
 * Recalculate block's minter and transactions signatures, thus giving block full signature.
 * <p>
 * Note: Block instance must have been constructed with a <tt>PrivateKeyAccount</tt> minter or this call will throw an <tt>IllegalStateException</tt>.
 *
 * @throws IllegalStateException
 * if block's {@code minter} is not a {@code PrivateKeyAccount}.
 */
@@ -1041,7 +1011,7 @@ public class Block {

 /**
 * Returns whether this block's signatures are valid.
 *
 * @return true if both minter and transaction signatures are valid, false otherwise
 */
 public boolean isSignatureValid() {
@@ -1065,7 +1035,7 @@ public class Block {
 * <p>
 * Used by BlockMinter to check whether it's time to mint a new block,
 * and also used by Block.isValid for checks (if not a testchain).
 *
 * @return ValidationResult.OK if timestamp valid, or some other ValidationResult otherwise.
 * @throws DataException
 */
@@ -1158,17 +1128,8 @@ public class Block {
 if (this.getBlockData().getHeight() >= BlockChain.getInstance().getOnlineAccountMinterLevelValidationHeight()) {
 List<ExpandedAccount> expandedAccounts = this.getExpandedAccounts();
 for (ExpandedAccount account : expandedAccounts) {
-int groupId = BlockChain.getInstance().getMintingGroupId();
-String address = account.getMintingAccount().getAddress();
-boolean isMinterGroupMember = repository.getGroupRepository().memberExists(groupId, address);

 if (account.getMintingAccount().getEffectiveMintingLevel() == 0)
 return ValidationResult.ONLINE_ACCOUNTS_INVALID;

-if (this.getBlockData().getHeight() >= BlockChain.getInstance().getFixBatchRewardHeight()) {
-if (!isMinterGroupMember)
-return ValidationResult.ONLINE_ACCOUNTS_INVALID;
-}
 }
 }

@@ -1254,7 +1215,7 @@ public class Block {
 * <p>
 * Checks block's transactions by testing their validity then processing them.<br>
 * Hence uses a repository savepoint during execution.
 *
 * @return ValidationResult.OK if block is valid, or some other ValidationResult otherwise.
 * @throws DataException
 */
@@ -1297,7 +1258,6 @@ public class Block {

 // Online Accounts
 ValidationResult onlineAccountsResult = this.areOnlineAccountsValid();
-LOGGER.trace("Accounts valid = {}", onlineAccountsResult);
 if (onlineAccountsResult != ValidationResult.OK)
 return onlineAccountsResult;

@@ -1386,7 +1346,7 @@ public class Block {
 // Check transaction can even be processed
 validationResult = transaction.isProcessable();
 if (validationResult != Transaction.ValidationResult.OK) {
-LOGGER.debug(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
+LOGGER.info(String.format("Error during transaction validation, tx %s: %s", Base58.encode(transactionData.getSignature()), validationResult.name()));
 return ValidationResult.TRANSACTION_INVALID;
 }

@@ -1426,7 +1386,7 @@ public class Block {
 * <p>
 * NOTE: will execute ATs locally if not already done.<br>
 * This is so we have locally-generated AT states for comparison.
 *
 * @return OK, or some AT-related validation result
 * @throws DataException
 */
@@ -1502,11 +1462,11 @@ public class Block {
 * Note: this method does not store new AT state data into repository - that is handled by <tt>process()</tt>.
 * <p>
 * This method is not needed if fetching an existing block from the repository as AT state data will be loaded from repository as well.
 *
 * @see #isValid()
 *
 * @throws DataException
 *
 */
 private void executeATs() throws DataException {
 // We're expecting a lack of AT state data at this point.
@@ -1558,7 +1518,7 @@ public class Block {
 return false;

 Account mintingAccount = new PublicKeyAccount(this.repository, rewardShareData.getMinterPublicKey());
-return mintingAccount.canMint(false);
+return mintingAccount.canMint();
 }

 /**
@@ -1578,7 +1538,7 @@ public class Block {

 /**
 * Process block, and its transactions, adding them to the blockchain.
 *
 * @throws DataException
 */
 public void process() throws DataException {
@@ -1587,7 +1547,6 @@ public class Block {
 this.blockData.setHeight(blockchainHeight + 1);

 LOGGER.trace(() -> String.format("Processing block %d", this.blockData.getHeight()));
-LOGGER.trace(() -> String.format("Online Reward Shares in process %s", this.cachedOnlineRewardShares));

 if (this.blockData.getHeight() > 1) {

@@ -1880,7 +1839,7 @@ public class Block {

 /**
 * Removes block from blockchain undoing transactions and adding them to unconfirmed pile.
 *
 * @throws DataException
 */
 public void orphan() throws DataException {
@@ -1920,7 +1879,7 @@ public class Block {
 SelfSponsorshipAlgoV3Block.orphanAccountPenalties(this);
 }
 }

 // Account levels and block rewards are only processed/orphaned on block reward distribution blocks
 if (this.isRewardDistributionBlock()) {
 // Block rewards, including transaction fees, removed after transactions undone
@@ -2254,7 +2213,6 @@ public class Block {
 List<AccountBalanceData> accountBalanceDeltas = balanceChanges.entrySet().stream()
 .map(entry -> new AccountBalanceData(entry.getKey(), Asset.QORT, entry.getValue()))
 .collect(Collectors.toList());
-LOGGER.trace("Account Balance Deltas: {}", accountBalanceDeltas);
 this.repository.getAccountRepository().modifyAssetBalances(accountBalanceDeltas);
 }

@@ -2267,30 +2225,30 @@ public class Block {

 /*
 * Distribution rules:
 *
 * Distribution is based on the minting account of 'online' reward-shares.
 *
 * If ANY founders are online, then they receive the leftover non-distributed reward.
 * If NO founders are online, then account-level-based rewards are scaled up so 100% of reward is allocated.
 *
 * If ANY non-maxxed legacy QORA holders exist then they are always allocated their fixed share (e.g. 20%).
 *
 * There has to be either at least one 'online' account for blocks to be minted
 * so there is always either one account-level-based or founder reward candidate.
 *
 * Examples:
 *
 * With at least one founder online:
 * Level 1/2 accounts: 5%
 * Legacy QORA holders: 20%
 * Founders: ~75%
 *
 * No online founders:
 * Level 1/2 accounts: 5%
 * Level 5/6 accounts: 15%
 * Legacy QORA holders: 20%
 * Total: 40%
 *
 * After scaling account-level-based shares to fill 100%:
 * Level 1/2 accounts: 20%
 * Level 5/6 accounts: 60%
@@ -2306,6 +2264,7 @@ public class Block {
 // Select the correct set of share bins based on block height
 List<AccountLevelShareBin> accountLevelShareBinsForBlock = (this.blockData.getHeight() >= BlockChain.getInstance().getSharesByLevelV2Height()) ?
 BlockChain.getInstance().getAccountLevelShareBinsV2() : BlockChain.getInstance().getAccountLevelShareBinsV1();

 // Determine reward candidates based on account level
 // This needs a deep copy, so the shares can be modified when tiers aren't activated yet
 List<AccountLevelShareBin> accountLevelShareBins = new ArrayList<>();
@@ -2595,11 +2554,9 @@ public class Block {
 return;

 int minterLevel = Account.getRewardShareEffectiveMintingLevel(this.repository, this.getMinter().getPublicKey());
-String minterAddress = Account.getRewardShareMintingAddress(this.repository, this.getMinter().getPublicKey());

 LOGGER.debug(String.format("======= BLOCK %d (%.8s) =======", this.getBlockData().getHeight(), Base58.encode(this.getSignature())));
 LOGGER.debug(String.format("Timestamp: %d", this.getBlockData().getTimestamp()));
-LOGGER.debug(String.format("Minter address: %s", minterAddress));
 LOGGER.debug(String.format("Minter level: %d", minterLevel));
 LOGGER.debug(String.format("Online accounts: %d", this.getBlockData().getOnlineAccountsCount()));
 LOGGER.debug(String.format("AT count: %d", this.getBlockData().getATCount()));
@@ -71,7 +71,6 @@ public class BlockChain {
 transactionV6Timestamp,
 disableReferenceTimestamp,
 increaseOnlineAccountsDifficultyTimestamp,
-decreaseOnlineAccountsDifficultyTimestamp,
 onlineAccountMinterLevelValidationHeight,
 selfSponsorshipAlgoV1Height,
 selfSponsorshipAlgoV2Height,
@@ -86,9 +85,7 @@ public class BlockChain {
 disableRewardshareHeight,
 enableRewardshareHeight,
 onlyMintWithNameHeight,
-removeOnlyMintWithNameHeight,
-groupMemberCheckHeight,
-fixBatchRewardHeight
+groupMemberCheckHeight
 }

 // Custom transaction fees
@ -220,10 +217,6 @@ public class BlockChain {
|
|||||||
* featureTriggers because unit tests need to set this value via Reflection. */
|
* featureTriggers because unit tests need to set this value via Reflection. */
|
||||||
private long onlineAccountsModulusV2Timestamp;
|
private long onlineAccountsModulusV2Timestamp;
|
||||||
|
|
||||||
/** Feature trigger timestamp for ONLINE_ACCOUNTS_MODULUS time interval decrease. Can't use
|
|
||||||
* featureTriggers because unit tests need to set this value via Reflection. */
|
|
||||||
private long onlineAccountsModulusV3Timestamp;
|
|
||||||
|
|
||||||
/** Snapshot timestamp for self sponsorship algo V1 */
|
/** Snapshot timestamp for self sponsorship algo V1 */
|
||||||
private long selfSponsorshipAlgoV1SnapshotTimestamp;
|
private long selfSponsorshipAlgoV1SnapshotTimestamp;
|
||||||
|
|
||||||
@ -410,10 +403,6 @@ public class BlockChain {
|
|||||||
return this.onlineAccountsModulusV2Timestamp;
|
return this.onlineAccountsModulusV2Timestamp;
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getOnlineAccountsModulusV3Timestamp() {
|
|
||||||
return this.onlineAccountsModulusV3Timestamp;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Block reward batching */
|
/* Block reward batching */
|
||||||
public long getBlockRewardBatchStartHeight() {
|
public long getBlockRewardBatchStartHeight() {
|
||||||
return this.blockRewardBatchStartHeight;
|
return this.blockRewardBatchStartHeight;
|
||||||
@ -590,10 +579,6 @@ public class BlockChain {
|
|||||||
return this.featureTriggers.get(FeatureTrigger.increaseOnlineAccountsDifficultyTimestamp.name()).longValue();
|
return this.featureTriggers.get(FeatureTrigger.increaseOnlineAccountsDifficultyTimestamp.name()).longValue();
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getDecreaseOnlineAccountsDifficultyTimestamp() {
|
|
||||||
return this.featureTriggers.get(FeatureTrigger.decreaseOnlineAccountsDifficultyTimestamp.name()).longValue();
|
|
||||||
}
|
|
||||||
|
|
||||||
public int getSelfSponsorshipAlgoV1Height() {
|
public int getSelfSponsorshipAlgoV1Height() {
|
||||||
return this.featureTriggers.get(FeatureTrigger.selfSponsorshipAlgoV1Height.name()).intValue();
|
return this.featureTriggers.get(FeatureTrigger.selfSponsorshipAlgoV1Height.name()).intValue();
|
||||||
}
|
}
|
||||||
@ -650,18 +635,10 @@ public class BlockChain {
|
|||||||
return this.featureTriggers.get(FeatureTrigger.onlyMintWithNameHeight.name()).intValue();
|
return this.featureTriggers.get(FeatureTrigger.onlyMintWithNameHeight.name()).intValue();
|
||||||
}
|
}
|
||||||
|
|
||||||
public int getRemoveOnlyMintWithNameHeight() {
|
|
||||||
return this.featureTriggers.get(FeatureTrigger.removeOnlyMintWithNameHeight.name()).intValue();
|
|
||||||
}
|
|
||||||
|
|
||||||
public int getGroupMemberCheckHeight() {
|
public int getGroupMemberCheckHeight() {
|
||||||
return this.featureTriggers.get(FeatureTrigger.groupMemberCheckHeight.name()).intValue();
|
return this.featureTriggers.get(FeatureTrigger.groupMemberCheckHeight.name()).intValue();
|
||||||
}
|
}
|
||||||
|
|
||||||
public int getFixBatchRewardHeight() {
|
|
||||||
return this.featureTriggers.get(FeatureTrigger.fixBatchRewardHeight.name()).intValue();
|
|
||||||
}
|
|
||||||
|
|
||||||
// More complex getters for aspects that change by height or timestamp
|
// More complex getters for aspects that change by height or timestamp
|
||||||
|
|
||||||
public long getRewardAtHeight(int ourHeight) {
|
public long getRewardAtHeight(int ourHeight) {
|
||||||
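Illustrative note (not part of the diff): the getters removed above all follow the same pattern used throughout BlockChain — each trigger value sits in a name-keyed map and is read back by enum name, narrowed to int or long as needed. A hedged sketch of that pattern with invented trigger names:

import java.util.HashMap;
import java.util.Map;

// Sketch only: a name-keyed feature-trigger map with typed getters; trigger names and values are hypothetical.
class FeatureTriggerSketch {
    enum FeatureTrigger { exampleHeight, exampleTimestamp }

    private final Map<String, Long> featureTriggers = new HashMap<>();

    FeatureTriggerSketch() {
        featureTriggers.put(FeatureTrigger.exampleHeight.name(), 100L);
        featureTriggers.put(FeatureTrigger.exampleTimestamp.name(), 1_700_000_000_000L);
    }

    int getExampleHeight() {
        return featureTriggers.get(FeatureTrigger.exampleHeight.name()).intValue();
    }

    long getExampleTimestamp() {
        return featureTriggers.get(FeatureTrigger.exampleTimestamp.name()).longValue();
    }
}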
@@ -97,375 +97,364 @@ public class BlockMinter extends Thread {
 
 final boolean isSingleNodeTestnet = Settings.getInstance().isSingleNodeTestnet();
 
-// Flags for tracking change in whether minting is possible,
-// so we can notify Controller, and further update SysTray, etc.
-boolean isMintingPossible = false;
-boolean wasMintingPossible = isMintingPossible;
-try {
+try (final Repository repository = RepositoryManager.getRepository()) {
+// Going to need this a lot...
+BlockRepository blockRepository = repository.getBlockRepository();
+// Flags for tracking change in whether minting is possible,
+// so we can notify Controller, and further update SysTray, etc.
+boolean isMintingPossible = false;
+boolean wasMintingPossible = isMintingPossible;
 while (running) {
-// recreate repository for new loop iteration
-try (final Repository repository = RepositoryManager.getRepository()) {
+if (isMintingPossible != wasMintingPossible)
+Controller.getInstance().onMintingPossibleChange(isMintingPossible);
 
-// Going to need this a lot...
-BlockRepository blockRepository = repository.getBlockRepository();
+wasMintingPossible = isMintingPossible;
 
-if (isMintingPossible != wasMintingPossible)
-Controller.getInstance().onMintingPossibleChange(isMintingPossible);
+try {
+// Free up any repository locks
+repository.discardChanges();
 
-wasMintingPossible = isMintingPossible;
+// Sleep for a while.
+// It's faster on single node testnets, to allow lots of blocks to be minted quickly.
+Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
 
+isMintingPossible = false;
+
+final Long now = NTP.getTime();
+if (now == null)
+continue;
+
+final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
+if (minLatestBlockTimestamp == null)
+continue;
+
+List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
+// No minting accounts?
+if (mintingAccountsData.isEmpty())
+continue;
+
+// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
+// Note that minting accounts are actually reward-shares in Qortal
+Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
+while (madi.hasNext()) {
+MintingAccountData mintingAccountData = madi.next();
+
+RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
+if (rewardShareData == null) {
+// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
+madi.remove();
+continue;
+}
+
+Account mintingAccount = new Account(repository, rewardShareData.getMinter());
+if (!mintingAccount.canMint()) {
+// Minting-account component of reward-share can no longer mint - disregard
+madi.remove();
+continue;
+}
+
+// Optional (non-validated) prevention of block submissions below a defined level.
+// This is an unvalidated version of Blockchain.minAccountLevelToMint
+// and exists only to reduce block candidates by default.
+int level = mintingAccount.getEffectiveMintingLevel();
+if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
+madi.remove();
+}
+}
+
+// Needs a mutable copy of the unmodifiableList
+List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
+BlockData lastBlockData = blockRepository.getLastBlock();
+
+// Disregard peers that have "misbehaved" recently
+peers.removeIf(Controller.hasMisbehaved);
+
+// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
+// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
+if (!Synchronizer.getInstance().getRecoveryMode())
+peers.removeIf(Controller.hasNoRecentBlock);
+
+// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
+if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
+continue;
+
+// If we are stuck on an invalid block, we should allow an alternative to be minted
+boolean recoverInvalidBlock = false;
+if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
+// We've had at least one invalid block
+long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
+long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
+if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
+if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
+// Last valid block was more than 10 mins ago, but we've had an invalid block since then
+// Assume that the chain has stalled because there is no alternative valid candidate
+// Enter recovery mode to allow alternative, valid candidates to be minted
+recoverInvalidBlock = true;
+}
+}
+}
+
+// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
+if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
+if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
+continue;
+
+// There are enough peers with a recent block and our latest block is recent
+// so go ahead and mint a block if possible.
+isMintingPossible = true;
+
+// Check blockchain hasn't changed
+if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
+previousBlockData = lastBlockData;
+newBlocks.clear();
+
+// Reduce log timeout
+logTimeout = 10 * 1000L;
+
+// Last low weight block is no longer valid
+parentSignatureForLastLowWeightBlock = null;
+}
+
+// Discard accounts we have already built blocks with
+mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
+
+// Do we need to build any potential new blocks?
+List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
+
+// We might need to sit the next block out, if one of our minting accounts signed the previous one
+// Skip this check for single node testnets, since they definitely need to mint every block
+byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
+boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
+if (mintedLastBlock && !isSingleNodeTestnet) {
+LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
+continue;
+}
+
+if (parentSignatureForLastLowWeightBlock != null) {
+// The last iteration found a higher weight block in the network, so sleep for a while
+// to allow is to sync the higher weight chain. We are sleeping here rather than when
+// detected as we don't want to hold the blockchain lock open.
+LOGGER.info("Sleeping for 10 seconds...");
+Thread.sleep(10 * 1000L);
+}
+
+for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
+// First block does the AT heavy-lifting
+if (newBlocks.isEmpty()) {
+Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
+if (newBlock == null) {
+// For some reason we can't mint right now
+moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
+continue;
+}
+
+newBlocks.add(newBlock);
+} else {
+// The blocks for other minters require less effort...
+Block newBlock = newBlocks.get(0).remint(mintingAccount);
+if (newBlock == null) {
+// For some reason we can't mint right now
+moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
+continue;
+}
+
+newBlocks.add(newBlock);
+}
+}
+
+// No potential block candidates?
+if (newBlocks.isEmpty())
+continue;
+
+// Make sure we're the only thread modifying the blockchain
+ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
+if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
+LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
+continue;
+}
+
+boolean newBlockMinted = false;
+Block newBlock = null;
 
 try {
-// reset the repository, to the repository recreated for this loop iteration
-for( Block newBlock : newBlocks ) newBlock.setRepository(repository);
+// Clear repository session state so we have latest view of data
 
-// Free up any repository locks
 repository.discardChanges();
 
-// Sleep for a while.
-// It's faster on single node testnets, to allow lots of blocks to be minted quickly.
-Thread.sleep(isSingleNodeTestnet ? 50 : 1000);
+// Now that we have blockchain lock, do final check that chain hasn't changed
+BlockData latestBlockData = blockRepository.getLastBlock();
+if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
 
-isMintingPossible = false;
 
-final Long now = NTP.getTime();
-if (now == null)
 continue;
 
-final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
-if (minLatestBlockTimestamp == null)
-continue;
+List<Block> goodBlocks = new ArrayList<>();
+boolean wasInvalidBlockDiscarded = false;
+Iterator<Block> newBlocksIterator = newBlocks.iterator();
 
-List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
-// No minting accounts?
-if (mintingAccountsData.isEmpty())
-continue;
+while (newBlocksIterator.hasNext()) {
+Block testBlock = newBlocksIterator.next();
 
-// Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level
-// Note that minting accounts are actually reward-shares in Qortal
-Iterator<MintingAccountData> madi = mintingAccountsData.iterator();
-while (madi.hasNext()) {
-MintingAccountData mintingAccountData = madi.next();
+// Is new block's timestamp valid yet?
+// We do a separate check as some timestamp checks are skipped for testchains
+if (testBlock.isTimestampValid() != ValidationResult.OK)
 
-RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
-if (rewardShareData == null) {
-// Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts
-madi.remove();
-continue;
-}
 
-Account mintingAccount = new Account(repository, rewardShareData.getMinter());
-if (!mintingAccount.canMint(true)) {
-// Minting-account component of reward-share can no longer mint - disregard
-madi.remove();
-continue;
-}
 
-// Optional (non-validated) prevention of block submissions below a defined level.
-// This is an unvalidated version of Blockchain.minAccountLevelToMint
-// and exists only to reduce block candidates by default.
-int level = mintingAccount.getEffectiveMintingLevel();
-if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
-madi.remove();
-}
-}
 
-// Needs a mutable copy of the unmodifiableList
-List<Peer> peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers());
-BlockData lastBlockData = blockRepository.getLastBlock();
 
-// Disregard peers that have "misbehaved" recently
-peers.removeIf(Controller.hasMisbehaved);
 
-// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
-// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
-if (!Synchronizer.getInstance().getRecoveryMode())
-peers.removeIf(Controller.hasNoRecentBlock);
 
-// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
-if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
-continue;
 
-// If we are stuck on an invalid block, we should allow an alternative to be minted
-boolean recoverInvalidBlock = false;
-if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) {
-// We've had at least one invalid block
-long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived;
-long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived;
-if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) {
-if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) {
-// Last valid block was more than 10 mins ago, but we've had an invalid block since then
-// Assume that the chain has stalled because there is no alternative valid candidate
-// Enter recovery mode to allow alternative, valid candidates to be minted
-recoverInvalidBlock = true;
-}
-}
-}
 
-// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
-if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
-if (!Synchronizer.getInstance().getRecoveryMode() && !recoverInvalidBlock)
 continue;
 
-// There are enough peers with a recent block and our latest block is recent
-// so go ahead and mint a block if possible.
-isMintingPossible = true;
+testBlock.preProcess();
 
-// Check blockchain hasn't changed
-if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
-previousBlockData = lastBlockData;
-newBlocks.clear();
+// Is new block valid yet? (Before adding unconfirmed transactions)
+ValidationResult result = testBlock.isValid();
+if (result != ValidationResult.OK) {
+moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
 
-// Reduce log timeout
-logTimeout = 10 * 1000L;
+newBlocksIterator.remove();
+wasInvalidBlockDiscarded = true;
+/*
+* Bail out fast so that we loop around from the top again.
+* This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
+* via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
+* Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
+*/
+break;
+}
 
-// Last low weight block is no longer valid
-parentSignatureForLastLowWeightBlock = null;
+goodBlocks.add(testBlock);
 }
 
-// Discard accounts we have already built blocks with
-mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey())));
+if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
 
-// Do we need to build any potential new blocks?
-List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
 
-// We might need to sit the next block out, if one of our minting accounts signed the previous one
-// Skip this check for single node testnets, since they definitely need to mint every block
-byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
-boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
-if (mintedLastBlock && !isSingleNodeTestnet) {
-LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
 continue;
-}
 
-if (parentSignatureForLastLowWeightBlock != null) {
-// The last iteration found a higher weight block in the network, so sleep for a while
-// to allow is to sync the higher weight chain. We are sleeping here rather than when
-// detected as we don't want to hold the blockchain lock open.
-LOGGER.info("Sleeping for 10 seconds...");
-Thread.sleep(10 * 1000L);
-}
+// Pick best block
+final int parentHeight = previousBlockData.getHeight();
+final byte[] parentBlockSignature = previousBlockData.getSignature();
 
-for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
-// First block does the AT heavy-lifting
-if (newBlocks.isEmpty()) {
-Block newBlock = Block.mint(repository, previousBlockData, mintingAccount);
-if (newBlock == null) {
-// For some reason we can't mint right now
-moderatedLog(() -> LOGGER.info("Couldn't build a to-be-minted block"));
-continue;
-}
+BigInteger bestWeight = null;
 
-newBlocks.add(newBlock);
-} else {
-// The blocks for other minters require less effort...
-Block newBlock = newBlocks.get(0).remint(mintingAccount);
-if (newBlock == null) {
-// For some reason we can't mint right now
-moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block"));
-continue;
-}
+for (int bi = 0; bi < goodBlocks.size(); ++bi) {
+BlockData blockData = goodBlocks.get(bi).getBlockData();
 
-newBlocks.add(newBlock);
+BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
+int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
+blockSummaryData.setMinterLevel(minterLevel);
 
+BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
 
+if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
+newBlock = goodBlocks.get(bi);
+bestWeight = blockWeight;
 }
 }
 
-// No potential block candidates?
-if (newBlocks.isEmpty())
-continue;
 
-// Make sure we're the only thread modifying the blockchain
-ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
-if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) {
-LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds");
-continue;
-}
 
-boolean newBlockMinted = false;
-Block newBlock = null;
 
 try {
-// Clear repository session state so we have latest view of data
-repository.discardChanges();
+if (this.higherWeightChainExists(repository, bestWeight)) {
 
-// Now that we have blockchain lock, do final check that chain hasn't changed
-BlockData latestBlockData = blockRepository.getLastBlock();
-if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature()))
-continue;
+// Check if the base block has updated since the last time we were here
+if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
+!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
+// We've switched to a different chain, so reset the timer
+timeOfLastLowWeightBlock = NTP.getTime();
+}
+parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
 
-List<Block> goodBlocks = new ArrayList<>();
-boolean wasInvalidBlockDiscarded = false;
-Iterator<Block> newBlocksIterator = newBlocks.iterator();
-while (newBlocksIterator.hasNext()) {
-Block testBlock = newBlocksIterator.next();
+// If less than 30 seconds has passed since first detection the higher weight chain,
+// we should skip our block submission to give us the opportunity to sync to the better chain
+if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
+LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
+LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
 
-// Is new block's timestamp valid yet?
-// We do a separate check as some timestamp checks are skipped for testchains
-if (testBlock.isTimestampValid() != ValidationResult.OK)
 continue;
 
-testBlock.preProcess();
 
-// Is new block valid yet? (Before adding unconfirmed transactions)
-ValidationResult result = testBlock.isValid();
-if (result != ValidationResult.OK) {
-moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));
 
-newBlocksIterator.remove();
-wasInvalidBlockDiscarded = true;
-/*
-* Bail out fast so that we loop around from the top again.
-* This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
-* via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
-* Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
-*/
-break;
-}
 
-goodBlocks.add(testBlock);
-}
 
-if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
-continue;
 
-// Pick best block
-final int parentHeight = previousBlockData.getHeight();
-final byte[] parentBlockSignature = previousBlockData.getSignature();
 
-BigInteger bestWeight = null;
 
-for (int bi = 0; bi < goodBlocks.size(); ++bi) {
-BlockData blockData = goodBlocks.get(bi).getBlockData();
 
-BlockSummaryData blockSummaryData = new BlockSummaryData(blockData);
-int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey());
-blockSummaryData.setMinterLevel(minterLevel);
 
-BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData);
 
-if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
-newBlock = goodBlocks.get(bi);
-bestWeight = blockWeight;
-}
-}
 
-try {
-if (this.higherWeightChainExists(repository, bestWeight)) {
-
-// Check if the base block has updated since the last time we were here
-if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
-!Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
-// We've switched to a different chain, so reset the timer
-timeOfLastLowWeightBlock = NTP.getTime();
-}
-parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
 
-// If less than 30 seconds has passed since first detection the higher weight chain,
-// we should skip our block submission to give us the opportunity to sync to the better chain
-if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
-LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
-LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
-continue;
-} else {
-// More than 30 seconds have passed, so we should submit our block candidate anyway.
-LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
-}
 } else {
-LOGGER.debug("No higher weight chain found in peers");
+// More than 30 seconds have passed, so we should submit our block candidate anyway.
+LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
 }
-} catch (DataException e) {
-LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
+} else {
+LOGGER.debug("No higher weight chain found in peers");
 }
-// Discard any uncommitted changes as a result of the higher weight chain detection
-repository.discardChanges();
-
-// Clear variables that track low weight blocks
-parentSignatureForLastLowWeightBlock = null;
-timeOfLastLowWeightBlock = null;
-
-Long unconfirmedStartTime = NTP.getTime();
-
-// Add unconfirmed transactions
-addUnconfirmedTransactions(repository, newBlock);
-
-LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime() - unconfirmedStartTime)));
-
-// Sign to create block's signature
-newBlock.sign();
-
-// Is newBlock still valid?
-ValidationResult validationResult = newBlock.isValid();
-if (validationResult != ValidationResult.OK) {
-// No longer valid? Report and discard
-LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
-
-// Rebuild block candidates, just to be sure
-newBlocks.clear();
-continue;
-}
-
-// Add to blockchain - something else will notice and broadcast new block to network
-try {
-newBlock.process();
-
-repository.saveChanges();
-
-LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
-
-RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
-
-if (rewardShareData != null) {
-LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
-newBlock.getBlockData().getHeight(),
-Base58.encode(newBlock.getBlockData().getSignature()),
-Base58.encode(newBlock.getParent().getSignature()),
-rewardShareData.getMinter(),
-rewardShareData.getRecipient()));
-} else {
-LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
-newBlock.getBlockData().getHeight(),
-Base58.encode(newBlock.getBlockData().getSignature()),
-Base58.encode(newBlock.getParent().getSignature()),
-newBlock.getMinter().getAddress()));
-}
-
-// Notify network after we're released blockchain lock
-newBlockMinted = true;
-
-// Notify Controller
-repository.discardChanges(); // clear transaction status to prevent deadlocks
-Controller.getInstance().onNewBlock(newBlock.getBlockData());
-} catch (DataException e) {
-// Unable to process block - report and discard
-LOGGER.error("Unable to process newly minted block?", e);
-newBlocks.clear();
-} catch (ArithmeticException e) {
-// Unable to process block - report and discard
-LOGGER.error("Unable to process newly minted block?", e);
-newBlocks.clear();
-}
-} finally {
-blockchainLock.unlock();
+} catch (DataException e) {
+LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
 }
 
-if (newBlockMinted) {
-// Broadcast our new chain to network
-Network.getInstance().broadcastOurChain();
+// Discard any uncommitted changes as a result of the higher weight chain detection
+repository.discardChanges();
+// Clear variables that track low weight blocks
+parentSignatureForLastLowWeightBlock = null;
+timeOfLastLowWeightBlock = null;
 
+Long unconfirmedStartTime = NTP.getTime();
+
+// Add unconfirmed transactions
+addUnconfirmedTransactions(repository, newBlock);
+
+LOGGER.info(String.format("Adding %d unconfirmed transactions took %d ms", newBlock.getTransactions().size(), (NTP.getTime()-unconfirmedStartTime)));
+
+// Sign to create block's signature
+newBlock.sign();
+
+// Is newBlock still valid?
+ValidationResult validationResult = newBlock.isValid();
+if (validationResult != ValidationResult.OK) {
+// No longer valid? Report and discard
+LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name()));
+
+// Rebuild block candidates, just to be sure
+newBlocks.clear();
+continue;
 }
 
-} catch (InterruptedException e) {
-// We've been interrupted - time to exit
-return;
+// Add to blockchain - something else will notice and broadcast new block to network
+try {
+newBlock.process();
+
+repository.saveChanges();
+
+LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight()));
+
+RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey());
+
+if (rewardShareData != null) {
+LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s",
+newBlock.getBlockData().getHeight(),
+Base58.encode(newBlock.getBlockData().getSignature()),
+Base58.encode(newBlock.getParent().getSignature()),
+rewardShareData.getMinter(),
+rewardShareData.getRecipient()));
+} else {
+LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s",
+newBlock.getBlockData().getHeight(),
+Base58.encode(newBlock.getBlockData().getSignature()),
+Base58.encode(newBlock.getParent().getSignature()),
+newBlock.getMinter().getAddress()));
+}
+
+// Notify network after we're released blockchain lock
+newBlockMinted = true;
+
+// Notify Controller
+repository.discardChanges(); // clear transaction status to prevent deadlocks
+Controller.getInstance().onNewBlock(newBlock.getBlockData());
+} catch (DataException e) {
+// Unable to process block - report and discard
+LOGGER.error("Unable to process newly minted block?", e);
+newBlocks.clear();
+} catch (ArithmeticException e) {
+// Unable to process block - report and discard
+LOGGER.error("Unable to process newly minted block?", e);
+newBlocks.clear();
+}
+} finally {
+blockchainLock.unlock();
 }
+-} catch (DataException e) {
+-LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
+-} catch (Exception e) {
+-LOGGER.error(e.getMessage(), e);
++if (newBlockMinted) {
++// Broadcast our new chain to network
++Network.getInstance().broadcastOurChain();
++}
+ 
++} catch (InterruptedException e) {
++// We've been interrupted - time to exit
++return;
+ }
  }
 -} catch (Exception e) {
 -LOGGER.error(e.getMessage(), e);
 +} catch (DataException e) {
 +LOGGER.warn("Repository issue while running block minter - NO LONGER MINTING", e);
  }
  }
 
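Illustrative note (not part of the diff): on both sides of the hunk, BlockMinter picks its candidate the same way — each valid block is summarised, a BigInteger weight is computed against the parent block, and the candidate whose weight compares lowest in that loop is kept. A self-contained sketch of that selection loop, where the weight function is a stand-in rather than Qortal's Block.calcBlockWeight:

import java.math.BigInteger;
import java.util.List;

// Sketch only: keep the candidate whose weight compares lowest, as in the compareTo(bestWeight) < 0 loop above.
class CandidateSelectionSketch {
    static int pickBestIndex(List<byte[]> candidateSignatures) {
        BigInteger bestWeight = null;
        int bestIndex = -1;
        for (int bi = 0; bi < candidateSignatures.size(); ++bi) {
            BigInteger blockWeight = weightOf(candidateSignatures.get(bi)); // stand-in for Block.calcBlockWeight(...)
            if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) {
                bestIndex = bi;
                bestWeight = blockWeight;
            }
        }
        return bestIndex;
    }

    // Hypothetical weight: interpret the signature bytes as an unsigned integer.
    private static BigInteger weightOf(byte[] signature) {
        return new BigInteger(1, signature);
    }
}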
@@ -13,7 +13,6 @@ import org.qortal.block.Block;
 import org.qortal.block.BlockChain;
 import org.qortal.block.BlockChain.BlockTimingByHeight;
 import org.qortal.controller.arbitrary.*;
-import org.qortal.controller.hsqldb.HSQLDBBalanceRecorder;
 import org.qortal.controller.hsqldb.HSQLDBDataCacheManager;
 import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
 import org.qortal.controller.repository.PruneManager;
@@ -37,6 +36,7 @@ import org.qortal.network.Peer;
 import org.qortal.network.PeerAddress;
 import org.qortal.network.message.*;
 import org.qortal.repository.*;
+import org.qortal.repository.hsqldb.HSQLDBRepository;
 import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
 import org.qortal.settings.Settings;
 import org.qortal.transaction.Transaction;
@@ -73,8 +73,6 @@ import java.util.stream.Collectors;
 
 public class Controller extends Thread {
 
-public static HSQLDBRepositoryFactory REPOSITORY_FACTORY;
-
 static {
 // This must go before any calls to LogManager/Logger
 System.setProperty("log4j2.formatMsgNoLookups", "true");
@@ -405,38 +403,23 @@ public class Controller extends Thread {
 
 LOGGER.info("Starting repository");
 try {
-REPOSITORY_FACTORY = new HSQLDBRepositoryFactory(getRepositoryUrl());
-RepositoryManager.setRepositoryFactory(REPOSITORY_FACTORY);
+RepositoryFactory repositoryFactory = new HSQLDBRepositoryFactory(getRepositoryUrl());
+RepositoryManager.setRepositoryFactory(repositoryFactory);
 RepositoryManager.setRequestedCheckpoint(Boolean.TRUE);
 
 try (final Repository repository = RepositoryManager.getRepository()) {
 // RepositoryManager.rebuildTransactionSequences(repository);
 ArbitraryDataCacheManager.getInstance().buildArbitraryResourcesCache(repository, false);
-}
 
 if( Settings.getInstance().isDbCacheEnabled() ) {
 LOGGER.info("Db Cache Starting ...");
-HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager();
+HSQLDBDataCacheManager hsqldbDataCacheManager = new HSQLDBDataCacheManager((HSQLDBRepository) repositoryFactory.getRepository());
 hsqldbDataCacheManager.start();
-}
-else {
-LOGGER.info("Db Cache Disabled");
-}
-
-if( Settings.getInstance().isBalanceRecorderEnabled() ) {
-Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();
-
-if( recorder.isPresent() ) {
-LOGGER.info("Balance Recorder Starting ...");
-recorder.get().start();
 }
 else {
-LOGGER.info("Balance Recorder won't start.");
+LOGGER.info("Db Cache Disabled");
 }
 }
-else {
-LOGGER.info("Balance Recorder Disabled");
-}
 } catch (DataException e) {
 // If exception has no cause or message then repository is in use by some other process.
 if (e.getCause() == null && e.getMessage() == null) {
@@ -656,8 +639,10 @@ public class Controller extends Thread {
 boolean canBootstrap = Settings.getInstance().getBootstrap();
 boolean needsArchiveRebuild = false;
 int checkHeight = 0;
+Repository repository = null;
 
-try (final Repository repository = RepositoryManager.getRepository()){
+try {
+repository = RepositoryManager.getRepository();
 needsArchiveRebuild = (repository.getBlockArchiveRepository().fromHeight(2) == null);
 checkHeight = repository.getBlockRepository().getBlockchainHeight();
 } catch (DataException e) {
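Illustrative note (not part of the diff): on the revert side, Controller only starts the HSQLDB data cache thread when the corresponding setting is enabled, handing it the repository obtained from the factory. A rough sketch of that conditional start-up with placeholder types standing in for the Qortal classes:

// Sketch only: gate a background cache thread on a settings flag; Settings here is a stand-in interface.
class DbCacheBootstrapSketch {
    interface Settings { boolean isDbCacheEnabled(); }

    static void startDbCacheIfEnabled(Settings settings, Runnable cacheTask) {
        if (settings.isDbCacheEnabled()) {
            // Run the cache warm-up on its own thread, as the diff does with a dedicated Thread subclass.
            new Thread(cacheTask, "HSQLDB data cache").start();
        } else {
            System.out.println("Db Cache Disabled");
        }
    }
}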
@@ -13,7 +13,6 @@ import org.qortal.crypto.MemoryPoW;
 import org.qortal.crypto.Qortal25519Extras;
 import org.qortal.data.account.MintingAccountData;
 import org.qortal.data.account.RewardShareData;
-import org.qortal.data.group.GroupMemberData;
 import org.qortal.data.network.OnlineAccountData;
 import org.qortal.network.Network;
 import org.qortal.network.Peer;
@@ -45,7 +44,6 @@ public class OnlineAccountsManager {
 */
 private static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L;
 private static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L;
-private static final long ONLINE_TIMESTAMP_MODULUS_V3 = 10 * 60 * 1000L;
 
 /**
 * How many 'current' timestamp-sets of online accounts we cache.
@@ -69,13 +67,12 @@ public class OnlineAccountsManager {
 private static final long ONLINE_ACCOUNTS_COMPUTE_INITIAL_SLEEP_INTERVAL = 30 * 1000L; // ms
 
 // MemoryPoW - mainnet
-public static final int POW_BUFFER_SIZE = 1024 * 1024; // bytes
+public static final int POW_BUFFER_SIZE = 1 * 1024 * 1024; // bytes
 public static final int POW_DIFFICULTY_V1 = 18; // leading zero bits
 public static final int POW_DIFFICULTY_V2 = 19; // leading zero bits
-public static final int POW_DIFFICULTY_V3 = 6; // leading zero bits
 
 // MemoryPoW - testnet
-public static final int POW_BUFFER_SIZE_TESTNET = 1024 * 1024; // bytes
+public static final int POW_BUFFER_SIZE_TESTNET = 1 * 1024 * 1024; // bytes
 public static final int POW_DIFFICULTY_TESTNET = 5; // leading zero bits
 
 // IMPORTANT: if we ever need to dynamically modify the buffer size using a feature trigger, the
@@ -109,15 +106,11 @@ public class OnlineAccountsManager {
 
 public static long getOnlineTimestampModulus() {
 Long now = NTP.getTime();
-if (now != null && now >= BlockChain.getInstance().getOnlineAccountsModulusV2Timestamp() && now < BlockChain.getInstance().getOnlineAccountsModulusV3Timestamp()) {
+if (now != null && now >= BlockChain.getInstance().getOnlineAccountsModulusV2Timestamp()) {
 return ONLINE_TIMESTAMP_MODULUS_V2;
 }
-if (now != null && now >= BlockChain.getInstance().getOnlineAccountsModulusV3Timestamp()) {
-return ONLINE_TIMESTAMP_MODULUS_V3;
-}
-
 return ONLINE_TIMESTAMP_MODULUS_V1;
 }
 
 public static Long getCurrentOnlineAccountTimestamp() {
 Long now = NTP.getTime();
 if (now == null)
@@ -142,12 +135,9 @@ public class OnlineAccountsManager {
 if (Settings.getInstance().isTestNet())
 return POW_DIFFICULTY_TESTNET;
 
-if (timestamp >= BlockChain.getInstance().getIncreaseOnlineAccountsDifficultyTimestamp() && timestamp < BlockChain.getInstance().getDecreaseOnlineAccountsDifficultyTimestamp())
+if (timestamp >= BlockChain.getInstance().getIncreaseOnlineAccountsDifficultyTimestamp())
 return POW_DIFFICULTY_V2;
 
-if (timestamp >= BlockChain.getInstance().getDecreaseOnlineAccountsDifficultyTimestamp())
-return POW_DIFFICULTY_V3;
-
 return POW_DIFFICULTY_V1;
 }
 
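Illustrative note (not part of the diff): the revert collapses the three-level, timestamp-gated proof-of-work difficulty back to two levels — testnet keeps its own value, and mainnet switches from V1 to V2 once the "increase difficulty" trigger timestamp is reached. A small sketch of that gating, with a made-up trigger timestamp:

// Sketch only: mirrors the post-revert getPoWDifficulty() shape; the trigger value is invented.
class PowDifficultySketch {
    static final int POW_DIFFICULTY_V1 = 18;     // leading zero bits
    static final int POW_DIFFICULTY_V2 = 19;     // leading zero bits
    static final int POW_DIFFICULTY_TESTNET = 5; // leading zero bits
    static final long INCREASE_DIFFICULTY_TIMESTAMP = 1_700_000_000_000L; // hypothetical trigger

    static int difficultyFor(long timestamp, boolean isTestNet) {
        if (isTestNet)
            return POW_DIFFICULTY_TESTNET;
        if (timestamp >= INCREASE_DIFFICULTY_TIMESTAMP)
            return POW_DIFFICULTY_V2;
        return POW_DIFFICULTY_V1;
    }
}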
@@ -225,12 +215,6 @@ public class OnlineAccountsManager {
 Set<OnlineAccountData> onlineAccountsToAdd = new HashSet<>();
 Set<OnlineAccountData> onlineAccountsToRemove = new HashSet<>();
 try (final Repository repository = RepositoryManager.getRepository()) {
-List<String> mintingGroupMemberAddresses
-= repository.getGroupRepository()
-.getGroupMembers(BlockChain.getInstance().getMintingGroupId()).stream()
-.map(GroupMemberData::getMember)
-.collect(Collectors.toList());
-
 for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) {
 if (isStopping)
 return;
@@ -243,7 +227,7 @@ public class OnlineAccountsManager {
 continue;
 }
 
-boolean isValid = this.isValidCurrentAccount(repository, mintingGroupMemberAddresses, onlineAccountData);
+boolean isValid = this.isValidCurrentAccount(repository, onlineAccountData);
 if (isValid)
 onlineAccountsToAdd.add(onlineAccountData);
 
@@ -322,7 +306,7 @@ public class OnlineAccountsManager {
 return inplaceArray;
 }
 
-private static boolean isValidCurrentAccount(Repository repository, List<String> mintingGroupMemberAddresses, OnlineAccountData onlineAccountData) throws DataException {
+private static boolean isValidCurrentAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException {
 final Long now = NTP.getTime();
 if (now == null)
 return false;
@@ -357,14 +341,9 @@ public class OnlineAccountsManager {
 LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(rewardSharePublicKey)));
 return false;
 }
-// reject account address that are not in the MINTER Group
-else if( !mintingGroupMemberAddresses.contains(rewardShareData.getMinter())) {
-LOGGER.trace(() -> String.format("Rejecting online reward-share that is not in MINTER Group, account %s", rewardShareData.getMinter()));
-return false;
-}
 
 Account mintingAccount = new Account(repository, rewardShareData.getMinter());
-if (!mintingAccount.canMint(true)) { // group validation is a few lines above
+if (!mintingAccount.canMint()) {
 // Minting-account component of reward-share can no longer mint - disregard
 LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress()));
 return false;
@@ -551,7 +530,7 @@ public class OnlineAccountsManager {
 }
 
 Account mintingAccount = new Account(repository, rewardShareData.getMinter());
-if (!mintingAccount.canMint(true)) {
+if (!mintingAccount.canMint()) {
 // Minting-account component of reward-share can no longer mint - disregard
 iterator.remove();
 continue;
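Illustrative note (not part of the diff): the side being reverted away additionally rejected online accounts whose minter was not a member of the minting group, by collecting the group's member addresses once per import batch and checking each reward-share's minter against that list. A hedged sketch of that extra filter with simplified types:

import java.util.List;
import java.util.stream.Collectors;

// Sketch only: keep just the reward-shares whose minting address belongs to the minting group.
class MinterGroupFilterSketch {
    static List<String> filterByGroup(List<String> candidateMinterAddresses, List<String> mintingGroupMemberAddresses) {
        return candidateMinterAddresses.stream()
                .filter(mintingGroupMemberAddresses::contains)
                .collect(Collectors.toList());
    }
}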
|
@ -1,117 +0,0 @@
|
|||||||
package org.qortal.controller.hsqldb;
|
|
||||||
|
|
||||||
import org.apache.logging.log4j.LogManager;
|
|
||||||
import org.apache.logging.log4j.Logger;
|
|
||||||
-import org.qortal.data.account.AccountBalanceData;
-import org.qortal.repository.hsqldb.HSQLDBCacheUtils;
-import org.qortal.settings.Settings;
-
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-
-public class HSQLDBBalanceRecorder extends Thread{
-
-    private static final Logger LOGGER = LogManager.getLogger(HSQLDBBalanceRecorder.class);
-
-    private static HSQLDBBalanceRecorder SINGLETON = null;
-
-    private ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight = new ConcurrentHashMap<>();
-
-    private ConcurrentHashMap<String, List<AccountBalanceData>> balancesByAddress = new ConcurrentHashMap<>();
-
-    private int priorityRequested;
-    private int frequency;
-    private int capacity;
-
-    private HSQLDBBalanceRecorder( int priorityRequested, int frequency, int capacity) {
-
-        super("Balance Recorder");
-
-        this.priorityRequested = priorityRequested;
-        this.frequency = frequency;
-        this.capacity = capacity;
-    }
-
-    public static Optional<HSQLDBBalanceRecorder> getInstance() {
-
-        if( SINGLETON == null ) {
-
-            SINGLETON
-                = new HSQLDBBalanceRecorder(
-                    Settings.getInstance().getBalanceRecorderPriority(),
-                    Settings.getInstance().getBalanceRecorderFrequency(),
-                    Settings.getInstance().getBalanceRecorderCapacity()
-            );
-
-        }
-        else if( SINGLETON == null ) {
-
-            return Optional.empty();
-        }
-
-        return Optional.of(SINGLETON);
-    }
-
-    @Override
-    public void run() {
-
-        Thread.currentThread().setName("Balance Recorder");
-
-        HSQLDBCacheUtils.startRecordingBalances(this.balancesByHeight, this.balancesByAddress, this.priorityRequested, this.frequency, this.capacity);
-    }
-
-    public List<AccountBalanceData> getLatestRecordings(int limit, long offset) {
-        ArrayList<AccountBalanceData> data;
-
-        Optional<Integer> lastHeight = getLastHeight();
-
-        if(lastHeight.isPresent() ) {
-            List<AccountBalanceData> latest = this.balancesByHeight.get(lastHeight.get());
-
-            if( latest != null ) {
-                data = new ArrayList<>(latest.size());
-                data.addAll(
-                    latest.stream()
-                        .sorted(Comparator.comparingDouble(AccountBalanceData::getBalance).reversed())
-                        .skip(offset)
-                        .limit(limit)
-                        .collect(Collectors.toList())
-                );
-            }
-            else {
-                data = new ArrayList<>(0);
-            }
-        }
-        else {
-            data = new ArrayList<>(0);
-        }
-
-        return data;
-    }
-
-    private Optional<Integer> getLastHeight() {
-        return this.balancesByHeight.keySet().stream().sorted(Comparator.reverseOrder()).findFirst();
-    }
-
-    public List<Integer> getBlocksRecorded() {
-
-        return this.balancesByHeight.keySet().stream().collect(Collectors.toList());
-    }
-
-    public List<AccountBalanceData> getAccountBalanceRecordings(String address) {
-        return this.balancesByAddress.get(address);
-    }
-
-    @Override
-    public String toString() {
-        return "HSQLDBBalanceRecorder{" +
-                "priorityRequested=" + priorityRequested +
-                ", frequency=" + frequency +
-                ", capacity=" + capacity +
-                '}';
-    }
-}
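For orientation only: the balance recorder deleted above is reached through its `Optional`-returning singleton. The following is a minimal usage sketch, not code from the repository; the wrapper class name is hypothetical, the recorder's package location is assumed, and it presumes the pre-revert `HSQLDBBalanceRecorder`/`Settings` classes shown above are on the classpath.

```java
import java.util.List;
import java.util.Optional;

import org.qortal.data.account.AccountBalanceData;

public class BalanceRecorderExample {
    public static void main(String[] args) {
        // getInstance() lazily builds the singleton from Settings and wraps it in an Optional
        Optional<HSQLDBBalanceRecorder> recorder = HSQLDBBalanceRecorder.getInstance();

        recorder.ifPresent(r -> {
            // The recorder extends Thread; starting it schedules
            // HSQLDBCacheUtils.startRecordingBalances(...) internally.
            r.start();

            // Later: the ten largest balances at the most recently recorded height
            List<AccountBalanceData> top = r.getLatestRecordings(10, 0);
            top.forEach(b -> System.out.println(b.getAddress() + " -> " + b.getBalance()));
        });
    }
}
```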
@ -8,7 +8,11 @@ import org.qortal.settings.Settings;
 
 public class HSQLDBDataCacheManager extends Thread{
 
-    public HSQLDBDataCacheManager() {}
+    private HSQLDBRepository respository;
 
+    public HSQLDBDataCacheManager(HSQLDBRepository respository) {
+        this.respository = respository;
+    }
 
     @Override
     public void run() {
@ -16,7 +20,8 @@ public class HSQLDBDataCacheManager extends Thread{
 
         HSQLDBCacheUtils.startCaching(
             Settings.getInstance().getDbCacheThreadPriority(),
-            Settings.getInstance().getDbCacheFrequency()
+            Settings.getInstance().getDbCacheFrequency(),
+            this.respository
         );
     }
 }
@ -39,24 +39,15 @@ public class AtStatesPruner implements Runnable {
             }
         }
 
-        int pruneStartHeight;
-        int maxLatestAtStatesHeight;
 
         try (final Repository repository = RepositoryManager.getRepository()) {
-            pruneStartHeight = repository.getATRepository().getAtPruneHeight();
-            maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+            int pruneStartHeight = repository.getATRepository().getAtPruneHeight();
+            int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
 
             repository.discardChanges();
             repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
             repository.saveChanges();
-        } catch (Exception e) {
-            LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
-            return;
-        }
 
-        while (!Controller.isStopping()) {
-            try (final Repository repository = RepositoryManager.getRepository()) {
 
+            while (!Controller.isStopping()) {
                 try {
                     repository.discardChanges();
 
@ -111,25 +102,28 @@ public class AtStatesPruner implements Runnable {
 
                        final int finalPruneStartHeight = pruneStartHeight;
                        LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
-                    } else {
+                    }
+                    else {
                        // We've pruned up to the upper prunable height
                        // Back off for a while to save CPU for syncing
                        repository.discardChanges();
-                       Thread.sleep(5 * 60 * 1000L);
+                       Thread.sleep(5*60*1000L);
                     }
                 }
             } catch (InterruptedException e) {
-                if (Controller.isStopping()) {
+                if(Controller.isStopping()) {
                     LOGGER.info("AT States Pruning Shutting Down");
-                } else {
+                }
+                else {
                     LOGGER.warn("AT States Pruning interrupted. Trying again. Report this error immediately to the developers.", e);
                 }
             } catch (Exception e) {
                 LOGGER.warn("AT States Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
             }
-        } catch(Exception e){
-            LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
            }
+        } catch (Exception e) {
+            LOGGER.error("AT States Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+        }
     }
 
 }
@ -26,23 +26,15 @@ public class AtStatesTrimmer implements Runnable {
             return;
         }
 
-        int trimStartHeight;
-        int maxLatestAtStatesHeight;
 
         try (final Repository repository = RepositoryManager.getRepository()) {
-            trimStartHeight = repository.getATRepository().getAtTrimHeight();
-            maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
+            int trimStartHeight = repository.getATRepository().getAtTrimHeight();
+            int maxLatestAtStatesHeight = PruneManager.getMaxHeightForLatestAtStates(repository);
 
             repository.discardChanges();
             repository.getATRepository().rebuildLatestAtStates(maxLatestAtStatesHeight);
             repository.saveChanges();
-        } catch (Exception e) {
-            LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
-            return;
-        }
 
            while (!Controller.isStopping()) {
-            try (final Repository repository = RepositoryManager.getRepository()) {
                 try {
                     repository.discardChanges();
 
@ -100,9 +92,9 @@ public class AtStatesTrimmer implements Runnable {
                } catch (Exception e) {
                    LOGGER.warn("AT States Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
                }
-            } catch (Exception e) {
-                LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
            }
+        } catch (Exception e) {
+            LOGGER.error("AT States Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+        }
     }
 
@ -30,13 +30,11 @@ public class BlockArchiver implements Runnable {
             return;
         }
 
-        int startHeight;
 
         try (final Repository repository = RepositoryManager.getRepository()) {
             // Don't even start building until initial rush has ended
             Thread.sleep(INITIAL_SLEEP_PERIOD);
 
-            startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
+            int startHeight = repository.getBlockArchiveRepository().getBlockArchiveHeight();
 
             // Don't attempt to archive if we have no ATStatesHeightIndex, as it will be too slow
             boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@ -45,16 +43,10 @@ public class BlockArchiver implements Runnable {
                 repository.discardChanges();
                 return;
             }
-        } catch (Exception e) {
-            LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
-            return;
-        }
 
            LOGGER.info("Starting block archiver from height {}...", startHeight);
 
-        while (!Controller.isStopping()) {
-            try (final Repository repository = RepositoryManager.getRepository()) {
 
+            while (!Controller.isStopping()) {
                 try {
                     repository.discardChanges();
 
@ -115,17 +107,20 @@ public class BlockArchiver implements Runnable {
                        LOGGER.info("Caught exception when creating block cache", e);
                    }
                } catch (InterruptedException e) {
-                   if (Controller.isStopping()) {
+                   if(Controller.isStopping()) {
                        LOGGER.info("Block Archiving Shutting Down");
-                   } else {
+                   }
+                   else {
                        LOGGER.warn("Block Archiving interrupted. Trying again. Report this error immediately to the developers.", e);
                    }
                } catch (Exception e) {
                    LOGGER.warn("Block Archiving stopped working. Trying again. Report this error immediately to the developers.", e);
                }
-        } catch(Exception e){
-            LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
            }
+        } catch (Exception e) {
+            LOGGER.error("Block Archiving is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+        }
 
        }
 
 }
@ -39,10 +39,8 @@ public class BlockPruner implements Runnable {
             }
         }
 
-        int pruneStartHeight;
 
         try (final Repository repository = RepositoryManager.getRepository()) {
-            pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
+            int pruneStartHeight = repository.getBlockRepository().getBlockPruneHeight();
 
             // Don't attempt to prune if we have no ATStatesHeightIndex, as it will be too slow
             boolean hasAtStatesHeightIndex = repository.getATRepository().hasAtStatesHeightIndex();
@ -50,15 +48,8 @@ public class BlockPruner implements Runnable {
                 LOGGER.info("Unable to start block pruner due to missing ATStatesHeightIndex. Bootstrapping is recommended.");
                 return;
             }
-        } catch (Exception e) {
-            LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
-            return;
-        }
 
-        while (!Controller.isStopping()) {
 
-            try (final Repository repository = RepositoryManager.getRepository()) {
 
+            while (!Controller.isStopping()) {
                 try {
                     repository.discardChanges();
 
@ -131,9 +122,10 @@ public class BlockPruner implements Runnable {
                } catch (Exception e) {
                    LOGGER.warn("Block Pruning stopped working. Trying again. Report this error immediately to the developers.", e);
                }
-        } catch(Exception e){
-            LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
            }
+        } catch (Exception e) {
+            LOGGER.error("Block Pruning is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+        }
     }
 
 }
@ -28,21 +28,13 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
             return;
         }
 
-        int trimStartHeight;
 
         try (final Repository repository = RepositoryManager.getRepository()) {
             // Don't even start trimming until initial rush has ended
             Thread.sleep(INITIAL_SLEEP_PERIOD);
 
-            trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
-        } catch (Exception e) {
-            LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
-            return;
-        }
+            int trimStartHeight = repository.getBlockRepository().getOnlineAccountsSignaturesTrimHeight();
 
-        while (!Controller.isStopping()) {
-            try (final Repository repository = RepositoryManager.getRepository()) {
 
+            while (!Controller.isStopping()) {
                 try {
                     repository.discardChanges();
 
@ -96,9 +88,10 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
                } catch (Exception e) {
                    LOGGER.warn("Online Accounts Signatures Trimming stopped working. Trying again. Report this error immediately to the developers.", e);
                }
-            } catch (Exception e) {
-                LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
            }
+        } catch (Exception e) {
+            LOGGER.error("Online Accounts Signatures Trimming is not working! Not trying again. Restart ASAP. Report this error immediately to the developers.", e);
+        }
     }
 
 }
@ -1,11 +1,8 @@
 package org.qortal.data.block;
 
 import com.google.common.primitives.Bytes;
-import org.qortal.account.Account;
 import org.qortal.block.BlockChain;
-import org.qortal.repository.DataException;
-import org.qortal.repository.Repository;
-import org.qortal.repository.RepositoryManager;
+import org.qortal.crypto.Crypto;
 import org.qortal.settings.Settings;
 import org.qortal.utils.NTP;
 
@ -227,7 +224,7 @@ public class BlockData implements Serializable {
         }
         return 0;
     }
 
     public boolean isTrimmed() {
         long onlineAccountSignaturesTrimmedTimestamp = NTP.getTime() - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
         long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
@ -235,31 +232,11 @@ public class BlockData implements Serializable {
         return blockTimestamp < onlineAccountSignaturesTrimmedTimestamp && blockTimestamp < currentTrimmableTimestamp;
     }
 
-    public String getMinterAddressFromPublicKey() {
-        try (final Repository repository = RepositoryManager.getRepository()) {
-            return Account.getRewardShareMintingAddress(repository, this.minterPublicKey);
-        } catch (DataException e) {
-            return "Unknown";
-        }
-    }
-
-    public int getMinterLevelFromPublicKey() {
-        try (final Repository repository = RepositoryManager.getRepository()) {
-            return Account.getRewardShareEffectiveMintingLevel(repository, this.minterPublicKey);
-        } catch (DataException e) {
-            return 0;
-        }
-    }
-
     // JAXB special
 
     @XmlElement(name = "minterAddress")
     protected String getMinterAddress() {
-        return getMinterAddressFromPublicKey();
+        return Crypto.toAddress(this.minterPublicKey);
     }
 
-    @XmlElement(name = "minterLevel")
-    protected int getMinterLevel() {
-        return getMinterLevelFromPublicKey();
-    }
 }
@ -4,15 +4,10 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.qortal.network.Network;
 import org.qortal.network.Peer;
-import org.qortal.utils.DaemonThreadFactory;
 import org.qortal.utils.ExecuteProduceConsume.Task;
 
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 
 public class PeerConnectTask implements Task {
     private static final Logger LOGGER = LogManager.getLogger(PeerConnectTask.class);
-    private static final ExecutorService connectionExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory(8));
 
     private final Peer peer;
     private final String name;
@ -29,24 +24,6 @@ public class PeerConnectTask implements Task {
 
     @Override
     public void perform() throws InterruptedException {
-        // Submit connection task to a dedicated thread pool for non-blocking I/O
-        connectionExecutor.submit(() -> {
-            try {
-                connectPeerAsync(peer);
-            } catch (InterruptedException e) {
-                LOGGER.error("Connection attempt interrupted for peer {}", peer, e);
-                Thread.currentThread().interrupt(); // Reset interrupt flag
-            }
-        });
-    }
-
-    private void connectPeerAsync(Peer peer) throws InterruptedException {
-        // Perform peer connection in a separate thread to avoid blocking main task execution
-        try {
-            Network.getInstance().connectPeer(peer);
-            LOGGER.trace("Successfully connected to peer {}", peer);
-        } catch (Exception e) {
-            LOGGER.error("Error connecting to peer {}", peer, e);
-        }
+        Network.getInstance().connectPeer(peer);
     }
 }
@ -22,6 +22,6 @@ public interface ChatRepository {
 
     public ChatMessage toChatMessage(ChatTransactionData chatTransactionData, Encoding encoding) throws DataException;
 
-    public ActiveChats getActiveChats(String address, Encoding encoding, Boolean hasChatReference) throws DataException;
+    public ActiveChats getActiveChats(String address, Encoding encoding) throws DataException;
 
 }
@ -5,13 +5,10 @@ import org.apache.logging.log4j.Logger;
 import org.qortal.api.SearchMode;
 import org.qortal.arbitrary.misc.Category;
 import org.qortal.arbitrary.misc.Service;
-import org.qortal.controller.Controller;
-import org.qortal.data.account.AccountBalanceData;
 import org.qortal.data.arbitrary.ArbitraryResourceCache;
 import org.qortal.data.arbitrary.ArbitraryResourceData;
 import org.qortal.data.arbitrary.ArbitraryResourceMetadata;
 import org.qortal.data.arbitrary.ArbitraryResourceStatus;
-import org.qortal.repository.DataException;
 
 import java.sql.ResultSet;
 import java.sql.SQLException;
@ -51,11 +48,6 @@ public class HSQLDBCacheUtils {
         }
     };
     private static final String DEFAULT_IDENTIFIER = "default";
-    private static final int ZERO = 0;
-    public static final String DB_CACHE_TIMER = "DB Cache Timer";
-    public static final String DB_CACHE_TIMER_TASK = "DB Cache Timer Task";
-    public static final String BALANCE_RECORDER_TIMER = "Balance Recorder Timer";
-    public static final String BALANCE_RECORDER_TIMER_TASK = "Balance Recorder Timer Task";
 
     /**
      *
@ -359,124 +351,13 @@ public class HSQLDBCacheUtils {
      * Start Caching
      *
      * @param priorityRequested the thread priority to fill cache in
      * @param frequency the frequency to fill the cache (in seconds)
+     * @param respository the data source
      *
      * @return the data cache
      */
-    public static void startCaching(int priorityRequested, int frequency) {
+    public static void startCaching(int priorityRequested, int frequency, HSQLDBRepository respository) {
 
-        Timer timer = buildTimer(DB_CACHE_TIMER, priorityRequested);
-
-        TimerTask task = new TimerTask() {
-            @Override
-            public void run() {
-
-                Thread.currentThread().setName(DB_CACHE_TIMER_TASK);
-
-                try (final HSQLDBRepository respository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
-                    fillCache(ArbitraryResourceCache.getInstance(), respository);
-                }
-                catch( DataException e ) {
-                    LOGGER.error(e.getMessage(), e);
-                }
-            }
-        };
-
-        // delay 1 second
-        timer.scheduleAtFixedRate(task, 1000, frequency * 1000);
-    }
-
-    /**
-     * Start Recording Balances
-     *
-     * @param queue the queue to add to, remove oldest data if necssary
-     * @param repository the db repsoitory
-     * @param priorityRequested the requested thread priority
-     * @param frequency the recording frequencies, in minutes
-     */
-    public static void startRecordingBalances(
-            final ConcurrentHashMap<Integer, List<AccountBalanceData>> balancesByHeight,
-            final ConcurrentHashMap<String, List<AccountBalanceData>> balancesByAddress,
-            int priorityRequested,
-            int frequency,
-            int capacity) {
-
-        Timer timer = buildTimer(BALANCE_RECORDER_TIMER, priorityRequested);
-
-        TimerTask task = new TimerTask() {
-            @Override
-            public void run() {
-
-                Thread.currentThread().setName(BALANCE_RECORDER_TIMER_TASK);
-
-                try (final HSQLDBRepository repository = (HSQLDBRepository) Controller.REPOSITORY_FACTORY.getRepository()) {
-                    while (balancesByHeight.size() > capacity + 1) {
-                        Optional<Integer> firstHeight = balancesByHeight.keySet().stream().sorted().findFirst();
-
-                        if (firstHeight.isPresent()) balancesByHeight.remove(firstHeight.get());
-                    }
-
-                    // get current balances
-                    List<AccountBalanceData> accountBalances = getAccountBalances(repository);
-
-                    // get anyone of the balances
-                    Optional<AccountBalanceData> data = accountBalances.stream().findAny();
-
-                    // if there are any balances, then record them
-                    if (data.isPresent()) {
-                        // map all new balances to the current height
-                        balancesByHeight.put(data.get().getHeight(), accountBalances);
-
-                        // for each new balance, map to address
-                        for (AccountBalanceData accountBalance : accountBalances) {
-
-                            // get recorded balances for this address
-                            List<AccountBalanceData> establishedBalances
-                                = balancesByAddress.getOrDefault(accountBalance.getAddress(), new ArrayList<>(0));
-
-                            // start a new list of recordings for this address, add the new balance and add the established
-                            // balances
-                            List<AccountBalanceData> balances = new ArrayList<>(establishedBalances.size() + 1);
-                            balances.add(accountBalance);
-                            balances.addAll(establishedBalances);
-
-                            // reset tha balances for this address
-                            balancesByAddress.put(accountBalance.getAddress(), balances);
-
-                            // TODO: reduce account balances to capacity
-                        }
-
-                        // reduce height balances to capacity
-                        while( balancesByHeight.size() > capacity ) {
-                            Optional<Integer> lowestHeight
-                                = balancesByHeight.entrySet().stream()
-                                    .min(Comparator.comparingInt(Map.Entry::getKey))
-                                    .map(Map.Entry::getKey);
-
-                            if (lowestHeight.isPresent()) balancesByHeight.entrySet().remove(lowestHeight);
-                        }
-                    }
-                } catch (DataException e) {
-                    LOGGER.error(e.getMessage(), e);
-                }
-            }
-        };
-
-        // wait 5 minutes
-        timer.scheduleAtFixedRate(task, 300_000, frequency * 60_000);
-    }
-
-    /**
-     * Build Timer
-     *
-     * Build a timer for scheduling a timer task.
-     *
-     * @param name the name for the thread running the timer task
-     * @param priorityRequested the priority for the thread running the timer task
-     *
-     * @return a timer for scheduling a timer task
-     */
-    private static Timer buildTimer( final String name, int priorityRequested) {
         // ensure priority is in between 1-10
         final int priority = Math.max(0, Math.min(10, priorityRequested));
 
@ -484,7 +365,7 @@ public class HSQLDBCacheUtils {
         Timer timer = new Timer(true) { // 'true' to make the Timer daemon
             @Override
             public void schedule(TimerTask task, long delay) {
-                Thread thread = new Thread(task, name) {
+                Thread thread = new Thread(task) {
                     @Override
                     public void run() {
                         this.setPriority(priority);
@ -495,7 +376,17 @@ public class HSQLDBCacheUtils {
                 thread.start();
             }
         };
-        return timer;
+
+        TimerTask task = new TimerTask() {
+            @Override
+            public void run() {
+
+                fillCache(ArbitraryResourceCache.getInstance(), respository);
+            }
+        };
+
+        // delay 1 second
+        timer.scheduleAtFixedRate(task, 1000, frequency * 1000);
     }
 
     /**
@ -650,43 +541,4 @@ public class HSQLDBCacheUtils {
 
         return resources;
     }
 
-    public static List<AccountBalanceData> getAccountBalances(HSQLDBRepository repository) {
-
-        StringBuilder sql = new StringBuilder();
-
-        sql.append("SELECT account, balance, height ");
-        sql.append("FROM ACCOUNTBALANCES as balances ");
-        sql.append("JOIN (SELECT height FROM BLOCKS ORDER BY height DESC LIMIT 1) AS max_height ON true ");
-        sql.append("WHERE asset_id=0");
-
-        List<AccountBalanceData> data = new ArrayList<>();
-
-        LOGGER.info( "Getting account balances ...");
-
-        try {
-            Statement statement = repository.connection.createStatement();
-
-            ResultSet resultSet = statement.executeQuery(sql.toString());
-
-            if (resultSet == null || !resultSet.next())
-                return new ArrayList<>(0);
-
-            do {
-                String account = resultSet.getString(1);
-                long balance = resultSet.getLong(2);
-                int height = resultSet.getInt(3);
-
-                data.add(new AccountBalanceData(account, ZERO, balance, height));
-            } while (resultSet.next());
-        } catch (SQLException e) {
-            LOGGER.warn(e.getMessage());
-        } catch (Exception e) {
-            LOGGER.error(e.getMessage(), e);
-        }
-
-        LOGGER.info("Retrieved account balances: count = " + data.size());
-
-        return data;
-    }
 }
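Both versions of `startCaching` above share the same scheduling shape: a daemon `java.util.Timer` runs a `TimerTask` that renames its thread, adjusts its priority and refills the cache at a fixed frequency. A minimal, self-contained sketch of that pattern follows; the thread name and the 10-second frequency are illustrative, not values taken from the Qortal settings.

```java
import java.util.Timer;
import java.util.TimerTask;

public class DaemonTimerSketch {
    public static void main(String[] args) throws InterruptedException {
        // Daemon Timer, so the JVM can exit even if the task is still scheduled
        Timer timer = new Timer("DB Cache Timer", true);

        TimerTask task = new TimerTask() {
            @Override
            public void run() {
                // The Qortal task renames its thread and fills the cache here;
                // this sketch just lowers priority and prints a marker.
                Thread.currentThread().setName("DB Cache Timer Task");
                Thread.currentThread().setPriority(Thread.MIN_PRIORITY);
                System.out.println("cache refresh tick");
            }
        };

        long frequencySeconds = 10; // in Qortal this would come from Settings
        timer.scheduleAtFixedRate(task, 1_000, frequencySeconds * 1_000);

        Thread.sleep(25_000); // let a few ticks run, then stop
        timer.cancel();
    }
}
```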
@ -23,7 +23,7 @@ public class HSQLDBChatRepository implements ChatRepository {
     public HSQLDBChatRepository(HSQLDBRepository repository) {
         this.repository = repository;
     }
 
     @Override
     public List<ChatMessage> getMessagesMatchingCriteria(Long before, Long after, Integer txGroupId, byte[] referenceBytes,
             byte[] chatReferenceBytes, Boolean hasChatReference, List<String> involving, String senderAddress,
@ -176,14 +176,14 @@ public class HSQLDBChatRepository implements ChatRepository {
     }
 
     @Override
-    public ActiveChats getActiveChats(String address, Encoding encoding, Boolean hasChatReference) throws DataException {
-        List<GroupChat> groupChats = getActiveGroupChats(address, encoding, hasChatReference);
-        List<DirectChat> directChats = getActiveDirectChats(address, hasChatReference);
+    public ActiveChats getActiveChats(String address, Encoding encoding) throws DataException {
+        List<GroupChat> groupChats = getActiveGroupChats(address, encoding);
+        List<DirectChat> directChats = getActiveDirectChats(address);
 
         return new ActiveChats(groupChats, directChats);
     }
 
-    private List<GroupChat> getActiveGroupChats(String address, Encoding encoding, Boolean hasChatReference) throws DataException {
+    private List<GroupChat> getActiveGroupChats(String address, Encoding encoding) throws DataException {
         // Find groups where address is a member and potential latest message details
         String groupsSql = "SELECT group_id, group_name, latest_timestamp, sender, sender_name, signature, data "
                 + "FROM GroupMembers "
@ -194,19 +194,11 @@ public class HSQLDBChatRepository implements ChatRepository {
                 + "JOIN Transactions USING (signature) "
                 + "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
                 // NOTE: We need to qualify "Groups.group_id" here to avoid "General error" bug in HSQLDB v2.5.0
-                + "WHERE tx_group_id = Groups.group_id AND type = " + TransactionType.CHAT.value + " ";
+                + "WHERE tx_group_id = Groups.group_id AND type = " + TransactionType.CHAT.value + " "
+                + "ORDER BY created_when DESC "
-        if (hasChatReference != null) {
-            if (hasChatReference) {
-                groupsSql += "AND chat_reference IS NOT NULL ";
-            } else {
-                groupsSql += "AND chat_reference IS NULL ";
-            }
-        }
-        groupsSql += "ORDER BY created_when DESC "
                 + "LIMIT 1"
                 + ") AS LatestMessages ON TRUE "
                 + "WHERE address = ?";
 
         List<GroupChat> groupChats = new ArrayList<>();
         try (ResultSet resultSet = this.repository.checkedExecute(groupsSql, address)) {
@ -238,16 +230,8 @@ public class HSQLDBChatRepository implements ChatRepository {
                 + "JOIN Transactions USING (signature) "
                 + "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
                 + "WHERE tx_group_id = 0 "
-                + "AND recipient IS NULL ";
+                + "AND recipient IS NULL "
+                + "ORDER BY created_when DESC "
-        if (hasChatReference != null) {
-            if (hasChatReference) {
-                grouplessSql += "AND chat_reference IS NOT NULL ";
-            } else {
-                grouplessSql += "AND chat_reference IS NULL ";
-            }
-        }
-        grouplessSql += "ORDER BY created_when DESC "
                 + "LIMIT 1";
 
         try (ResultSet resultSet = this.repository.checkedExecute(grouplessSql)) {
@ -275,7 +259,7 @@ public class HSQLDBChatRepository implements ChatRepository {
         return groupChats;
     }
 
-    private List<DirectChat> getActiveDirectChats(String address, Boolean hasChatReference) throws DataException {
+    private List<DirectChat> getActiveDirectChats(String address) throws DataException {
         // Find chat messages involving address
         String directSql = "SELECT other_address, name, latest_timestamp, sender, sender_name "
                 + "FROM ("
@ -291,21 +275,11 @@ public class HSQLDBChatRepository implements ChatRepository {
                 + "NATURAL JOIN Transactions "
                 + "LEFT OUTER JOIN Names AS SenderNames ON SenderNames.owner = sender "
                 + "WHERE (sender = other_address AND recipient = ?) "
-                + "OR (sender = ? AND recipient = other_address) ";
+                + "OR (sender = ? AND recipient = other_address) "
+                + "ORDER BY created_when DESC "
-        // Apply hasChatReference filter
-        if (hasChatReference != null) {
-            if (hasChatReference) {
-                directSql += "AND chat_reference IS NOT NULL ";
-            } else {
-                directSql += "AND chat_reference IS NULL ";
-            }
-        }
-
-        directSql += "ORDER BY created_when DESC "
-                + "LIMIT 1"
-                + ") AS LatestMessages "
-                + "LEFT OUTER JOIN Names ON owner = other_address";
+                + "LIMIT 1"
+                + ") AS LatestMessages "
+                + "LEFT OUTER JOIN Names ON owner = other_address";
 
         Object[] bindParams = new Object[] { address, address, address, address };
 
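The removed branches above all follow the same conditional-SQL pattern: a three-state `Boolean` flag appends an optional `chat_reference` predicate before the `ORDER BY` clause. The sketch below shows that pattern in isolation; it is not the repository code, and the table and column list in the SELECT are illustrative only (`chat_reference`, `recipient` and `created_when` do appear in the diff above).

```java
import java.util.ArrayList;
import java.util.List;

public class OptionalFilterSqlSketch {

    // Build a query with an optional three-state filter, mirroring the
    // hasChatReference handling that the revert removes.
    static String buildChatSql(Boolean hasChatReference, List<Object> bindParams, String address) {
        StringBuilder sql = new StringBuilder(
            "SELECT signature, sender, recipient, created_when FROM ChatTransactions WHERE recipient = ? ");
        bindParams.add(address);

        // null means "don't filter"; TRUE/FALSE select messages with/without a chat_reference
        if (hasChatReference != null) {
            if (hasChatReference) {
                sql.append("AND chat_reference IS NOT NULL ");
            } else {
                sql.append("AND chat_reference IS NULL ");
            }
        }

        // Ordering and limits are appended only after all predicates
        sql.append("ORDER BY created_when DESC LIMIT 1");
        return sql.toString();
    }

    public static void main(String[] args) {
        List<Object> bindParams = new ArrayList<>();
        System.out.println(buildChatSql(Boolean.TRUE, bindParams, "QexampleAddress"));
        System.out.println(buildChatSql(null, bindParams, "QexampleAddress"));
    }
}
```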
@ -454,41 +454,40 @@ public class HSQLDBDatabaseUpdates {
 
             case 12:
                 // Groups
-                // NOTE: We need to set Groups to `Groups` here to avoid SQL Standard Keywords in HSQLDB v2.7.4
-                stmt.execute("CREATE TABLE `Groups` (group_id GroupID, owner QortalAddress NOT NULL, group_name GroupName NOT NULL, "
+                stmt.execute("CREATE TABLE Groups (group_id GroupID, owner QortalAddress NOT NULL, group_name GroupName NOT NULL, "
                         + "created_when EpochMillis NOT NULL, updated_when EpochMillis, is_open BOOLEAN NOT NULL, "
                         + "approval_threshold TINYINT NOT NULL, min_block_delay INTEGER NOT NULL, max_block_delay INTEGER NOT NULL, "
                         + "reference Signature, creation_group_id GroupID, reduced_group_name GroupName NOT NULL, "
                         + "description GenericDescription NOT NULL, PRIMARY KEY (group_id))");
                 // For finding groups by name
-                stmt.execute("CREATE INDEX GroupNameIndex on `Groups` (group_name)");
+                stmt.execute("CREATE INDEX GroupNameIndex on Groups (group_name)");
                 // For finding groups by reduced name
-                stmt.execute("CREATE INDEX GroupReducedNameIndex on `Groups` (reduced_group_name)");
+                stmt.execute("CREATE INDEX GroupReducedNameIndex on Groups (reduced_group_name)");
                 // For finding groups by owner
-                stmt.execute("CREATE INDEX GroupOwnerIndex ON `Groups` (owner)");
+                stmt.execute("CREATE INDEX GroupOwnerIndex ON Groups (owner)");
 
                 // We need a corresponding trigger to make sure new group_id values are assigned sequentially starting from 1
-                stmt.execute("CREATE TRIGGER Group_ID_Trigger BEFORE INSERT ON `Groups` "
+                stmt.execute("CREATE TRIGGER Group_ID_Trigger BEFORE INSERT ON Groups "
                         + "REFERENCING NEW ROW AS new_row FOR EACH ROW WHEN (new_row.group_id IS NULL) "
-                        + "SET new_row.group_id = (SELECT IFNULL(MAX(group_id) + 1, 1) FROM `Groups`)");
+                        + "SET new_row.group_id = (SELECT IFNULL(MAX(group_id) + 1, 1) FROM Groups)");
 
                 // Admins
                 stmt.execute("CREATE TABLE GroupAdmins (group_id GroupID, admin QortalAddress, reference Signature NOT NULL, "
-                        + "PRIMARY KEY (group_id, admin), FOREIGN KEY (group_id) REFERENCES `Groups` (group_id) ON DELETE CASCADE)");
+                        + "PRIMARY KEY (group_id, admin), FOREIGN KEY (group_id) REFERENCES Groups (group_id) ON DELETE CASCADE)");
                 // For finding groups by admin address
                 stmt.execute("CREATE INDEX GroupAdminIndex ON GroupAdmins (admin)");
 
                 // Members
                 stmt.execute("CREATE TABLE GroupMembers (group_id GroupID, address QortalAddress, "
                         + "joined_when EpochMillis NOT NULL, reference Signature NOT NULL, "
-                        + "PRIMARY KEY (group_id, address), FOREIGN KEY (group_id) REFERENCES `Groups` (group_id) ON DELETE CASCADE)");
+                        + "PRIMARY KEY (group_id, address), FOREIGN KEY (group_id) REFERENCES Groups (group_id) ON DELETE CASCADE)");
                 // For finding groups by member address
                 stmt.execute("CREATE INDEX GroupMemberIndex ON GroupMembers (address)");
 
                 // Invites
                 stmt.execute("CREATE TABLE GroupInvites (group_id GroupID, inviter QortalAddress, invitee QortalAddress, "
                         + "expires_when EpochMillis, reference Signature, "
-                        + "PRIMARY KEY (group_id, invitee), FOREIGN KEY (group_id) REFERENCES `Groups` (group_id) ON DELETE CASCADE)");
+                        + "PRIMARY KEY (group_id, invitee), FOREIGN KEY (group_id) REFERENCES Groups (group_id) ON DELETE CASCADE)");
                 // For finding invites sent by inviter
                 stmt.execute("CREATE INDEX GroupInviteInviterIndex ON GroupInvites (inviter)");
                 // For finding invites by group
@ -504,7 +503,7 @@ public class HSQLDBDatabaseUpdates {
                 // NULL expires_when means does not expire!
                 stmt.execute("CREATE TABLE GroupBans (group_id GroupID, offender QortalAddress, admin QortalAddress NOT NULL, "
                         + "banned_when EpochMillis NOT NULL, reason GenericDescription NOT NULL, expires_when EpochMillis, reference Signature NOT NULL, "
-                        + "PRIMARY KEY (group_id, offender), FOREIGN KEY (group_id) REFERENCES `Groups` (group_id) ON DELETE CASCADE)");
+                        + "PRIMARY KEY (group_id, offender), FOREIGN KEY (group_id) REFERENCES Groups (group_id) ON DELETE CASCADE)");
                 // For expiry maintenance
                 stmt.execute("CREATE INDEX GroupBanExpiryIndex ON GroupBans (expires_when)");
                 break;
@ -213,7 +213,7 @@ public class Settings {
     public long recoveryModeTimeout = 9999999999999L;
 
     /** Minimum peer version number required in order to sync with them */
-    private String minPeerVersion = "4.6.5";
+    private String minPeerVersion = "4.6.0";
     /** Whether to allow connections with peers below minPeerVersion
      * If true, we won't sync with them but they can still sync with us, and will show in the peers list
      * If false, sync will be blocked both ways, and they will not appear in the peers list */
@ -222,7 +222,7 @@ public class Settings {
     /** Minimum time (in seconds) that we should attempt to remain connected to a peer for */
     private int minPeerConnectionTime = 2 * 60 * 60; // seconds
     /** Maximum time (in seconds) that we should attempt to remain connected to a peer for */
-    private int maxPeerConnectionTime = 6 * 60 * 60; // seconds
+    private int maxPeerConnectionTime = 4 * 60 * 60; // seconds
     /** Maximum time (in seconds) that a peer should remain connected when requesting QDN data */
     private int maxDataPeerConnectionTime = 30 * 60; // seconds
 
@ -281,10 +281,7 @@ public class Settings {
     // Auto-update sources
     private String[] autoUpdateRepos = new String[] {
             "https://github.com/Qortal/qortal/raw/%s/qortal.update",
-            "https://raw.githubusercontent.com@151.101.16.133/Qortal/qortal/%s/qortal.update",
-            "https://qortal.link/Auto-Update/%s/qortal.update",
-            "https://qortal.name/auto-Update/%s/qortal.update",
-            "https://gitea.qortal.link/qortal/qortal/raw/%s/qortal.update"
+            "https://raw.githubusercontent.com@151.101.16.133/Qortal/qortal/%s/qortal.update"
     };
 
     // Lists
@ -444,14 +441,6 @@ public class Settings {
      */
     private long archivingPause = 3000;
 
-    private boolean balanceRecorderEnabled = false;
-
-    private int balanceRecorderPriority = 1;
-
-    private int balanceRecorderFrequency = 2*60*1000;
-
-    private int balanceRecorderCapacity = 1000;
-
     // Domain mapping
     public static class ThreadLimit {
         private String messageType;
@ -1241,20 +1230,4 @@ public class Settings {
     public long getArchivingPause() {
         return archivingPause;
     }
 
-    public int getBalanceRecorderPriority() {
-        return balanceRecorderPriority;
-    }
-
-    public int getBalanceRecorderFrequency() {
-        return balanceRecorderFrequency;
-    }
-
-    public int getBalanceRecorderCapacity() {
-        return balanceRecorderCapacity;
-    }
-
-    public boolean isBalanceRecorderEnabled() {
-        return balanceRecorderEnabled;
-    }
 }
@ -123,7 +123,7 @@ public class RewardShareTransaction extends Transaction {
         final boolean isCancellingSharePercent = this.rewardShareTransactionData.getSharePercent() < 0;
 
         // Creator themselves needs to be allowed to mint (unless cancelling)
-        if (!isCancellingSharePercent && !creator.canMint(false))
+        if (!isCancellingSharePercent && !creator.canMint())
             return ValidationResult.NOT_MINTING_ACCOUNT;
 
         // Qortal: special rules in play depending whether recipient is also minter
@ -29,7 +29,6 @@
     "onlineAccountSignaturesMinLifetime": 43200000,
     "onlineAccountSignaturesMaxLifetime": 86400000,
     "onlineAccountsModulusV2Timestamp": 1659801600000,
-    "onlineAccountsModulusV3Timestamp": 1731961800000,
     "selfSponsorshipAlgoV1SnapshotTimestamp": 1670230000000,
     "selfSponsorshipAlgoV2SnapshotTimestamp": 1708360200000,
     "selfSponsorshipAlgoV3SnapshotTimestamp": 1708432200000,
@ -96,7 +95,6 @@
     "transactionV6Timestamp": 9999999999999,
     "disableReferenceTimestamp": 1655222400000,
     "increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-    "decreaseOnlineAccountsDifficultyTimestamp": 1731958200000,
     "onlineAccountMinterLevelValidationHeight": 1092000,
     "selfSponsorshipAlgoV1Height": 1092400,
     "selfSponsorshipAlgoV2Height": 1611200,
@ -111,9 +109,7 @@
     "disableRewardshareHeight": 1899100,
     "enableRewardshareHeight": 1905100,
     "onlyMintWithNameHeight": 1900300,
-    "removeOnlyMintWithNameHeight": 1935500,
-    "groupMemberCheckHeight": 1902700,
-    "fixBatchRewardHeight": 1945900
+    "groupMemberCheckHeight": 1902700
   },
   "checkpoints": [
     { "height": 1136300, "signature": "3BbwawEF2uN8Ni5ofpJXkukoU8ctAPxYoFB7whq9pKfBnjfZcpfEJT4R95NvBDoTP8WDyWvsUvbfHbcr9qSZuYpSKZjUQTvdFf6eqznHGEwhZApWfvXu6zjGCxYCp65F4jsVYYJjkzbjmkCg5WAwN5voudngA23kMK6PpTNygapCzXt" }
@ -614,7 +614,6 @@ function getDefaultTimeout(action) {
     switch (action) {
         case "GET_USER_ACCOUNT":
         case "SAVE_FILE":
-        case "SIGN_TRANSACTION":
         case "DECRYPT_DATA":
             // User may take a long time to accept/deny the popup
             return 60 * 60 * 1000;
@ -636,11 +635,6 @@ function getDefaultTimeout(action) {
             // Chat messages rely on PoW computations, so allow extra time
             return 60 * 1000;
 
-        case "CREATE_TRADE_BUY_ORDER":
-        case "CREATE_TRADE_SELL_ORDER":
-        case "CANCEL_TRADE_SELL_ORDER":
-        case "VOTE_ON_POLL":
-        case "CREATE_POLL":
         case "JOIN_GROUP":
         case "DEPLOY_AT":
         case "SEND_COIN":
@ -655,7 +649,7 @@ function getDefaultTimeout(action) {
             break;
         }
     }
-    return 30 * 1000;
+    return 10 * 1000;
 }
 
 /**
@@ -54,39 +54,26 @@ public class BlockArchiveV1Tests extends Common {
 public void testWriter() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {
 
-System.out.println("Starting testWriter");
-
 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");
 
 // 900 blocks are trimmed (this specifies the first untrimmed height)
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
 repository.getATRepository().setAtTrimHeight(901);
-System.out.println("Set trim heights to 901.");
 
 // Check the max archive height - this should be one less than the first untrimmed height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
 assertEquals(900, maximumArchiveHeight);
 
 // Write blocks 2-900 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
 
 // Make sure that the archive contains the correct number of blocks
-System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
 assertEquals(900 - 1, writer.getWrittenCount());
 
 // Increment block archive height
@@ -97,9 +84,6 @@ public class BlockArchiveV1Tests extends Common {
 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
-
-System.out.println("testWriter completed successfully.");
 }
 }
 
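Every Java hunk below repeats the same removal: the progress and status `System.out.println` calls added for debugging are stripped, while the minting, archiving and assertion logic stays untouched. As a reference point, this is roughly the shape of the logging being reverted, assembled only from the removed lines above (a sketch, not new behaviour):

```java
// Sketch of the reverted logging pattern; BlockMinter.mintTestingBlock(...) and
// Common.getTestAccount(...) are the calls shown in the diff, the rest is plain Java.
for (int i = 0; i < 1000; i++) {
    BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
    // Log every 100 blocks so a long minting loop shows progress
    if ((i + 1) % 100 == 0) {
        System.out.println("Minted block " + (i + 1));
    }
}
```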
@@ -107,39 +91,26 @@ public class BlockArchiveV1Tests extends Common {
 public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {
 
-System.out.println("Starting testWriterAndReader");
-
 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");
 
 // 900 blocks are trimmed (this specifies the first untrimmed height)
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
 repository.getATRepository().setAtTrimHeight(901);
-System.out.println("Set trim heights to 901.");
 
 // Check the max archive height - this should be one less than the first untrimmed height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
 assertEquals(900, maximumArchiveHeight);
 
 // Write blocks 2-900 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
 
 // Make sure that the archive contains the correct number of blocks
-System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
 assertEquals(900 - 1, writer.getWrittenCount());
 
 // Increment block archive height
@@ -150,10 +121,8 @@ public class BlockArchiveV1Tests extends Common {
 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
 
 // Read block 2 from the archive
-System.out.println("Reading block 2 from the archive...");
 BlockArchiveReader reader = BlockArchiveReader.getInstance();
 BlockTransformation block2Info = reader.fetchBlockAtHeight(2);
 BlockData block2ArchiveData = block2Info.getBlockData();
@@ -162,7 +131,6 @@ public class BlockArchiveV1Tests extends Common {
 BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);
 
 // Ensure the values match
-System.out.println("Comparing block 2 data...");
 assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight());
 assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature());
 
@@ -170,7 +138,6 @@ public class BlockArchiveV1Tests extends Common {
 assertEquals(1, block2ArchiveData.getOnlineAccountsCount());
 
 // Read block 900 from the archive
-System.out.println("Reading block 900 from the archive...");
 BlockTransformation block900Info = reader.fetchBlockAtHeight(900);
 BlockData block900ArchiveData = block900Info.getBlockData();
 
@@ -178,14 +145,12 @@ public class BlockArchiveV1Tests extends Common {
 BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900);
 
 // Ensure the values match
-System.out.println("Comparing block 900 data...");
 assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight());
 assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature());
 
 // Test some values in the archive
 assertEquals(1, block900ArchiveData.getOnlineAccountsCount());
 
-System.out.println("testWriterAndReader completed successfully.");
 }
 }
 
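The writer/reader test keeps its round-trip check unchanged: blocks written to the archive are read back with `BlockArchiveReader` and compared field by field against the repository copy. A condensed sketch of that comparison, using only calls that appear in the hunks above:

```java
// Condensed from the unchanged lines above: read block 2 back from the archive
// and compare it with the repository's copy of the same block.
BlockArchiveReader reader = BlockArchiveReader.getInstance();
BlockTransformation block2Info = reader.fetchBlockAtHeight(2);
BlockData block2ArchiveData = block2Info.getBlockData();
BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);
assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight());
assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature());
```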
@@ -193,48 +158,33 @@ public class BlockArchiveV1Tests extends Common {
 public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {
 
-System.out.println("Starting testArchivedAtStates");
-
 // Deploy an AT so that we have AT state data
-System.out.println("Deploying AT...");
 PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
 byte[] creationBytes = AtUtils.buildSimpleAT();
 long fundingAmount = 1_00000000L;
 DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
 String atAddress = deployAtTransaction.getATAccount().getAddress();
-System.out.println("AT deployed at address: " + atAddress);
 
 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");
 
 // 9 blocks are trimmed (this specifies the first untrimmed height)
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10);
 repository.getATRepository().setAtTrimHeight(10);
-System.out.println("Set trim heights to 10.");
 
 // Check the max archive height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height (Expected 9): " + maximumArchiveHeight);
 assertEquals(9, maximumArchiveHeight);
 
 // Write blocks 2-9 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
 
 // Make sure that the archive contains the correct number of blocks
-System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 8)");
 assertEquals(9 - 1, writer.getWrittenCount());
 
 // Increment block archive height
@@ -245,13 +195,10 @@ public class BlockArchiveV1Tests extends Common {
 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
 
 // Check blocks 3-9
-System.out.println("Checking blocks 3 to 9...");
 for (Integer testHeight = 2; testHeight <= 9; testHeight++) {
 
-System.out.println("Reading block " + testHeight + " from the archive...");
 // Read a block from the archive
 BlockArchiveReader reader = BlockArchiveReader.getInstance();
 BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight);
@@ -269,7 +216,6 @@ public class BlockArchiveV1Tests extends Common {
 
 // Check the archived AT state
 if (testHeight == 2) {
-System.out.println("Checking block " + testHeight + " AT state data (expected null)...");
 // Block 2 won't have an AT state hash because it's initial (and has the DEPLOY_AT in the same block)
 assertNull(archivedAtStateData);
 
@@ -277,7 +223,6 @@ public class BlockArchiveV1Tests extends Common {
 assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType());
 }
 else {
-System.out.println("Checking block " + testHeight + " AT state data...");
 // For blocks 3+, ensure the archive has the AT state data, but not the hashes
 assertNotNull(archivedAtStateData.getStateHash());
 assertNull(archivedAtStateData.getStateData());
@@ -310,12 +255,10 @@ public class BlockArchiveV1Tests extends Common {
 }
 
 // Check block 10 (unarchived)
-System.out.println("Checking block 10 (should not be in archive)...");
 BlockArchiveReader reader = BlockArchiveReader.getInstance();
 BlockTransformation blockInfo = reader.fetchBlockAtHeight(10);
 assertNull(blockInfo);
 
-System.out.println("testArchivedAtStates completed successfully.");
 }
 
 }
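The archived-AT-state expectations are untouched by the revert: block 2 (the block containing the DEPLOY_AT) has no archived AT state at all, while later blocks keep only the state hash. A sketch of the branch the test retains, taken from the hunks above:

```java
// Sketch of the per-height AT-state check retained by testArchivedAtStates.
if (testHeight == 2) {
    // Initial block for the AT: no archived AT state entry at all
    assertNull(archivedAtStateData);
} else {
    // Blocks 3+: the archive keeps the state hash but not the raw state data
    assertNotNull(archivedAtStateData.getStateHash());
    assertNull(archivedAtStateData.getStateData());
}
```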
@@ -324,46 +267,32 @@ public class BlockArchiveV1Tests extends Common {
 public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {
 
-System.out.println("Starting testArchiveAndPrune");
-
 // Deploy an AT so that we have AT state data
-System.out.println("Deploying AT...");
 PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
 byte[] creationBytes = AtUtils.buildSimpleAT();
 long fundingAmount = 1_00000000L;
 AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
 
 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");
 
 // Assume 900 blocks are trimmed (this specifies the first untrimmed height)
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
 repository.getATRepository().setAtTrimHeight(901);
-System.out.println("Set trim heights to 901.");
 
 // Check the max archive height - this should be one less than the first untrimmed height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
 assertEquals(900, maximumArchiveHeight);
 
 // Write blocks 2-900 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
 
 // Make sure that the archive contains the correct number of blocks
-System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
 assertEquals(900 - 1, writer.getWrittenCount());
 
 // Increment block archive height
@@ -374,21 +303,17 @@ public class BlockArchiveV1Tests extends Common {
 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
 
 // Ensure the SQL repository contains blocks 2 and 900...
 assertNotNull(repository.getBlockRepository().fromHeight(2));
 assertNotNull(repository.getBlockRepository().fromHeight(900));
-System.out.println("Blocks 2 and 900 exist in the repository.");
 
 // Prune all the archived blocks
-System.out.println("Pruning blocks 2 to 900...");
 int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900);
 assertEquals(900-1, numBlocksPruned);
 repository.getBlockRepository().setBlockPruneHeight(901);
 
 // Prune the AT states for the archived blocks
-System.out.println("Pruning AT states up to height 900...");
 repository.getATRepository().rebuildLatestAtStates(900);
 repository.saveChanges();
 int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900);
@@ -398,19 +323,14 @@ public class BlockArchiveV1Tests extends Common {
 // Now ensure the SQL repository is missing blocks 2 and 900...
 assertNull(repository.getBlockRepository().fromHeight(2));
 assertNull(repository.getBlockRepository().fromHeight(900));
-System.out.println("Blocks 2 and 900 have been pruned from the repository.");
 
 // ... but it's not missing blocks 1 and 901 (we don't prune the genesis block)
 assertNotNull(repository.getBlockRepository().fromHeight(1));
 assertNotNull(repository.getBlockRepository().fromHeight(901));
-System.out.println("Blocks 1 and 901 still exist in the repository.");
 
 // Validate the latest block height in the repository
-int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
-System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
-assertEquals(1002, lastBlockHeight);
+assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
 
-System.out.println("testArchiveAndPrune completed successfully.");
 }
 }
 
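testArchiveAndPrune exercises the full archive-then-prune sequence; the revert only drops its progress output. The sequence itself, condensed from the unchanged lines above into a sketch that reuses the test's own heights:

```java
// Write blocks 2-900 to the archive, then prune them from the SQL repository.
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
writer.setShouldEnforceFileSizeTarget(false); // avoid pre-calculating file sizes
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, writer.write());

// Prune the now-archived blocks and their AT states, advancing the prune heights.
int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900);
assertEquals(900 - 1, numBlocksPruned);
repository.getBlockRepository().setBlockPruneHeight(901);

repository.getATRepository().rebuildLatestAtStates(900);
repository.saveChanges();
int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900);
```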
@@ -418,190 +338,137 @@ public class BlockArchiveV1Tests extends Common {
 public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {
 
-System.out.println("Starting testTrimArchivePruneAndOrphan");
-
 // Deploy an AT so that we have AT state data
-System.out.println("Deploying AT...");
 PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
 byte[] creationBytes = AtUtils.buildSimpleAT();
 long fundingAmount = 1_00000000L;
 AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
-System.out.println("AT deployed successfully.");
 
 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");
 
 // Make sure that block 500 has full AT state data and data hash
-System.out.println("Verifying block 500 AT state data...");
 List<ATStateData> block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
 ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
 assertNotNull(atStatesData.getStateHash());
 assertNotNull(atStatesData.getStateData());
-System.out.println("Block 500 AT state data verified.");
 
 // Trim the first 500 blocks
-System.out.println("Trimming first 500 blocks...");
 repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500);
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501);
 repository.getATRepository().rebuildLatestAtStates(500);
 repository.getATRepository().trimAtStates(0, 500, 1000);
 repository.getATRepository().setAtTrimHeight(501);
-System.out.println("Trimming completed.");
 
 // Now block 499 should only have the AT state data hash
-System.out.println("Checking block 499 AT state data...");
 List<ATStateData> block499AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(499);
 atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499);
 assertNotNull(atStatesData.getStateHash());
 assertNull(atStatesData.getStateData());
-System.out.println("Block 499 AT state data contains only state hash as expected.");
 
 // ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range
-System.out.println("Verifying block 500 AT state data again...");
 block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
 atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
 assertNotNull(atStatesData.getStateHash());
 assertNotNull(atStatesData.getStateData());
-System.out.println("Block 500 AT state data contains full data.");
 
 // ... and block 501 should also have the full data
-System.out.println("Verifying block 501 AT state data...");
 List<ATStateData> block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501);
 atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501);
 assertNotNull(atStatesData.getStateHash());
 assertNotNull(atStatesData.getStateData());
-System.out.println("Block 501 AT state data contains full data.");
 
 // Check the max archive height - this should be one less than the first untrimmed height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height determined (Expected 500): " + maximumArchiveHeight);
 assertEquals(500, maximumArchiveHeight);
 
 BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3);
 
 // Write blocks 2-500 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
 
 // Make sure that the archive contains the correct number of blocks
-System.out.println("Number of blocks written to archive (Expected 499): " + writer.getWrittenCount());
 assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block
 
 // Increment block archive height
 repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
 repository.saveChanges();
 assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
-System.out.println("Block archive height updated to: " + (500 - 1));
 
 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
 
 // Ensure the SQL repository contains blocks 2 and 500...
-System.out.println("Verifying that blocks 2 and 500 exist in the repository...");
 assertNotNull(repository.getBlockRepository().fromHeight(2));
 assertNotNull(repository.getBlockRepository().fromHeight(500));
-System.out.println("Blocks 2 and 500 are present in the repository.");
 
 // Prune all the archived blocks
-System.out.println("Pruning blocks 2 to 500...");
 int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500);
-System.out.println("Number of blocks pruned (Expected 499): " + numBlocksPruned);
 assertEquals(500-1, numBlocksPruned);
 repository.getBlockRepository().setBlockPruneHeight(501);
 
 // Prune the AT states for the archived blocks
-System.out.println("Pruning AT states up to height 500...");
 repository.getATRepository().rebuildLatestAtStates(500);
 repository.saveChanges();
 int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500);
-System.out.println("Number of AT states pruned (Expected 498): " + numATStatesPruned);
 assertEquals(498, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
 repository.getATRepository().setAtPruneHeight(501);
 
 // Now ensure the SQL repository is missing blocks 2 and 500...
-System.out.println("Verifying that blocks 2 and 500 have been pruned...");
 assertNull(repository.getBlockRepository().fromHeight(2));
 assertNull(repository.getBlockRepository().fromHeight(500));
-System.out.println("Blocks 2 and 500 have been successfully pruned.");
 
 // ... but it's not missing blocks 1 and 501 (we don't prune the genesis block)
-System.out.println("Verifying that blocks 1 and 501 still exist...");
 assertNotNull(repository.getBlockRepository().fromHeight(1));
 assertNotNull(repository.getBlockRepository().fromHeight(501));
-System.out.println("Blocks 1 and 501 are present in the repository.");
 
 // Validate the latest block height in the repository
-int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
-System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
-assertEquals(1002, lastBlockHeight);
+assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());
 
 // Now orphan some unarchived blocks.
-System.out.println("Orphaning 500 blocks...");
 BlockUtils.orphanBlocks(repository, 500);
-int currentLastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
-System.out.println("New last block height after orphaning (Expected 502): " + currentLastBlockHeight);
-assertEquals(502, currentLastBlockHeight);
+assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight());
 
 // We're close to the lower limit of the SQL database now, so
 // we need to import some blocks from the archive
-System.out.println("Importing blocks 401 to 500 from the archive...");
 BlockArchiveUtils.importFromArchive(401, 500, repository);
 
 // Ensure the SQL repository now contains block 401 but not 400...
-System.out.println("Verifying that block 401 exists and block 400 does not...");
 assertNotNull(repository.getBlockRepository().fromHeight(401));
 assertNull(repository.getBlockRepository().fromHeight(400));
-System.out.println("Block 401 exists, block 400 does not.");
 
 // Import the remaining 399 blocks
-System.out.println("Importing blocks 2 to 400 from the archive...");
 BlockArchiveUtils.importFromArchive(2, 400, repository);
 
 // Verify that block 3 matches the original
-System.out.println("Verifying that block 3 matches the original data...");
 BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3);
 assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature());
 assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight());
-System.out.println("Block 3 data matches the original.");
 
 // Orphan 1 more block, which should be the last one that is possible to be orphaned
-System.out.println("Orphaning 1 more block...");
 BlockUtils.orphanBlocks(repository, 1);
-System.out.println("Orphaned 1 block successfully.");
 
 // Orphan another block, which should fail
-System.out.println("Attempting to orphan another block, which should fail...");
 Exception exception = null;
 try {
 BlockUtils.orphanBlocks(repository, 1);
 } catch (DataException e) {
 exception = e;
-System.out.println("Caught expected DataException: " + e.getMessage());
 }
 
 // Ensure that a DataException is thrown because there is no more AT states data available
 assertNotNull(exception);
 assertEquals(DataException.class, exception.getClass());
-System.out.println("DataException confirmed due to lack of AT states data.");
 
 // FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception
 // and allow orphaning back through blocks with trimmed AT states.
 
-System.out.println("testTrimArchivePruneAndOrphan completed successfully.");
 }
 }
 
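The orphaning limit checked at the end of testTrimArchivePruneAndOrphan is also unchanged: once the AT state data behind a block has been trimmed, orphaning one block further must fail. A sketch of that expected-failure check, drawn from the lines above:

```java
// Orphaning past the trimmed AT state data is expected to throw a DataException.
Exception exception = null;
try {
    BlockUtils.orphanBlocks(repository, 1);
} catch (DataException e) {
    exception = e;
}
assertNotNull(exception);
assertEquals(DataException.class, exception.getClass());
```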
@@ -615,26 +482,16 @@ public class BlockArchiveV1Tests extends Common {
 public void testMissingAtStatesHeightIndex() throws DataException, SQLException {
 try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {
 
-System.out.println("Starting testMissingAtStatesHeightIndex");
-
 // Firstly check that we're able to prune or archive when the index exists
-System.out.println("Checking existence of ATStatesHeightIndex...");
 assertTrue(repository.getATRepository().hasAtStatesHeightIndex());
 assertTrue(RepositoryManager.canArchiveOrPrune());
-System.out.println("ATStatesHeightIndex exists. Archiving and pruning are possible.");
 
 // Delete the index
-System.out.println("Dropping ATStatesHeightIndex...");
 repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute();
-System.out.println("ATStatesHeightIndex dropped.");
 
 // Ensure check that we're unable to prune or archive when the index doesn't exist
-System.out.println("Verifying that ATStatesHeightIndex no longer exists...");
 assertFalse(repository.getATRepository().hasAtStatesHeightIndex());
 assertFalse(RepositoryManager.canArchiveOrPrune());
-System.out.println("ATStatesHeightIndex does not exist. Archiving and pruning are disabled.");
-
-System.out.println("testMissingAtStatesHeightIndex completed successfully.");
 }
 }
 
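testMissingAtStatesHeightIndex guards archiving and pruning behind the presence of the HSQLDB `ATSTATESHEIGHTINDEX` index; the revert only removes its status output. The guard itself, condensed from the hunk above:

```java
// With the index present, archiving/pruning is allowed; after dropping it, both are refused.
assertTrue(repository.getATRepository().hasAtStatesHeightIndex());
assertTrue(RepositoryManager.canArchiveOrPrune());

repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute();

assertFalse(repository.getATRepository().hasAtStatesHeightIndex());
assertFalse(RepositoryManager.canArchiveOrPrune());
```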
@@ -644,10 +501,8 @@ public class BlockArchiveV1Tests extends Common {
 Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
 try {
 FileUtils.deleteDirectory(archivePath.toFile());
-System.out.println("Deleted archive directory at: " + archivePath);
 } catch (IOException e) {
 
-System.out.println("Failed to delete archive directory: " + e.getMessage());
 }
 }
 
@ -54,39 +54,26 @@ public class BlockArchiveV2Tests extends Common {
|
|||||||
public void testWriter() throws DataException, InterruptedException, TransformationException, IOException {
|
public void testWriter() throws DataException, InterruptedException, TransformationException, IOException {
|
||||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||||
|
|
||||||
System.out.println("Starting testWriter");
|
|
||||||
|
|
||||||
// Mint some blocks so that we are able to archive them later
|
// Mint some blocks so that we are able to archive them later
|
||||||
System.out.println("Minting 1000 blocks...");
|
|
||||||
for (int i = 0; i < 1000; i++) {
|
for (int i = 0; i < 1000; i++) {
|
||||||
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
|
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
|
||||||
// Log every 100 blocks
|
|
||||||
if ((i + 1) % 100 == 0) {
|
|
||||||
System.out.println("Minted block " + (i + 1));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
System.out.println("Finished minting blocks.");
|
|
||||||
|
|
||||||
// 900 blocks are trimmed (this specifies the first untrimmed height)
|
// 900 blocks are trimmed (this specifies the first untrimmed height)
|
||||||
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
|
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
|
||||||
repository.getATRepository().setAtTrimHeight(901);
|
repository.getATRepository().setAtTrimHeight(901);
|
||||||
System.out.println("Set trim heights to 901.");
|
|
||||||
|
|
||||||
// Check the max archive height - this should be one less than the first untrimmed height
|
// Check the max archive height - this should be one less than the first untrimmed height
|
||||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
|
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
|
||||||
System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
|
|
||||||
assertEquals(900, maximumArchiveHeight);
|
assertEquals(900, maximumArchiveHeight);
|
||||||
|
|
||||||
// Write blocks 2-900 to the archive
|
// Write blocks 2-900 to the archive
|
||||||
System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
|
|
||||||
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
|
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
|
||||||
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
|
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
|
||||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
|
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
|
||||||
System.out.println("Finished writing blocks to archive. Result: " + result);
|
|
||||||
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
|
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
|
||||||
|
|
||||||
// Make sure that the archive contains the correct number of blocks
|
// Make sure that the archive contains the correct number of blocks
|
||||||
System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
|
|
||||||
assertEquals(900 - 1, writer.getWrittenCount());
|
assertEquals(900 - 1, writer.getWrittenCount());
|
||||||
|
|
||||||
// Increment block archive height
|
// Increment block archive height
|
||||||
@ -97,9 +84,6 @@ public class BlockArchiveV2Tests extends Common {
|
|||||||
// Ensure the file exists
|
// Ensure the file exists
|
||||||
File outputFile = writer.getOutputPath().toFile();
|
File outputFile = writer.getOutputPath().toFile();
|
||||||
assertTrue(outputFile.exists());
|
assertTrue(outputFile.exists());
|
||||||
System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());
|
|
||||||
|
|
||||||
System.out.println("testWriter completed successfully.");
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -107,39 +91,26 @@ public class BlockArchiveV2Tests extends Common {
|
|||||||
public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException {
|
public void testWriterAndReader() throws DataException, InterruptedException, TransformationException, IOException {
|
||||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||||
|
|
||||||
System.out.println("Starting testWriterAndReader");
|
|
||||||
|
|
||||||
// Mint some blocks so that we are able to archive them later
|
// Mint some blocks so that we are able to archive them later
|
||||||
System.out.println("Minting 1000 blocks...");
|
|
||||||
for (int i = 0; i < 1000; i++) {
|
for (int i = 0; i < 1000; i++) {
|
||||||
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
|
BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
|
||||||
// Log every 100 blocks
|
|
||||||
if ((i + 1) % 100 == 0) {
|
|
||||||
System.out.println("Minted block " + (i + 1));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
System.out.println("Finished minting blocks.");
|
|
||||||
|
|
||||||
// 900 blocks are trimmed (this specifies the first untrimmed height)
|
// 900 blocks are trimmed (this specifies the first untrimmed height)
|
||||||
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
|
repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
|
||||||
repository.getATRepository().setAtTrimHeight(901);
|
repository.getATRepository().setAtTrimHeight(901);
|
||||||
System.out.println("Set trim heights to 901.");
|
|
||||||
|
|
||||||
// Check the max archive height - this should be one less than the first untrimmed height
|
// Check the max archive height - this should be one less than the first untrimmed height
|
||||||
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
|
final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
|
||||||
System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
|
|
||||||
assertEquals(900, maximumArchiveHeight);
|
assertEquals(900, maximumArchiveHeight);
|
||||||
|
|
||||||
// Write blocks 2-900 to the archive
|
// Write blocks 2-900 to the archive
|
||||||
System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
|
|
||||||
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
|
BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
|
||||||
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
|
writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
|
||||||
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
|
BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
|
||||||
System.out.println("Finished writing blocks to archive. Result: " + result);
|
|
||||||
assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);
|
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);

 // Make sure that the archive contains the correct number of blocks
-System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
 assertEquals(900 - 1, writer.getWrittenCount());

 // Increment block archive height
@@ -150,10 +121,8 @@ public class BlockArchiveV2Tests extends Common {
 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());

 // Read block 2 from the archive
-System.out.println("Reading block 2 from the archive...");
 BlockArchiveReader reader = BlockArchiveReader.getInstance();
 BlockTransformation block2Info = reader.fetchBlockAtHeight(2);
 BlockData block2ArchiveData = block2Info.getBlockData();
@@ -162,7 +131,6 @@ public class BlockArchiveV2Tests extends Common {
 BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);

 // Ensure the values match
-System.out.println("Comparing block 2 data...");
 assertEquals(block2ArchiveData.getHeight(), block2RepositoryData.getHeight());
 assertArrayEquals(block2ArchiveData.getSignature(), block2RepositoryData.getSignature());

@@ -170,7 +138,6 @@ public class BlockArchiveV2Tests extends Common {
 assertEquals(1, block2ArchiveData.getOnlineAccountsCount());

 // Read block 900 from the archive
-System.out.println("Reading block 900 from the archive...");
 BlockTransformation block900Info = reader.fetchBlockAtHeight(900);
 BlockData block900ArchiveData = block900Info.getBlockData();

@@ -178,14 +145,12 @@ public class BlockArchiveV2Tests extends Common {
 BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900);

 // Ensure the values match
-System.out.println("Comparing block 900 data...");
 assertEquals(block900ArchiveData.getHeight(), block900RepositoryData.getHeight());
 assertArrayEquals(block900ArchiveData.getSignature(), block900RepositoryData.getSignature());

 // Test some values in the archive
 assertEquals(1, block900ArchiveData.getOnlineAccountsCount());

-System.out.println("testWriterAndReader completed successfully.");
 }
 }

@@ -193,66 +158,47 @@ public class BlockArchiveV2Tests extends Common {
 public void testArchivedAtStates() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {

-System.out.println("Starting testArchivedAtStates");
-
 // Deploy an AT so that we have AT state data
-System.out.println("Deploying AT...");
 PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
 byte[] creationBytes = AtUtils.buildSimpleAT();
 long fundingAmount = 1_00000000L;
 DeployAtTransaction deployAtTransaction = AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
 String atAddress = deployAtTransaction.getATAccount().getAddress();
-System.out.println("AT deployed at address: " + atAddress);

 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");

 // 9 blocks are trimmed (this specifies the first untrimmed height)
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(10);
 repository.getATRepository().setAtTrimHeight(10);
-System.out.println("Set trim heights to 10.");

 // Check the max archive height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height (Expected 9): " + maximumArchiveHeight);
 assertEquals(9, maximumArchiveHeight);

 // Write blocks 2-9 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);

 // Make sure that the archive contains the correct number of blocks
-System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 8)");
 assertEquals(9 - 1, writer.getWrittenCount());

 // Increment block archive height
 repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
 repository.saveChanges();
 assertEquals(9 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
-System.out.println("Block archive height updated to: " + (9 - 1));

 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());

 // Check blocks 3-9
-System.out.println("Checking blocks 2 to 9...");
 for (Integer testHeight = 2; testHeight <= 9; testHeight++) {

-System.out.println("Reading block " + testHeight + " from the archive...");
 // Read a block from the archive
 BlockArchiveReader reader = BlockArchiveReader.getInstance();
 BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight);
@@ -270,18 +216,15 @@ public class BlockArchiveV2Tests extends Common {

 // Check the archived AT state
 if (testHeight == 2) {
-System.out.println("Checking block " + testHeight + " AT state data (expected transactions)...");
 assertEquals(1, archivedTransactions.size());
 assertEquals(Transaction.TransactionType.DEPLOY_AT, archivedTransactions.get(0).getType());
 }
 else {
-System.out.println("Checking block " + testHeight + " AT state data (no transactions expected)...");
 // Blocks 3+ shouldn't have any transactions
 assertTrue(archivedTransactions.isEmpty());
 }

 // Ensure the archive has the AT states hash
-System.out.println("Checking block " + testHeight + " AT states hash...");
 assertNotNull(archivedAtStateHash);

 // Also check the online accounts count and height
@@ -289,7 +232,6 @@ public class BlockArchiveV2Tests extends Common {
 assertEquals(testHeight, archivedBlockData.getHeight());

 // Ensure the values match
-System.out.println("Comparing block " + testHeight + " data...");
 assertEquals(archivedBlockData.getHeight(), repositoryBlockData.getHeight());
 assertArrayEquals(archivedBlockData.getSignature(), repositoryBlockData.getSignature());
 assertEquals(archivedBlockData.getOnlineAccountsCount(), repositoryBlockData.getOnlineAccountsCount());
@@ -307,12 +249,10 @@ public class BlockArchiveV2Tests extends Common {
 }

 // Check block 10 (unarchived)
-System.out.println("Checking block 10 (should not be in archive)...");
 BlockArchiveReader reader = BlockArchiveReader.getInstance();
 BlockTransformation blockInfo = reader.fetchBlockAtHeight(10);
 assertNull(blockInfo);

-System.out.println("testArchivedAtStates completed successfully.");
 }

 }
@@ -321,47 +261,32 @@ public class BlockArchiveV2Tests extends Common {
 public void testArchiveAndPrune() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {

-System.out.println("Starting testArchiveAndPrune");
-
 // Deploy an AT so that we have AT state data
-System.out.println("Deploying AT...");
 PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
 byte[] creationBytes = AtUtils.buildSimpleAT();
 long fundingAmount = 1_00000000L;
 AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
-System.out.println("AT deployed successfully.");

 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");

 // Assume 900 blocks are trimmed (this specifies the first untrimmed height)
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(901);
 repository.getATRepository().setAtTrimHeight(901);
-System.out.println("Set trim heights to 901.");

 // Check the max archive height - this should be one less than the first untrimmed height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height (Expected 900): " + maximumArchiveHeight);
 assertEquals(900, maximumArchiveHeight);

 // Write blocks 2-900 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);

 // Make sure that the archive contains the correct number of blocks
-System.out.println("Archive contains " + writer.getWrittenCount() + " blocks. (Expected 899)");
 assertEquals(900 - 1, writer.getWrittenCount());

 // Increment block archive height
@@ -372,48 +297,34 @@ public class BlockArchiveV2Tests extends Common {
 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());

 // Ensure the SQL repository contains blocks 2 and 900...
-System.out.println("Verifying that blocks 2 and 900 exist in the repository...");
 assertNotNull(repository.getBlockRepository().fromHeight(2));
 assertNotNull(repository.getBlockRepository().fromHeight(900));
-System.out.println("Blocks 2 and 900 are present in the repository.");

 // Prune all the archived blocks
-System.out.println("Pruning blocks 2 to 900...");
 int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 900);
-System.out.println("Number of blocks pruned (Expected 899): " + numBlocksPruned);
 assertEquals(900-1, numBlocksPruned);
 repository.getBlockRepository().setBlockPruneHeight(901);

 // Prune the AT states for the archived blocks
-System.out.println("Pruning AT states up to height 900...");
 repository.getATRepository().rebuildLatestAtStates(900);
 repository.saveChanges();
 int numATStatesPruned = repository.getATRepository().pruneAtStates(0, 900);
-System.out.println("Number of AT states pruned (Expected 898): " + numATStatesPruned);
 assertEquals(900-2, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
 repository.getATRepository().setAtPruneHeight(901);

 // Now ensure the SQL repository is missing blocks 2 and 900...
-System.out.println("Verifying that blocks 2 and 900 have been pruned...");
 assertNull(repository.getBlockRepository().fromHeight(2));
 assertNull(repository.getBlockRepository().fromHeight(900));
-System.out.println("Blocks 2 and 900 have been successfully pruned.");

 // ... but it's not missing blocks 1 and 901 (we don't prune the genesis block)
-System.out.println("Verifying that blocks 1 and 901 still exist...");
 assertNotNull(repository.getBlockRepository().fromHeight(1));
 assertNotNull(repository.getBlockRepository().fromHeight(901));
-System.out.println("Blocks 1 and 901 are present in the repository.");

 // Validate the latest block height in the repository
-int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
-System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
-assertEquals(1002, lastBlockHeight);
+assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());

-System.out.println("testArchiveAndPrune completed successfully.");
 }
 }

@@ -421,191 +332,138 @@ public class BlockArchiveV2Tests extends Common {
 public void testTrimArchivePruneAndOrphan() throws DataException, InterruptedException, TransformationException, IOException {
 try (final Repository repository = RepositoryManager.getRepository()) {

-System.out.println("Starting testTrimArchivePruneAndOrphan");
-
 // Deploy an AT so that we have AT state data
-System.out.println("Deploying AT...");
 PrivateKeyAccount deployer = Common.getTestAccount(repository, "alice");
 byte[] creationBytes = AtUtils.buildSimpleAT();
 long fundingAmount = 1_00000000L;
 AtUtils.doDeployAT(repository, deployer, creationBytes, fundingAmount);
-System.out.println("AT deployed successfully.");

 // Mint some blocks so that we are able to archive them later
-System.out.println("Minting 1000 blocks...");
 for (int i = 0; i < 1000; i++) {
 BlockMinter.mintTestingBlock(repository, Common.getTestAccount(repository, "alice-reward-share"));
-// Log every 100 blocks
-if ((i + 1) % 100 == 0) {
-System.out.println("Minted block " + (i + 1));
-}
 }
-System.out.println("Finished minting blocks.");

 // Make sure that block 500 has full AT state data and data hash
-System.out.println("Verifying block 500 AT state data...");
 List<ATStateData> block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
 ATStateData atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
 assertNotNull(atStatesData.getStateHash());
 assertNotNull(atStatesData.getStateData());
-System.out.println("Block 500 AT state data verified.");

 // Trim the first 500 blocks
-System.out.println("Trimming first 500 blocks...");
 repository.getBlockRepository().trimOldOnlineAccountsSignatures(0, 500);
 repository.getBlockRepository().setOnlineAccountsSignaturesTrimHeight(501);
 repository.getATRepository().rebuildLatestAtStates(500);
 repository.getATRepository().trimAtStates(0, 500, 1000);
 repository.getATRepository().setAtTrimHeight(501);
-System.out.println("Trimming completed.");

 // Now block 499 should only have the AT state data hash
-System.out.println("Checking block 499 AT state data...");
 List<ATStateData> block499AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(499);
 atStatesData = repository.getATRepository().getATStateAtHeight(block499AtStatesData.get(0).getATAddress(), 499);
 assertNotNull(atStatesData.getStateHash());
 assertNull(atStatesData.getStateData());
-System.out.println("Block 499 AT state data contains only state hash as expected.");

 // ... but block 500 should have the full data (due to being retained as the "latest" AT state in the trimmed range
-System.out.println("Verifying block 500 AT state data again...");
 block500AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(500);
 atStatesData = repository.getATRepository().getATStateAtHeight(block500AtStatesData.get(0).getATAddress(), 500);
 assertNotNull(atStatesData.getStateHash());
 assertNotNull(atStatesData.getStateData());
-System.out.println("Block 500 AT state data contains full data.");

 // ... and block 501 should also have the full data
-System.out.println("Verifying block 501 AT state data...");
 List<ATStateData> block501AtStatesData = repository.getATRepository().getBlockATStatesAtHeight(501);
 atStatesData = repository.getATRepository().getATStateAtHeight(block501AtStatesData.get(0).getATAddress(), 501);
 assertNotNull(atStatesData.getStateHash());
 assertNotNull(atStatesData.getStateData());
-System.out.println("Block 501 AT state data contains full data.");

 // Check the max archive height - this should be one less than the first untrimmed height
 final int maximumArchiveHeight = BlockArchiveWriter.getMaxArchiveHeight(repository);
-System.out.println("Maximum archive height determined (Expected 500): " + maximumArchiveHeight);
 assertEquals(500, maximumArchiveHeight);

 BlockData block3DataPreArchive = repository.getBlockRepository().fromHeight(3);

 // Write blocks 2-500 to the archive
-System.out.println("Writing blocks 2 to " + maximumArchiveHeight + " to the archive...");
 BlockArchiveWriter writer = new BlockArchiveWriter(0, maximumArchiveHeight, repository);
 writer.setShouldEnforceFileSizeTarget(false); // To avoid the need to pre-calculate file sizes
 BlockArchiveWriter.BlockArchiveWriteResult result = writer.write();
-System.out.println("Finished writing blocks to archive. Result: " + result);
 assertEquals(BlockArchiveWriter.BlockArchiveWriteResult.OK, result);

 // Make sure that the archive contains the correct number of blocks
-System.out.println("Number of blocks written to archive (Expected 499): " + writer.getWrittenCount());
 assertEquals(500 - 1, writer.getWrittenCount()); // -1 for the genesis block

 // Increment block archive height
 repository.getBlockArchiveRepository().setBlockArchiveHeight(writer.getWrittenCount());
 repository.saveChanges();
 assertEquals(500 - 1, repository.getBlockArchiveRepository().getBlockArchiveHeight());
-System.out.println("Block archive height updated to: " + (500 - 1));

 // Ensure the file exists
 File outputFile = writer.getOutputPath().toFile();
 assertTrue(outputFile.exists());
-System.out.println("Archive file exists at: " + outputFile.getAbsolutePath());

 // Ensure the SQL repository contains blocks 2 and 500...
-System.out.println("Verifying that blocks 2 and 500 exist in the repository...");
 assertNotNull(repository.getBlockRepository().fromHeight(2));
 assertNotNull(repository.getBlockRepository().fromHeight(500));
-System.out.println("Blocks 2 and 500 are present in the repository.");

 // Prune all the archived blocks
-System.out.println("Pruning blocks 2 to 500...");
 int numBlocksPruned = repository.getBlockRepository().pruneBlocks(0, 500);
-System.out.println("Number of blocks pruned (Expected 499): " + numBlocksPruned);
 assertEquals(500-1, numBlocksPruned);
 repository.getBlockRepository().setBlockPruneHeight(501);

 // Prune the AT states for the archived blocks
-System.out.println("Pruning AT states up to height 500...");
 repository.getATRepository().rebuildLatestAtStates(500);
 repository.saveChanges();
 int numATStatesPruned = repository.getATRepository().pruneAtStates(2, 500);
-System.out.println("Number of AT states pruned (Expected 498): " + numATStatesPruned);
 assertEquals(498, numATStatesPruned); // Minus 1 for genesis block, and another for the latest AT state
 repository.getATRepository().setAtPruneHeight(501);

 // Now ensure the SQL repository is missing blocks 2 and 500...
-System.out.println("Verifying that blocks 2 and 500 have been pruned...");
 assertNull(repository.getBlockRepository().fromHeight(2));
 assertNull(repository.getBlockRepository().fromHeight(500));
-System.out.println("Blocks 2 and 500 have been successfully pruned.");

 // ... but it's not missing blocks 1 and 501 (we don't prune the genesis block)
-System.out.println("Verifying that blocks 1 and 501 still exist...");
 assertNotNull(repository.getBlockRepository().fromHeight(1));
 assertNotNull(repository.getBlockRepository().fromHeight(501));
-System.out.println("Blocks 1 and 501 are present in the repository.");

 // Validate the latest block height in the repository
-int lastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
-System.out.println("Latest block height in repository (Expected 1002): " + lastBlockHeight);
-assertEquals(1002, lastBlockHeight);
+assertEquals(1002, (int) repository.getBlockRepository().getLastBlock().getHeight());

 // Now orphan some unarchived blocks.
-System.out.println("Orphaning 500 blocks...");
 BlockUtils.orphanBlocks(repository, 500);
-int currentLastBlockHeight = repository.getBlockRepository().getLastBlock().getHeight();
-System.out.println("New last block height after orphaning (Expected 502): " + currentLastBlockHeight);
-assertEquals(502, currentLastBlockHeight);
+assertEquals(502, (int) repository.getBlockRepository().getLastBlock().getHeight());

 // We're close to the lower limit of the SQL database now, so
 // we need to import some blocks from the archive
-System.out.println("Importing blocks 401 to 500 from the archive...");
 BlockArchiveUtils.importFromArchive(401, 500, repository);

 // Ensure the SQL repository now contains block 401 but not 400...
-System.out.println("Verifying that block 401 exists and block 400 does not...");
 assertNotNull(repository.getBlockRepository().fromHeight(401));
 assertNull(repository.getBlockRepository().fromHeight(400));
-System.out.println("Block 401 exists, block 400 does not.");

 // Import the remaining 399 blocks
-System.out.println("Importing blocks 2 to 400 from the archive...");
 BlockArchiveUtils.importFromArchive(2, 400, repository);

 // Verify that block 3 matches the original
-System.out.println("Verifying that block 3 matches the original data...");
 BlockData block3DataPostArchive = repository.getBlockRepository().fromHeight(3);
 assertArrayEquals(block3DataPreArchive.getSignature(), block3DataPostArchive.getSignature());
 assertEquals(block3DataPreArchive.getHeight(), block3DataPostArchive.getHeight());
-System.out.println("Block 3 data matches the original.");

 // Orphan 2 more block, which should be the last one that is possible to be orphaned
 // TODO: figure out why this is 1 block more than in the equivalent block archive V1 test
-System.out.println("Orphaning 2 more blocks...");
 BlockUtils.orphanBlocks(repository, 2);
-System.out.println("Orphaned 2 blocks successfully.");

 // Orphan another block, which should fail
-System.out.println("Attempting to orphan another block, which should fail...");
 Exception exception = null;
 try {
 BlockUtils.orphanBlocks(repository, 1);
 } catch (DataException e) {
 exception = e;
-System.out.println("Caught expected DataException: " + e.getMessage());
 }

 // Ensure that a DataException is thrown because there is no more AT states data available
 assertNotNull(exception);
 assertEquals(DataException.class, exception.getClass());
-System.out.println("DataException confirmed due to lack of AT states data.");

 // FUTURE: we may be able to retain unique AT states when trimming, to avoid this exception
 // and allow orphaning back through blocks with trimmed AT states.

-System.out.println("testTrimArchivePruneAndOrphan completed successfully.");
 }
 }

@@ -619,26 +477,16 @@ public class BlockArchiveV2Tests extends Common {
 public void testMissingAtStatesHeightIndex() throws DataException, SQLException {
 try (final HSQLDBRepository repository = (HSQLDBRepository) RepositoryManager.getRepository()) {

-System.out.println("Starting testMissingAtStatesHeightIndex");
-
 // Firstly check that we're able to prune or archive when the index exists
-System.out.println("Checking existence of ATStatesHeightIndex...");
 assertTrue(repository.getATRepository().hasAtStatesHeightIndex());
 assertTrue(RepositoryManager.canArchiveOrPrune());
-System.out.println("ATStatesHeightIndex exists. Archiving and pruning are possible.");

 // Delete the index
-System.out.println("Dropping ATStatesHeightIndex...");
 repository.prepareStatement("DROP INDEX ATSTATESHEIGHTINDEX").execute();
-System.out.println("ATStatesHeightIndex dropped.");

 // Ensure check that we're unable to prune or archive when the index doesn't exist
-System.out.println("Verifying that ATStatesHeightIndex no longer exists...");
 assertFalse(repository.getATRepository().hasAtStatesHeightIndex());
 assertFalse(RepositoryManager.canArchiveOrPrune());
-System.out.println("ATStatesHeightIndex does not exist. Archiving and pruning are disabled.");
-
-System.out.println("testMissingAtStatesHeightIndex completed successfully.");
 }
 }

@@ -648,10 +496,8 @@ public class BlockArchiveV2Tests extends Common {
 Path archivePath = Paths.get(Settings.getInstance().getRepositoryPath(), "archive").toAbsolutePath();
 try {
 FileUtils.deleteDirectory(archivePath.toFile());
-System.out.println("Deleted archive directory at: " + archivePath);
 } catch (IOException e) {

-System.out.println("Failed to delete archive directory: " + e.getMessage());
 }
 }

@@ -411,20 +411,13 @@ public class RepositoryTests extends Common {
 }
 }

-/** Specifically test LATERAL() usage in Chat repository with hasChatReference */
+/** Specifically test LATERAL() usage in Chat repository */
 @Test
 public void testChatLateral() {
 try (final HSQLDBRepository hsqldb = (HSQLDBRepository) RepositoryManager.getRepository()) {
 String address = Crypto.toAddress(new byte[32]);

-// Test without hasChatReference
-hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58, null);
-
-// Test with hasChatReference = true
-hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58, true);
-
-// Test with hasChatReference = false
-hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58, false);
+hsqldb.getChatRepository().getActiveChats(address, ChatMessage.Encoding.BASE58);
 } catch (DataException e) {
 fail("HSQLDB bug #1580");
 }
@@ -74,7 +74,7 @@ public class TransferPrivsTests extends Common {
 public void testAliceIntoNewAccountTransferPrivs() throws DataException {
 try (final Repository repository = RepositoryManager.getRepository()) {
 TestAccount alice = Common.getTestAccount(repository, "alice");
-assertTrue(alice.canMint(false));
+assertTrue(alice.canMint());

 PrivateKeyAccount aliceMintingAccount = Common.getTestAccount(repository, "alice-reward-share");

@@ -86,8 +86,8 @@ public class TransferPrivsTests extends Common {

 combineAccounts(repository, alice, randomAccount, aliceMintingAccount);

-assertFalse(alice.canMint(false));
-assertTrue(randomAccount.canMint(false));
+assertFalse(alice.canMint());
+assertTrue(randomAccount.canMint());
 }
 }

@@ -97,8 +97,8 @@ public class TransferPrivsTests extends Common {
 TestAccount alice = Common.getTestAccount(repository, "alice");
 TestAccount dilbert = Common.getTestAccount(repository, "dilbert");

-assertTrue(alice.canMint(false));
-assertTrue(dilbert.canMint(false));
+assertTrue(alice.canMint());
+assertTrue(dilbert.canMint());

 // Dilbert has level, Alice does not so we need Alice to mint enough blocks to bump Dilbert's level post-combine
 final int expectedPostCombineLevel = dilbert.getLevel() + 1;
@@ -118,11 +118,11 @@ public class TransferPrivsTests extends Common {

 // Post-combine sender checks
 checkSenderPostTransfer(postCombineAliceData);
-assertFalse(alice.canMint(false));
+assertFalse(alice.canMint());

 // Post-combine recipient checks
 checkRecipientPostTransfer(preCombineAliceData, preCombineDilbertData, postCombineDilbertData, expectedPostCombineLevel);
-assertTrue(dilbert.canMint(false));
+assertTrue(dilbert.canMint());

 // Orphan previous block
 BlockUtils.orphanLastBlock(repository);
@@ -130,12 +130,12 @@ public class TransferPrivsTests extends Common {
 // Sender checks
 AccountData orphanedAliceData = repository.getAccountRepository().getAccount(alice.getAddress());
 checkAccountDataRestored("sender", preCombineAliceData, orphanedAliceData);
-assertTrue(alice.canMint(false));
+assertTrue(alice.canMint());

 // Recipient checks
 AccountData orphanedDilbertData = repository.getAccountRepository().getAccount(dilbert.getAddress());
 checkAccountDataRestored("recipient", preCombineDilbertData, orphanedDilbertData);
-assertTrue(dilbert.canMint(false));
+assertTrue(dilbert.canMint());
 }
 }

@@ -145,8 +145,8 @@ public class TransferPrivsTests extends Common {
 TestAccount alice = Common.getTestAccount(repository, "alice");
 TestAccount dilbert = Common.getTestAccount(repository, "dilbert");

-assertTrue(dilbert.canMint(false));
-assertTrue(alice.canMint(false));
+assertTrue(dilbert.canMint());
+assertTrue(alice.canMint());

 // Dilbert has level, Alice does not so we need Alice to mint enough blocks to surpass Dilbert's level post-combine
 final int expectedPostCombineLevel = dilbert.getLevel() + 1;
@@ -166,11 +166,11 @@ public class TransferPrivsTests extends Common {

 // Post-combine sender checks
 checkSenderPostTransfer(postCombineDilbertData);
-assertFalse(dilbert.canMint(false));
+assertFalse(dilbert.canMint());

 // Post-combine recipient checks
 checkRecipientPostTransfer(preCombineDilbertData, preCombineAliceData, postCombineAliceData, expectedPostCombineLevel);
-assertTrue(alice.canMint(false));
+assertTrue(alice.canMint());

 // Orphan previous block
 BlockUtils.orphanLastBlock(repository);
@@ -178,12 +178,12 @@ public class TransferPrivsTests extends Common {
 // Sender checks
 AccountData orphanedDilbertData = repository.getAccountRepository().getAccount(dilbert.getAddress());
 checkAccountDataRestored("sender", preCombineDilbertData, orphanedDilbertData);
-assertTrue(dilbert.canMint(false));
+assertTrue(dilbert.canMint());

 // Recipient checks
 AccountData orphanedAliceData = repository.getAccountRepository().getAccount(alice.getAddress());
 checkAccountDataRestored("recipient", preCombineAliceData, orphanedAliceData);
-assertTrue(alice.canMint(false));
+assertTrue(alice.canMint());
 }
 }

@@ -202,8 +202,8 @@ public class TransferPrivsTests extends Common {
 TestAccount chloe = Common.getTestAccount(repository, "chloe");
 TestAccount dilbert = Common.getTestAccount(repository, "dilbert");

-assertTrue(dilbert.canMint(false));
-assertFalse(chloe.canMint(false));
+assertTrue(dilbert.canMint());
+assertFalse(chloe.canMint());

 // COMBINE DILBERT INTO CHLOE

@@ -225,16 +225,16 @@ public class TransferPrivsTests extends Common {

 // Post-combine sender checks
 checkSenderPostTransfer(post1stCombineDilbertData);
-assertFalse(dilbert.canMint(false));
+assertFalse(dilbert.canMint());

 // Post-combine recipient checks
 checkRecipientPostTransfer(pre1stCombineDilbertData, pre1stCombineChloeData, post1stCombineChloeData, expectedPost1stCombineLevel);
-assertTrue(chloe.canMint(false));
+assertTrue(chloe.canMint());

 // COMBINE ALICE INTO CHLOE

-assertTrue(alice.canMint(false));
-assertTrue(chloe.canMint(false));
+assertTrue(alice.canMint());
+assertTrue(chloe.canMint());

 // Alice needs to mint enough blocks to surpass Chloe's level post-combine
 final int expectedPost2ndCombineLevel = chloe.getLevel() + 1;
@@ -254,11 +254,11 @@ public class TransferPrivsTests extends Common {

 // Post-combine sender checks
 checkSenderPostTransfer(post2ndCombineAliceData);
-assertFalse(alice.canMint(false));
+assertFalse(alice.canMint());

 // Post-combine recipient checks
 checkRecipientPostTransfer(pre2ndCombineAliceData, pre2ndCombineChloeData, post2ndCombineChloeData, expectedPost2ndCombineLevel);
-assertTrue(chloe.canMint(false));
+assertTrue(chloe.canMint());

 // Orphan 2nd combine
 BlockUtils.orphanLastBlock(repository);
@@ -266,12 +266,12 @@ public class TransferPrivsTests extends Common {
 // Sender checks
 AccountData orphanedAliceData = repository.getAccountRepository().getAccount(alice.getAddress());
 checkAccountDataRestored("sender", pre2ndCombineAliceData, orphanedAliceData);
-assertTrue(alice.canMint(false));
+assertTrue(alice.canMint());

 // Recipient checks
 AccountData orphanedChloeData = repository.getAccountRepository().getAccount(chloe.getAddress());
 checkAccountDataRestored("recipient", pre2ndCombineChloeData, orphanedChloeData);
-assertTrue(chloe.canMint(false));
+assertTrue(chloe.canMint());

 // Orphan 1nd combine
 BlockUtils.orphanToBlock(repository, pre1stCombineBlockHeight);
@@ -279,7 +279,7 @@ public class TransferPrivsTests extends Common {
 // Sender checks
 AccountData orphanedDilbertData = repository.getAccountRepository().getAccount(dilbert.getAddress());
 checkAccountDataRestored("sender", pre1stCombineDilbertData, orphanedDilbertData);
-assertTrue(dilbert.canMint(false));
+assertTrue(dilbert.canMint());

 // Recipient checks
 orphanedChloeData = repository.getAccountRepository().getAccount(chloe.getAddress());
@@ -287,7 +287,7 @@ public class TransferPrivsTests extends Common {

 // Chloe canMint() would return true here due to Alice-Chloe reward-share minting at top of method, so undo that minting by orphaning back to block 1
 BlockUtils.orphanToBlock(repository, 1);
-assertFalse(chloe.canMint(false));
+assertFalse(chloe.canMint());
 }
 }

@ -81,7 +81,7 @@
|
|||||||
"transactionV5Timestamp": 0,
|
"transactionV5Timestamp": 0,
|
||||||
"transactionV6Timestamp": 9999999999999,
|
"transactionV6Timestamp": 9999999999999,
|
||||||
"disableReferenceTimestamp": 9999999999999,
|
"disableReferenceTimestamp": 9999999999999,
|
||||||
"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
|
"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
|
||||||
"onlineAccountMinterLevelValidationHeight": 0,
|
"onlineAccountMinterLevelValidationHeight": 0,
|
||||||
"selfSponsorshipAlgoV1Height": 999999999,
|
"selfSponsorshipAlgoV1Height": 999999999,
|
||||||
"selfSponsorshipAlgoV2Height": 999999999,
|
"selfSponsorshipAlgoV2Height": 999999999,
|
||||||
@ -91,14 +91,7 @@
|
|||||||
"arbitraryOptionalFeeTimestamp": 0,
|
"arbitraryOptionalFeeTimestamp": 0,
|
||||||
"unconfirmableRewardSharesHeight": 99999999,
|
"unconfirmableRewardSharesHeight": 99999999,
|
||||||
"disableTransferPrivsTimestamp": 9999999999500,
|
"disableTransferPrivsTimestamp": 9999999999500,
|
||||||
"enableTransferPrivsTimestamp": 9999999999950,
|
"enableTransferPrivsTimestamp": 9999999999950
|
||||||
"cancelSellNameValidationTimestamp": 9999999999999,
|
|
||||||
"disableRewardshareHeight": 9999999999990,
|
|
||||||
"enableRewardshareHeight": 9999999999999,
|
|
||||||
"onlyMintWithNameHeight": 9999999999990,
|
|
||||||
"groupMemberCheckHeight": 9999999999999,
|
|
||||||
"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
|
|
||||||
"removeOnlyMintWithNameHeight": 9999999999999
|
|
||||||
},
|
},
|
||||||
"genesisInfo": {
|
"genesisInfo": {
|
||||||
"version": 4,
|
"version": 4,
|
||||||
|
@ -84,7 +84,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 0,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -94,14 +94,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 0,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 9999999999999,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -83,24 +83,16 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
-"selfSponsorshipAlgoV1Height": 999999999,
-"selfSponsorshipAlgoV2Height": 999999999,
-"selfSponsorshipAlgoV3Height": 999999999,
+"selfSponsorshipAlgoV1Height": 99999999,
 "feeValidationFixTimestamp": 0,
 "chatReferenceTimestamp": 0,
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
+"selfSponsorshipAlgoV2Height": 9999999,
 "disableTransferPrivsTimestamp": 9999999999500,
 "enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999,
 "penaltyFixHeight": 5
 },
 "genesisInfo": {
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -86,7 +86,7 @@
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
 "aggregateSignatureTimestamp": 0,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -96,14 +96,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 500,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 20,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 30,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -85,7 +85,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -95,14 +95,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
@ -86,7 +86,7 @@
 "transactionV5Timestamp": 0,
 "transactionV6Timestamp": 0,
 "disableReferenceTimestamp": 9999999999999,
-"increaseOnlineAccountsDifficultyTimestamp": 9999999999990,
+"increaseOnlineAccountsDifficultyTimestamp": 9999999999999,
 "onlineAccountMinterLevelValidationHeight": 0,
 "selfSponsorshipAlgoV1Height": 999999999,
 "selfSponsorshipAlgoV2Height": 999999999,
@ -96,14 +96,7 @@
 "arbitraryOptionalFeeTimestamp": 0,
 "unconfirmableRewardSharesHeight": 99999999,
 "disableTransferPrivsTimestamp": 9999999999500,
-"enableTransferPrivsTimestamp": 9999999999950,
-"cancelSellNameValidationTimestamp": 9999999999999,
-"disableRewardshareHeight": 9999999999990,
-"enableRewardshareHeight": 9999999999999,
-"onlyMintWithNameHeight": 9999999999990,
-"groupMemberCheckHeight": 9999999999999,
-"decreaseOnlineAccountsDifficultyTimestamp": 9999999999999,
-"removeOnlyMintWithNameHeight": 9999999999999
+"enableTransferPrivsTimestamp": 9999999999950
 },
 "genesisInfo": {
 "version": 4,
|
9 start.sh
@ -33,13 +33,8 @@ fi
 # Limits Java JVM stack size and maximum heap usage.
 # Comment out for bigger systems, e.g. non-routers
 # or when API documentation is enabled
-# JAVA MEMORY SETTINGS BELOW - These settings are essentially optimized default settings.
-# Combined with the latest changes on the Qortal Core in version 4.6.6 and beyond,
-# should give a dramatic increase In performance due to optimized Garbage Collection.
-# These memory arguments should work on machines with as little as 6GB of RAM.
-# If you want to run on a machine with less than 6GB of RAM, it is suggested to increase the '50' below to '75'
-# The Qortal Core will utilize only as much RAM as it needs, but up-to the amount set in percentage below.
-JVM_MEMORY_ARGS="-XX:MaxRAMPercentage=50 -XX:+UseG1GC -Xss1024k"
+# Uncomment (remove '#' sign) line below if your system has less than 12GB of RAM for optimal RAM defaults
+JVM_MEMORY_ARGS="-Xss256m -XX:+UseSerialGC"
 
 # Although java.net.preferIPv4Stack is supposed to be false
 # by default in Java 11, on some platforms (e.g. FreeBSD 12),
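
For reference, the comments removed in the hunk above (from the 4.6.6 start.sh) describe how the G1GC-based settings are meant to be tuned: the heap is capped at a percentage of system RAM, and machines with under 6GB of RAM are advised to raise that cap from 50 to 75. A minimal sketch of that adjustment, using only the flags that appear in the diff (values are illustrative, not part of this change set):

# Sketch only - not part of the diff; flags taken from the removed 4.6.6 lines above.
# Default: cap the JVM heap at 50% of system RAM, use the G1 garbage collector, 1 MB thread stacks.
#JVM_MEMORY_ARGS="-XX:MaxRAMPercentage=50 -XX:+UseG1GC -Xss1024k"

# Per the removed comment, on machines with less than 6GB of RAM, allow up to 75% instead.
JVM_MEMORY_ARGS="-XX:MaxRAMPercentage=75 -XX:+UseG1GC -Xss1024k"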
|