forked from Qortal/qortal

commit f77093731c

Merge branch 'master' into increase-online-timestamp-modulus

# Conflicts:
#	src/main/java/org/qortal/block/Block.java
#	src/main/java/org/qortal/controller/OnlineAccountsManager.java
@@ -17,10 +17,10 @@
<ROW Property="Manufacturer" Value="Qortal"/>
<ROW Property="MsiLogging" MultiBuildValue="DefaultBuild:vp"/>
<ROW Property="NTP_GOOD" Value="false"/>
<ROW Property="ProductCode" Value="1033:{437DDDEF-AC13-4ACF-BA9A-C4611B0F19D8} 1049:{70FAE0CA-E5FD-49A7-BA6C-EF38A6E46BDA} 2052:{7BEC8D7D-E46A-4224-A030-F6CDEAF1381D} 2057:{6D2ED002-6B0B-4D4D-AB1A-8E844CD7ED04} " Type="16"/>
<ROW Property="ProductCode" Value="1033:{B786B6C1-86FA-4917-BAF9-7C9D10959D66} 1049:{60881A63-53FC-4DBE-AF3B-0568F55D2150} 2052:{108D1268-8111-49B9-B768-CC0A0A0CEDE1} 2057:{46DB692E-D942-40D5-B32E-FB94458478BF} " Type="16"/>
<ROW Property="ProductLanguage" Value="2057"/>
<ROW Property="ProductName" Value="Qortal"/>
<ROW Property="ProductVersion" Value="3.3.2" Type="32"/>
<ROW Property="ProductVersion" Value="3.4.0" Type="32"/>
<ROW Property="RECONFIG_NTP" Value="true"/>
<ROW Property="REMOVE_BLOCKCHAIN" Value="YES" Type="4"/>
<ROW Property="REPAIR_BLOCKCHAIN" Value="YES" Type="4"/>
@@ -212,7 +212,7 @@
<ROW Component="ADDITIONAL_LICENSE_INFO_71" ComponentId="{12A3ADBE-BB7A-496C-8869-410681E6232F}" Directory_="jdk.zipfs_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_71" Type="0"/>
<ROW Component="ADDITIONAL_LICENSE_INFO_8" ComponentId="{D53AD95E-CF96-4999-80FC-5812277A7456}" Directory_="java.naming_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_8" Type="0"/>
<ROW Component="ADDITIONAL_LICENSE_INFO_9" ComponentId="{6B7EA9B0-5D17-47A8-B78C-FACE86D15E01}" Directory_="java.net.http_Dir" Attributes="0" KeyPath="ADDITIONAL_LICENSE_INFO_9" Type="0"/>
<ROW Component="AI_CustomARPName" ComponentId="{759069A2-6CF1-45FD-B7D1-53ECC1AC8537}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
<ROW Component="AI_CustomARPName" ComponentId="{D57E945C-0FFB-447C-ADF7-2253CEBF4C0C}" Directory_="APPDIR" Attributes="260" KeyPath="DisplayName" Options="1"/>
<ROW Component="AI_ExePath" ComponentId="{3644948D-AE0B-41BB-9FAF-A79E70490A08}" Directory_="APPDIR" Attributes="260" KeyPath="AI_ExePath"/>
<ROW Component="APPDIR" ComponentId="{680DFDDE-3FB4-47A5-8FF5-934F576C6F91}" Directory_="APPDIR" Attributes="0"/>
<ROW Component="AccessBridgeCallbacks.h" ComponentId="{288055D1-1062-47A3-AA44-5601B4E38AED}" Directory_="bridge_Dir" Attributes="0" KeyPath="AccessBridgeCallbacks.h" Type="0"/>
pom.xml
@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.qortal</groupId>
<artifactId>qortal</artifactId>
<version>3.3.2</version>
<version>3.4.0</version>
<packaging>jar</packaging>
<properties>
<skipTests>true</skipTests>
@@ -11,15 +11,15 @@ public class PrivateKeyAccount extends PublicKeyAccount {
private final Ed25519PrivateKeyParameters edPrivateKeyParams;

/**
* Create PrivateKeyAccount using byte[32] seed.
* Create PrivateKeyAccount using byte[32] private key.
*
* @param seed
* @param privateKey
* byte[32] used to create private/public key pair
* @throws IllegalArgumentException
* if passed invalid seed
* if passed invalid privateKey
*/
public PrivateKeyAccount(Repository repository, byte[] seed) {
this(repository, new Ed25519PrivateKeyParameters(seed, 0));
public PrivateKeyAccount(Repository repository, byte[] privateKey) {
this(repository, new Ed25519PrivateKeyParameters(privateKey, 0));
}

private PrivateKeyAccount(Repository repository, Ed25519PrivateKeyParameters edPrivateKeyParams) {
@@ -37,10 +37,6 @@ public class PrivateKeyAccount extends PublicKeyAccount {
return this.privateKey;
}

public static byte[] toPublicKey(byte[] seed) {
return new Ed25519PrivateKeyParameters(seed, 0).generatePublicKey().getEncoded();
}

public byte[] sign(byte[] message) {
return Crypto.sign(this.edPrivateKeyParams, message);
}
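Side note on the rename above: only the parameter name changes from seed to privateKey; the key material is still a raw 32-byte Ed25519 private key wrapped in BouncyCastle's Ed25519PrivateKeyParameters, exactly as in toPublicKey(). A minimal standalone sketch of that wrap-and-derive step (the zeroed key bytes are illustrative only):

    import org.bouncycastle.crypto.params.Ed25519PrivateKeyParameters;

    public class Ed25519KeyDemo {
        public static void main(String[] args) {
            byte[] privateKey = new byte[32]; // illustrative only - use real key material in practice

            // Same derivation as PrivateKeyAccount.toPublicKey(): wrap the 32 bytes, then derive the public key
            Ed25519PrivateKeyParameters edPrivateKeyParams = new Ed25519PrivateKeyParameters(privateKey, 0);
            byte[] publicKey = edPrivateKeyParams.generatePublicKey().getEncoded();

            System.out.println("Public key length: " + publicKey.length); // 32
        }
    }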
@@ -57,6 +57,7 @@ import org.qortal.transform.TransformationException;
import org.qortal.transform.transaction.ArbitraryTransactionTransformer;
import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import org.qortal.utils.ZipUtils;

@Path("/arbitrary")
@@ -1099,7 +1100,8 @@ public class ArbitraryResource {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, error);
}

if (!Controller.getInstance().isUpToDate()) {
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
}
@@ -42,6 +42,7 @@ import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;

@Path("/crosschain/tradebot")
@Tag(name = "Cross-Chain (Trade-Bot)")
@@ -137,7 +138,8 @@ public class CrossChainTradeBotResource {
if (tradeBotCreateRequest.qortAmount <= 0 || tradeBotCreateRequest.fundingQortAmount <= 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ORDER_SIZE_TOO_SMALL);

if (!Controller.getInstance().isUpToDate())
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);

try (final Repository repository = RepositoryManager.getRepository()) {
@@ -198,7 +200,8 @@ public class CrossChainTradeBotResource {
if (tradeBotRespondRequest.receivingAddress == null || !Crypto.isValidAddress(tradeBotRespondRequest.receivingAddress))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS);

if (!Controller.getInstance().isUpToDate())
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);

// Extract data from cross-chain trading AT
@@ -723,9 +723,9 @@ public class TransactionsResource {
ApiError.BLOCKCHAIN_NEEDS_SYNC, ApiError.INVALID_SIGNATURE, ApiError.INVALID_DATA, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE
})
public String processTransaction(String rawBytes58) {
// Only allow a transaction to be processed if our latest block is less than 30 minutes old
// Only allow a transaction to be processed if our latest block is less than 60 minutes old
// If older than this, we should first wait until the blockchain is synced
final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L);
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC);
@@ -3,10 +3,14 @@ package org.qortal.block;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.charset.StandardCharsets;
import java.text.DecimalFormat;
import java.text.MessageFormat;
import java.text.NumberFormat;
import java.util.*;
import java.util.stream.Collectors;
@@ -24,6 +28,7 @@ import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.block.BlockChain.AccountLevelShareBin;
import org.qortal.controller.OnlineAccountsManager;
import org.qortal.crypto.Crypto;
import org.qortal.crypto.Qortal25519Extras;
import org.qortal.data.account.AccountBalanceData;
import org.qortal.data.account.AccountData;
import org.qortal.data.account.EligibleQoraHolderData;
@@ -118,6 +123,8 @@ public class Block {

/** Remote/imported/loaded AT states */
protected List<ATStateData> atStates;
/** Remote hash of AT states - in lieu of full AT state data in {@code atStates} */
protected byte[] atStatesHash;
/** Locally-generated AT states */
protected List<ATStateData> ourAtStates;
/** Locally-generated AT fees */
@@ -216,11 +223,10 @@ public class Block {
return accountAmount;
}
}

/** Always use getExpandedAccounts() to access this, as it's lazy-instantiated. */
private List<ExpandedAccount> cachedExpandedAccounts = null;

/** Opportunistic cache of this block's valid online accounts. Only created by call to isValid(). */
private List<OnlineAccountData> cachedValidOnlineAccounts = null;
/** Opportunistic cache of this block's valid online reward-shares. Only created by call to isValid(). */
private List<RewardShareData> cachedOnlineRewardShares = null;

@@ -281,6 +287,35 @@ public class Block {
this.blockData.setTotalFees(totalFees);
}

/**
* Constructs new Block using passed transaction and minimal AT state info.
* <p>
* This constructor typically used when receiving a serialized block over the network.
*
* @param repository
* @param blockData
* @param transactions
* @param atStatesHash
*/
public Block(Repository repository, BlockData blockData, List<TransactionData> transactions, byte[] atStatesHash) {
this(repository, blockData);

this.transactions = new ArrayList<>();

long totalFees = 0;

// We have to sum fees too
for (TransactionData transactionData : transactions) {
this.transactions.add(Transaction.fromData(repository, transactionData));
totalFees += transactionData.getFee();
}

this.atStatesHash = atStatesHash;
totalFees += this.blockData.getATFees();

this.blockData.setTotalFees(totalFees);
}

/**
* Constructs new Block with empty transaction list, using passed minter account.
*
@@ -313,18 +348,21 @@ public class Block {
int version = parentBlock.getNextBlockVersion();
byte[] reference = parentBlockData.getSignature();

// Fetch our list of online accounts
List<OnlineAccountData> onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts();
if (onlineAccounts.isEmpty()) {
LOGGER.error("No online accounts - not even our own?");
// Qortal: minter is always a reward-share, so find actual minter and get their effective minting level
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey());
if (minterLevel == 0) {
LOGGER.error("Minter effective level returned zero?");
return null;
}

// Find newest online accounts timestamp
long onlineAccountsTimestamp = 0;
for (OnlineAccountData onlineAccountData : onlineAccounts) {
if (onlineAccountData.getTimestamp() > onlineAccountsTimestamp)
onlineAccountsTimestamp = onlineAccountData.getTimestamp();
long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel);
long onlineAccountsTimestamp = OnlineAccountsManager.getCurrentOnlineAccountTimestamp();

// Fetch our list of online accounts
List<OnlineAccountData> onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(onlineAccountsTimestamp);
if (onlineAccounts.isEmpty()) {
LOGGER.error("No online accounts - not even our own?");
return null;
}

// Load sorted list of reward share public keys into memory, so that the indexes can be obtained.
@@ -335,10 +373,6 @@ public class Block {
// Map using index into sorted list of reward-shares as key
Map<Integer, OnlineAccountData> indexedOnlineAccounts = new HashMap<>();
for (OnlineAccountData onlineAccountData : onlineAccounts) {
// Disregard online accounts with different timestamps
if (onlineAccountData.getTimestamp() != onlineAccountsTimestamp)
continue;

Integer accountIndex = getRewardShareIndex(onlineAccountData.getPublicKey(), allRewardSharePublicKeys);
if (accountIndex == null)
// Online account (reward-share) with current timestamp but reward-share cancelled
@@ -355,26 +389,29 @@ public class Block {
byte[] encodedOnlineAccounts = BlockTransformer.encodeOnlineAccounts(onlineAccountsSet);
int onlineAccountsCount = onlineAccountsSet.size();

byte[] onlineAccountsSignatures;
if (timestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp()) {
// Collate all signatures
Collection<byte[]> signaturesToAggregate = indexedOnlineAccounts.values()
.stream()
.map(OnlineAccountData::getSignature)
.collect(Collectors.toList());

// Aggregated, single signature
onlineAccountsSignatures = Qortal25519Extras.aggregateSignatures(signaturesToAggregate);
} else {
// Concatenate online account timestamp signatures (in correct order)
byte[] onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH];
onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH];
for (int i = 0; i < onlineAccountsCount; ++i) {
Integer accountIndex = accountIndexes.get(i);
OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex);
System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH);
}
}

byte[] minterSignature = minter.sign(BlockTransformer.getBytesForMinterSignature(parentBlockData,
minter.getPublicKey(), encodedOnlineAccounts));

// Qortal: minter is always a reward-share, so find actual minter and get their effective minting level
int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey());
if (minterLevel == 0) {
LOGGER.error("Minter effective level returned zero?");
return null;
}

long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel);

int transactionCount = 0;
byte[] transactionsSignature = null;
int height = parentBlockData.getHeight() + 1;
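For scale, the two encodings above differ sharply in how much signature data ends up in the block. A rough standalone comparison, assuming Transformer.SIGNATURE_LENGTH is the usual 64-byte Ed25519 signature length:

    public class OnlineAccountsSignatureSize {
        private static final int SIGNATURE_LENGTH = 64; // assumption: matches Transformer.SIGNATURE_LENGTH

        public static void main(String[] args) {
            int onlineAccountsCount = 1000;

            // Legacy encoding: one signature per online account, concatenated in account-index order
            int concatenatedLength = onlineAccountsCount * SIGNATURE_LENGTH; // 64,000 bytes

            // Aggregated encoding: a single signature regardless of account count
            int aggregatedLength = SIGNATURE_LENGTH;                         // 64 bytes

            System.out.printf("concatenated: %d bytes, aggregated: %d bytes%n",
                    concatenatedLength, aggregatedLength);
        }
    }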
@@ -979,49 +1016,59 @@ public class Block {
if (this.blockData.getOnlineAccountsSignatures() == null || this.blockData.getOnlineAccountsSignatures().length == 0)
return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MISSING;

if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) {
// We expect just the one, aggregated signature
if (this.blockData.getOnlineAccountsSignatures().length != Transformer.SIGNATURE_LENGTH)
return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED;
} else {
if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH)
return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED;
}

// Check signatures
long onlineTimestamp = this.blockData.getOnlineAccountsTimestamp();
byte[] onlineTimestampBytes = Longs.toByteArray(onlineTimestamp);

// If this block is much older than current online timestamp, then there's no point checking current online accounts
List<OnlineAccountData> currentOnlineAccounts = onlineTimestamp < NTP.getTime() - OnlineAccountsManager.getOnlineTimestampModulus()
? null
: OnlineAccountsManager.getInstance().getOnlineAccounts();
List<OnlineAccountData> latestBlocksOnlineAccounts = OnlineAccountsManager.getInstance().getLatestBlocksOnlineAccounts();

// Extract online accounts' timestamp signatures from block data
// Extract online accounts' timestamp signatures from block data. Only one signature if aggregated.
List<byte[]> onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures());

// We'll build up a list of online accounts to hand over to Controller if block is added to chain
// and this will become latestBlocksOnlineAccounts (above) to reduce CPU load when we process next block...
List<OnlineAccountData> ourOnlineAccounts = new ArrayList<>();
if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) {
// Aggregate all public keys
Collection<byte[]> publicKeys = onlineRewardShares.stream()
.map(RewardShareData::getRewardSharePublicKey)
.collect(Collectors.toList());

byte[] aggregatePublicKey = Qortal25519Extras.aggregatePublicKeys(publicKeys);

byte[] aggregateSignature = onlineAccountsSignatures.get(0);

// One-step verification of aggregate signature using aggregate public key
if (!Qortal25519Extras.verifyAggregated(aggregatePublicKey, aggregateSignature, onlineTimestampBytes))
return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT;
} else {
// Build block's view of online accounts
Set<OnlineAccountData> onlineAccounts = new HashSet<>();
for (int i = 0; i < onlineAccountsSignatures.size(); ++i) {
byte[] signature = onlineAccountsSignatures.get(i);
byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey();

OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey);
ourOnlineAccounts.add(onlineAccountData);
onlineAccounts.add(onlineAccountData);
}

// If signature is still current then no need to perform Ed25519 verify
if (currentOnlineAccounts != null && currentOnlineAccounts.remove(onlineAccountData))
// remove() returned true, so online account still current
// and one less entry in currentOnlineAccounts to check next time
continue;
// Remove those already validated & cached by online accounts manager - no need to re-validate them
OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp);

// If signature was okay in latest block then no need to perform Ed25519 verify
if (latestBlocksOnlineAccounts != null && latestBlocksOnlineAccounts.contains(onlineAccountData))
continue;

if (!Crypto.verify(publicKey, signature, onlineTimestampBytes))
// Validate the rest
for (OnlineAccountData onlineAccount : onlineAccounts)
if (!Crypto.verify(onlineAccount.getPublicKey(), onlineAccount.getSignature(), onlineTimestampBytes))
return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT;

// We've validated these, so allow online accounts manager to cache
OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp);
}

// All online accounts valid, so save our list of online accounts for potential later use
this.cachedValidOnlineAccounts = ourOnlineAccounts;
this.cachedOnlineRewardShares = onlineRewardShares;

return ValidationResult.OK;
@@ -1194,7 +1241,7 @@ public class Block {
*/
private ValidationResult areAtsValid() throws DataException {
// Locally generated AT states should be valid so no need to re-execute them
if (this.ourAtStates == this.getATStates()) // Note object reference compare
if (this.ourAtStates != null && this.ourAtStates == this.atStates) // Note object reference compare
return ValidationResult.OK;

// Generate local AT states for comparison
@@ -1208,8 +1255,33 @@ public class Block {
if (this.ourAtFees != this.blockData.getATFees())
return ValidationResult.AT_STATES_MISMATCH;

// Note: this.atStates fully loaded thanks to this.getATStates() call above
for (int s = 0; s < this.atStates.size(); ++s) {
// If we have a single AT states hash then compare that in preference
if (this.atStatesHash != null) {
int atBytesLength = blockData.getATCount() * BlockTransformer.AT_ENTRY_LENGTH;
ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength);

try {
for (ATStateData atStateData : this.ourAtStates) {
atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
atHashBytes.write(atStateData.getStateHash());
atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
}
} catch (IOException e) {
throw new DataException("Couldn't validate AT states hash due to serialization issue?", e);
}

byte[] ourAtStatesHash = Crypto.digest(atHashBytes.toByteArray());
if (!Arrays.equals(ourAtStatesHash, this.atStatesHash))
return ValidationResult.AT_STATES_MISMATCH;

// Use our AT state data from now on
this.atStates = this.ourAtStates;
return ValidationResult.OK;
}

// Note: this.atStates fully loaded thanks to this.getATStates() call:
this.getATStates();
for (int s = 0; s < this.ourAtStates.size(); ++s) {
ATStateData ourAtState = this.ourAtStates.get(s);
ATStateData theirAtState = this.atStates.get(s);

@@ -1367,9 +1439,6 @@ public class Block {

postBlockTidy();

// Give Controller our cached, valid online accounts data (if any) to help reduce CPU load for next block
OnlineAccountsManager.getInstance().pushLatestBlocksOnlineAccounts(this.cachedValidOnlineAccounts);

// Log some debugging info relating to the block weight calculation
this.logDebugInfo();
}
@@ -1585,9 +1654,6 @@ public class Block {
this.blockData.setHeight(null);

postBlockTidy();

// Remove any cached, valid online accounts data from Controller
OnlineAccountsManager.getInstance().popLatestBlocksOnlineAccounts();
}

protected void orphanTransactionsFromBlock() throws DataException {
@@ -70,7 +70,9 @@ public class BlockChain {
shareBinFix,
calcChainWeightTimestamp,
transactionV5Timestamp,
transactionV6Timestamp;
transactionV6Timestamp,
disableReferenceTimestamp,
aggregateSignatureTimestamp;
}

// Custom transaction fees
@@ -419,6 +421,14 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.transactionV6Timestamp.name()).longValue();
}

public long getDisableReferenceTimestamp() {
return this.featureTriggers.get(FeatureTrigger.disableReferenceTimestamp.name()).longValue();
}

public long getAggregateSignatureTimestamp() {
return this.featureTriggers.get(FeatureTrigger.aggregateSignatureTimestamp.name()).longValue();
}

// More complex getters for aspects that change by height or timestamp

public long getRewardAtHeight(int ourHeight) {
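Each feature trigger added here follows the same convention: a named activation timestamp looked up from the featureTriggers map, which callers compare against a block or online-accounts timestamp to choose a code path. A minimal hedged sketch of that gating, using the getter introduced above (project classes assumed):

    // Sketch only: decide which online-accounts signature scheme applies to a block,
    // based on the aggregate-signature feature trigger.
    static boolean usesAggregateSignature(long blockTimestamp) {
        return blockTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp();
    }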
@@ -65,9 +65,8 @@ public class BlockMinter extends Thread {
// Lite nodes do not mint
return;
}

try (final Repository repository = RepositoryManager.getRepository()) {
if (Settings.getInstance().getWipeUnconfirmedOnStart()) {
try (final Repository repository = RepositoryManager.getRepository()) {
// Wipe existing unconfirmed transactions
List<TransactionData> unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions();

@@ -77,10 +76,12 @@ public class BlockMinter extends Thread {
}

repository.saveChanges();
} catch (DataException e) {
LOGGER.warn("Repository issue trying to wipe unconfirmed transactions on start-up: {}", e.getMessage());
// Fall-through to normal behaviour in case we can recover
}
}

// Going to need this a lot...
BlockRepository blockRepository = repository.getBlockRepository();
BlockData previousBlockData = null;

// Vars to keep track of blocks that were skipped due to chain weight
@@ -94,13 +95,12 @@ public class BlockMinter extends Thread {
boolean isMintingPossible = false;
boolean wasMintingPossible = isMintingPossible;
while (running) {
repository.discardChanges(); // Free repository locks, if any

if (isMintingPossible != wasMintingPossible)
Controller.getInstance().onMintingPossibleChange(isMintingPossible);

wasMintingPossible = isMintingPossible;

try {
// Sleep for a while
Thread.sleep(1000);

@@ -114,10 +114,14 @@ public class BlockMinter extends Thread {
if (minLatestBlockTimestamp == null)
continue;

// No online accounts? (e.g. during startup)
if (OnlineAccountsManager.getInstance().getOnlineAccounts().isEmpty())
// No online accounts for current timestamp? (e.g. during startup)
if (!OnlineAccountsManager.getInstance().hasOnlineAccounts())
continue;

try (final Repository repository = RepositoryManager.getRepository()) {
// Going to need this a lot...
BlockRepository blockRepository = repository.getBlockRepository();

List<MintingAccountData> mintingAccountsData = repository.getAccountRepository().getMintingAccounts();
// No minting accounts?
if (mintingAccountsData.isEmpty())
@@ -194,6 +198,10 @@ public class BlockMinter extends Thread {
// so go ahead and mint a block if possible.
isMintingPossible = true;

// Reattach newBlocks to new repository handle
for (Block newBlock : newBlocks)
newBlock.setRepository(repository);

// Check blockchain hasn't changed
if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) {
previousBlockData = lastBlockData;
@@ -213,8 +221,8 @@ public class BlockMinter extends Thread {
List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());

// We might need to sit the next block out, if one of our minting accounts signed the previous one
final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
if (mintedLastBlock) {
LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
continue;
@@ -224,7 +232,7 @@ public class BlockMinter extends Thread {
// The last iteration found a higher weight block in the network, so sleep for a while
// to allow is to sync the higher weight chain. We are sleeping here rather than when
// detected as we don't want to hold the blockchain lock open.
LOGGER.debug("Sleeping for 10 seconds...");
LOGGER.info("Sleeping for 10 seconds...");
Thread.sleep(10 * 1000L);
}

@@ -276,7 +284,12 @@ public class BlockMinter extends Thread {
continue;

List<Block> goodBlocks = new ArrayList<>();
for (Block testBlock : newBlocks) {
boolean wasInvalidBlockDiscarded = false;
Iterator<Block> newBlocksIterator = newBlocks.iterator();

while (newBlocksIterator.hasNext()) {
Block testBlock = newBlocksIterator.next();

// Is new block's timestamp valid yet?
// We do a separate check as some timestamp checks are skipped for testchains
if (testBlock.isTimestampValid() != ValidationResult.OK)
@@ -289,13 +302,21 @@ public class BlockMinter extends Thread {
if (result != ValidationResult.OK) {
moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name())));

continue;
newBlocksIterator.remove();
wasInvalidBlockDiscarded = true;
/*
* Bail out fast so that we loop around from the top again.
* This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks,
* via the Blocks.remint() method, which avoids having to re-process Block ATs all over again.
* Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class).
*/
break;
}

goodBlocks.add(testBlock);
}

if (goodBlocks.isEmpty())
if (wasInvalidBlockDiscarded || goodBlocks.isEmpty())
continue;

// Pick best block
@@ -333,16 +354,14 @@ public class BlockMinter extends Thread {
// If less than 30 seconds has passed since first detection the higher weight chain,
// we should skip our block submission to give us the opportunity to sync to the better chain
if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) {
LOGGER.debug("Higher weight chain found in peers, so not signing a block this round");
LOGGER.debug("Time since detected: {}ms", NTP.getTime() - timeOfLastLowWeightBlock);
LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
continue;
}
else {
} else {
// More than 30 seconds have passed, so we should submit our block candidate anyway.
LOGGER.debug("More than 30 seconds passed, so proceeding to submit block candidate...");
LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
}
}
else {
} else {
LOGGER.debug("No higher weight chain found in peers");
}
} catch (DataException e) {
@@ -356,7 +375,6 @@ public class BlockMinter extends Thread {
parentSignatureForLastLowWeightBlock = null;
timeOfLastLowWeightBlock = null;


// Add unconfirmed transactions
addUnconfirmedTransactions(repository, newBlock);

@@ -421,14 +439,15 @@ public class BlockMinter extends Thread {
Network network = Network.getInstance();
network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData));
}
}
} catch (DataException e) {
LOGGER.warn("Repository issue while running block minter", e);
}
} catch (InterruptedException e) {
// We've been interrupted - time to exit
return;
}
}
}

/**
* Adds unconfirmed transactions to passed block.
@@ -557,19 +576,24 @@ public class BlockMinter extends Thread {
// This peer has common block data
CommonBlockData commonBlockData = peer.getCommonBlockData();
BlockSummaryData commonBlockSummaryData = commonBlockData.getCommonBlockSummary();
if (commonBlockData.getChainWeight() != null) {
if (commonBlockData.getChainWeight() != null && peer.getCommonBlockData().getBlockSummariesAfterCommonBlock() != null) {
// The synchronizer has calculated this peer's chain weight
if (!Synchronizer.getInstance().containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) {
// .. and it doesn't hold any invalid blocks
BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock());
BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight);
BigInteger peerChainWeight = commonBlockData.getChainWeight();
if (peerChainWeight.compareTo(ourChainWeight) >= 0) {
// This peer has a higher weight chain than ours
LOGGER.debug("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
LOGGER.info("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
return true;

} else {
LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
}
} else {
LOGGER.debug("Peer {} has an invalid block", peer);
}
} else {
LOGGER.debug("Peer {} has no chain weight", peer);
}
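The deciding test in this peer comparison is a plain BigInteger comparison in which the peer also wins ties: compareTo(...) >= 0 means the peer chain is at least as heavy, so the minter holds off for this round. A tiny self-contained illustration with made-up weights:

    import java.math.BigInteger;

    public class ChainWeightCompareDemo {
        public static void main(String[] args) {
            // Illustrative values only - real weights come from block summaries
            BigInteger ourChainWeightSinceCommonBlock = new BigInteger("1000");
            BigInteger blockCandidateWeight = new BigInteger("50");
            BigInteger peerChainWeight = new BigInteger("1050");

            BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight);

            // >= 0 covers both "peer is strictly heavier" and the tie case,
            // so an equal-weight peer chain is also a reason not to sign this round.
            boolean peerChainIsHigherWeight = peerChainWeight.compareTo(ourChainWeight) >= 0;

            System.out.println("Peer chain higher (or equal) weight: " + peerChainIsHigherWeight); // true
        }
    }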
@@ -113,6 +113,7 @@ public class Controller extends Thread {
private long repositoryBackupTimestamp = startTime; // ms
private long repositoryMaintenanceTimestamp = startTime; // ms
private long repositoryCheckpointTimestamp = startTime; // ms
private long prunePeersTimestamp = startTime; // ms
private long ntpCheckTimestamp = startTime; // ms
private long deleteExpiredTimestamp = startTime + DELETE_EXPIRED_INTERVAL; // ms

@@ -552,6 +553,7 @@ public class Controller extends Thread {
final long repositoryBackupInterval = Settings.getInstance().getRepositoryBackupInterval();
final long repositoryCheckpointInterval = Settings.getInstance().getRepositoryCheckpointInterval();
long repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval();
final long prunePeersInterval = 5 * 60 * 1000L; // Every 5 minutes

// Start executor service for trimming or pruning
PruneManager.getInstance().start();
@@ -649,11 +651,16 @@ public class Controller extends Thread {
}

// Prune stuck/slow/old peers
if (now >= prunePeersTimestamp + prunePeersInterval) {
prunePeersTimestamp = now + prunePeersInterval;

try {
LOGGER.debug("Pruning peers...");
Network.getInstance().prunePeers();
} catch (DataException e) {
LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage()));
}
}

// Delete expired transactions
if (now >= deleteExpiredTimestamp) {
@@ -787,23 +794,24 @@ public class Controller extends Thread {
String actionText;

// Use a more tolerant latest block timestamp in the isUpToDate() calls below to reduce misleading statuses.
// Any block in the last 30 minutes is considered "up to date" for the purposes of displaying statuses.
final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L);
// Any block in the last 2 hours is considered "up to date" for the purposes of displaying statuses.
// This also aligns with the time interval required for continued online account submission.
final Long minLatestBlockTimestamp = NTP.getTime() - (2 * 60 * 60 * 1000L);

// Only show sync percent if it's less than 100, to avoid confusion
final Integer syncPercent = Synchronizer.getInstance().getSyncPercent();
final boolean isSyncing = (syncPercent != null && syncPercent < 100);

synchronized (Synchronizer.getInstance().syncLock) {
if (Settings.getInstance().isLite()) {
actionText = Translator.INSTANCE.translate("SysTray", "LITE_NODE");
SysTray.getInstance().setTrayIcon(4);
}
else if (this.isMintingPossible) {
actionText = Translator.INSTANCE.translate("SysTray", "MINTING_ENABLED");
SysTray.getInstance().setTrayIcon(2);
}
else if (numberOfPeers < Settings.getInstance().getMinBlockchainPeers()) {
actionText = Translator.INSTANCE.translate("SysTray", "CONNECTING");
SysTray.getInstance().setTrayIcon(3);
}
else if (!this.isUpToDate(minLatestBlockTimestamp) && Synchronizer.getInstance().isSynchronizing()) {
else if (!this.isUpToDate(minLatestBlockTimestamp) && isSyncing) {
actionText = String.format("%s - %d%%", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN"), Synchronizer.getInstance().getSyncPercent());
SysTray.getInstance().setTrayIcon(3);
}
@@ -811,6 +819,10 @@ public class Controller extends Thread {
actionText = String.format("%s", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN"));
SysTray.getInstance().setTrayIcon(3);
}
else if (OnlineAccountsManager.getInstance().hasOnlineAccounts()) {
actionText = Translator.INSTANCE.translate("SysTray", "MINTING_ENABLED");
SysTray.getInstance().setTrayIcon(2);
}
else {
actionText = Translator.INSTANCE.translate("SysTray", "MINTING_DISABLED");
SysTray.getInstance().setTrayIcon(4);
@@ -1229,6 +1241,10 @@ public class Controller extends Thread {
OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV2Message(peer, message);
break;

case GET_ONLINE_ACCOUNTS_V3:
OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV3Message(peer, message);
break;

case GET_ARBITRARY_DATA:
// Not currently supported
break;
@@ -1362,6 +1378,18 @@ public class Controller extends Thread {

Block block = new Block(repository, blockData);

// V2 support
if (peer.getPeersVersion() >= BlockV2Message.MIN_PEER_VERSION) {
Message blockMessage = new BlockV2Message(block);
blockMessage.setId(message.getId());
if (!peer.sendMessage(blockMessage)) {
peer.disconnect("failed to send block");
// Don't fall-through to caching because failure to send might be from failure to build message
return;
}
return;
}

CachedBlockMessage blockMessage = new CachedBlockMessage(block);
blockMessage.setId(message.getId());

@@ -1,12 +1,15 @@
package org.qortal.controller;

import com.google.common.hash.HashCode;
import com.google.common.primitives.Longs;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.account.Account;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.account.PublicKeyAccount;
import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.crypto.Crypto;
import org.qortal.crypto.Qortal25519Extras;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.network.OnlineAccountData;
@@ -18,103 +21,63 @@ import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import org.qortal.utils.NamedThreadFactory;

import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;

public class OnlineAccountsManager extends Thread {

private class OurOnlineAccountsThread extends Thread {

public void run() {
try {
while (!isStopping) {
Thread.sleep(10000L);

// Refresh our online accounts signatures?
sendOurOnlineAccountsInfo();

}
} catch (InterruptedException e) {
// Fall through to exit thread
}
}
}

public class OnlineAccountsManager {

private static final Logger LOGGER = LogManager.getLogger(OnlineAccountsManager.class);

private static OnlineAccountsManager instance;
// 'Current' as in 'now'

/**
* How long online accounts signatures last before they expire.
*/
private static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L;
private static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L;

/**
* How many 'current' timestamp-sets of online accounts we cache.
*/
private static final int MAX_CACHED_TIMESTAMP_SETS = 2;

/**
* How many timestamp-sets of online accounts we cache for 'latest blocks'.
*/
private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 3;

private static final long ONLINE_ACCOUNTS_QUEUE_INTERVAL = 100L; //ms
private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms
private static final long ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL = 60 * 1000L; // ms
private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 15 * 1000L; // ms

private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; // v3.2.0
private static final long ONLINE_ACCOUNTS_V3_PEER_VERSION = 0x03000300cbL; // v3.3.203

private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts"));
private volatile boolean isStopping = false;

// To do with online accounts list
private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms
private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 1 * 60 * 1000L; // ms
public static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L;
public static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L;
/** How many (latest) blocks' worth of online accounts we cache */
private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 2;
private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L;
private final Set<OnlineAccountData> onlineAccountsImportQueue = ConcurrentHashMap.newKeySet();

private long onlineAccountsTasksTimestamp = Controller.startTime + ONLINE_ACCOUNTS_TASKS_INTERVAL; // ms
/**
* Cache of 'current' online accounts, keyed by timestamp
*/
private final Map<Long, Set<OnlineAccountData>> currentOnlineAccounts = new ConcurrentHashMap<>();
/**
* Cache of hash-summary of 'current' online accounts, keyed by timestamp, then leading byte of public key.
*/
private final Map<Long, Map<Byte, byte[]>> currentOnlineAccountsHashes = new ConcurrentHashMap<>();

private final List<OnlineAccountData> onlineAccountsImportQueue = Collections.synchronizedList(new ArrayList<>());
/**
* Cache of online accounts for latest blocks - not necessarily 'current' / now.
* <i>Probably</i> only accessed / modified by a single Synchronizer thread.
*/
private final SortedMap<Long, Set<OnlineAccountData>> latestBlocksOnlineAccounts = new ConcurrentSkipListMap<>();


/** Cache of current 'online accounts' */
List<OnlineAccountData> onlineAccounts = new ArrayList<>();
/** Cache of latest blocks' online accounts */
Deque<List<OnlineAccountData>> latestBlocksOnlineAccounts = new ArrayDeque<>(MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS);

public OnlineAccountsManager() {

}

public static synchronized OnlineAccountsManager getInstance() {
if (instance == null) {
instance = new OnlineAccountsManager();
}

return instance;
}

public void run() {

// Start separate thread to prepare our online accounts
// This could be converted to a thread pool later if more concurrency is needed
OurOnlineAccountsThread ourOnlineAccountsThread = new OurOnlineAccountsThread();
ourOnlineAccountsThread.start();

try {
while (!Controller.isStopping()) {
Thread.sleep(100L);

final Long now = NTP.getTime();
if (now == null) {
continue;
}

// Perform tasks to do with managing online accounts list
if (now >= onlineAccountsTasksTimestamp) {
onlineAccountsTasksTimestamp = now + ONLINE_ACCOUNTS_TASKS_INTERVAL;
performOnlineAccountsTasks();
}

// Process queued online account verifications
this.processOnlineAccountsImportQueue();

}
} catch (InterruptedException e) {
// Fall through to exit thread
}

ourOnlineAccountsThread.interrupt();
}

public void shutdown() {
isStopping = true;
this.interrupt();
}
private boolean hasOurOnlineAccounts = false;

public static long getOnlineTimestampModulus() {
Long now = NTP.getTime();
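The two moduli above are the point of this branch: an online-accounts timestamp is not a raw clock reading but the current NTP time rounded down to a multiple of the modulus (5 minutes before the switch-over, 30 minutes after), so every node in the same window signs exactly the same value. A small standalone sketch of that rounding, mirroring getCurrentOnlineAccountTimestamp() in the next hunk:

    public class OnlineTimestampDemo {
        private static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L;   // 5 minutes
        private static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L;  // 30 minutes

        // Same integer-division trick as getCurrentOnlineAccountTimestamp():
        // round 'now' down to the start of its current window.
        static long quantize(long now, long modulus) {
            return (now / modulus) * modulus;
        }

        public static void main(String[] args) {
            long now = 1_700_000_123_456L; // illustrative wall-clock ms; real code uses NTP.getTime()

            System.out.println(quantize(now, ONLINE_TIMESTAMP_MODULUS_V1)); // 1700000100000 (5-min window)
            System.out.println(quantize(now, ONLINE_TIMESTAMP_MODULUS_V2)); // 1699999200000 (30-min window)
        }
    }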
@@ -123,183 +86,316 @@ public class OnlineAccountsManager extends Thread {
}
return ONLINE_TIMESTAMP_MODULUS_V1;
}


// Online accounts import queue

private void processOnlineAccountsImportQueue() {
if (this.onlineAccountsImportQueue.isEmpty()) {
// Nothing to do
return;
}

LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size());

try (final Repository repository = RepositoryManager.getRepository()) {

List<OnlineAccountData> onlineAccountDataCopy = new ArrayList<>(this.onlineAccountsImportQueue);
for (OnlineAccountData onlineAccountData : onlineAccountDataCopy) {
if (isStopping) {
return;
}

this.verifyAndAddAccount(repository, onlineAccountData);

// Remove from queue
onlineAccountsImportQueue.remove(onlineAccountData);
}

LOGGER.debug("Finished processing online accounts import queue");

} catch (DataException e) {
LOGGER.error(String.format("Repository issue while verifying online accounts"), e);
}
}


// Utilities

private void verifyAndAddAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException {
final Long now = NTP.getTime();
public static Long getCurrentOnlineAccountTimestamp() {
Long now = NTP.getTime();
if (now == null)
return;
return null;

PublicKeyAccount otherAccount = new PublicKeyAccount(repository, onlineAccountData.getPublicKey());

// Check timestamp is 'recent' here
if (Math.abs(onlineAccountData.getTimestamp() - now) > getOnlineTimestampModulus() * 2) {
LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp()));
return;
long onlineTimestampModulus = getOnlineTimestampModulus();
return (now / onlineTimestampModulus) * onlineTimestampModulus;
}

// Verify
byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp());
if (!otherAccount.verify(onlineAccountData.getSignature(), data)) {
LOGGER.trace(() -> String.format("Rejecting invalid online account %s", otherAccount.getAddress()));
return;
private OnlineAccountsManager() {
}

// Qortal: check online account is actually reward-share
RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(onlineAccountData.getPublicKey());
if (rewardShareData == null) {
// Reward-share doesn't even exist - probably not a good sign
LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(onlineAccountData.getPublicKey())));
return;
private static class SingletonContainer {
private static final OnlineAccountsManager INSTANCE = new OnlineAccountsManager();
}

Account mintingAccount = new Account(repository, rewardShareData.getMinter());
if (!mintingAccount.canMint()) {
// Minting-account component of reward-share can no longer mint - disregard
LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress()));
return;
public static OnlineAccountsManager getInstance() {
return SingletonContainer.INSTANCE;
}

synchronized (this.onlineAccounts) {
OnlineAccountData existingAccountData = this.onlineAccounts.stream().filter(account -> Arrays.equals(account.getPublicKey(), onlineAccountData.getPublicKey())).findFirst().orElse(null);
public void start() {
// Expire old online accounts signatures
executor.scheduleAtFixedRate(this::expireOldOnlineAccounts, ONLINE_ACCOUNTS_TASKS_INTERVAL, ONLINE_ACCOUNTS_TASKS_INTERVAL, TimeUnit.MILLISECONDS);

if (existingAccountData != null) {
if (existingAccountData.getTimestamp() < onlineAccountData.getTimestamp()) {
this.onlineAccounts.remove(existingAccountData);
// Send our online accounts
executor.scheduleAtFixedRate(this::sendOurOnlineAccountsInfo, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS);

LOGGER.trace(() -> String.format("Updated online account %s with timestamp %d (was %d)", otherAccount.getAddress(), onlineAccountData.getTimestamp(), existingAccountData.getTimestamp()));
} else {
LOGGER.trace(() -> String.format("Not updating existing online account %s", otherAccount.getAddress()));
// Request online accounts from peers (legacy)
executor.scheduleAtFixedRate(this::requestLegacyRemoteOnlineAccounts, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS);
// Request online accounts from peers (V3+)
executor.scheduleAtFixedRate(this::requestRemoteOnlineAccounts, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS);

return;
}
} else {
LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp()));
// Process import queue
executor.scheduleWithFixedDelay(this::processOnlineAccountsImportQueue, ONLINE_ACCOUNTS_QUEUE_INTERVAL, ONLINE_ACCOUNTS_QUEUE_INTERVAL, TimeUnit.MILLISECONDS);
}

this.onlineAccounts.add(onlineAccountData);
}
public void shutdown() {
isStopping = true;
executor.shutdownNow();
}

// Testing support
public void ensureTestingAccountsOnline(PrivateKeyAccount... onlineAccounts) {
if (!BlockChain.getInstance().isTestChain()) {
LOGGER.warn("Ignoring attempt to ensure test account is online for non-test chain!");
return;
}

final Long now = NTP.getTime();
if (now == null)
final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
if (onlineAccountsTimestamp == null)
return;

final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now);
byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);
final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp();

synchronized (this.onlineAccounts) {
this.onlineAccounts.clear();

Set<OnlineAccountData> replacementAccounts = new HashSet<>();
for (PrivateKeyAccount onlineAccount : onlineAccounts) {
// Check mintingAccount is actually reward-share?

byte[] signature = onlineAccount.sign(timestampBytes);
byte[] signature = useAggregateCompatibleSignature
? Qortal25519Extras.signForAggregation(onlineAccount.getPrivateKey(), timestampBytes)
: onlineAccount.sign(timestampBytes);
byte[] publicKey = onlineAccount.getPublicKey();

OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey);
this.onlineAccounts.add(ourOnlineAccountData);
replacementAccounts.add(ourOnlineAccountData);
}

this.currentOnlineAccounts.clear();
addAccounts(replacementAccounts);
}

// Online accounts import queue
|
||||
|
||||
private void processOnlineAccountsImportQueue() {
|
||||
if (this.onlineAccountsImportQueue.isEmpty())
|
||||
// Nothing to do
|
||||
return;
|
||||
|
||||
LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size());
|
||||
|
||||
Set<OnlineAccountData> onlineAccountsToAdd = new HashSet<>();
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) {
|
||||
if (isStopping)
|
||||
return;
|
||||
|
||||
boolean isValid = this.isValidCurrentAccount(repository, onlineAccountData);
|
||||
if (isValid)
|
||||
onlineAccountsToAdd.add(onlineAccountData);
|
||||
|
||||
// Remove from queue
|
||||
onlineAccountsImportQueue.remove(onlineAccountData);
|
||||
}
|
||||
} catch (DataException e) {
|
||||
LOGGER.error("Repository issue while verifying online accounts", e);
|
||||
}
|
||||
|
||||
if (!onlineAccountsToAdd.isEmpty()) {
|
||||
LOGGER.debug("Merging {} validated online accounts from import queue", onlineAccountsToAdd.size());
|
||||
addAccounts(onlineAccountsToAdd);
|
||||
}
|
||||
}
|
||||
|
||||
private void performOnlineAccountsTasks() {
|
||||
// Utilities
|
||||
|
||||
public static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) {
|
||||
if (inplaceArray == null)
|
||||
return Arrays.copyOf(otherArray, otherArray.length);
|
||||
|
||||
// Start from index 1 to enforce static leading byte
|
||||
for (int i = 1; i < otherArray.length; i++)
|
||||
inplaceArray[i] ^= otherArray[i];
|
||||
|
||||
return inplaceArray;
|
||||
}

private static boolean isValidCurrentAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException {
    final Long now = NTP.getTime();
    if (now == null)
        return false;

    byte[] rewardSharePublicKey = onlineAccountData.getPublicKey();
    long onlineAccountTimestamp = onlineAccountData.getTimestamp();

    // Check timestamp is 'recent' here
    if (Math.abs(onlineAccountTimestamp - now) > getOnlineTimestampModulus() * 2) {
        LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp));
        return false;
    }

    // Verify signature
    byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp());
    boolean isSignatureValid = onlineAccountTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp()
            ? Qortal25519Extras.verifyAggregated(rewardSharePublicKey, onlineAccountData.getSignature(), data)
            : Crypto.verify(rewardSharePublicKey, onlineAccountData.getSignature(), data);
    if (!isSignatureValid) {
        LOGGER.trace(() -> String.format("Rejecting invalid online account %s", Base58.encode(rewardSharePublicKey)));
        return false;
    }

    // Qortal: check online account is actually reward-share
    RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey);
    if (rewardShareData == null) {
        // Reward-share doesn't even exist - probably not a good sign
        LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(rewardSharePublicKey)));
        return false;
    }

    Account mintingAccount = new Account(repository, rewardShareData.getMinter());
    if (!mintingAccount.canMint()) {
        // Minting-account component of reward-share can no longer mint - disregard
        LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress()));
        return false;
    }

    return true;
}

/** Adds accounts, maybe rebuilds hashes, returns whether any new accounts were added / hashes rebuilt. */
private boolean addAccounts(Collection<OnlineAccountData> onlineAccountsToAdd) {
    // For keeping track of which hashes to rebuild
    Map<Long, Set<Byte>> hashesToRebuild = new HashMap<>();

    for (OnlineAccountData onlineAccountData : onlineAccountsToAdd) {
        boolean isNewEntry = this.addAccount(onlineAccountData);

        if (isNewEntry)
            hashesToRebuild.computeIfAbsent(onlineAccountData.getTimestamp(), k -> new HashSet<>()).add(onlineAccountData.getPublicKey()[0]);
    }

    if (hashesToRebuild.isEmpty())
        return false;

    for (var entry : hashesToRebuild.entrySet()) {
        Long timestamp = entry.getKey();

        LOGGER.debug(() -> String.format("Rehashing for timestamp %d and leading bytes %s",
                timestamp,
                entry.getValue().stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", "))
            )
        );

        for (Byte leadingByte : entry.getValue()) {
            byte[] pubkeyHash = currentOnlineAccounts.get(timestamp).stream()
                    .map(OnlineAccountData::getPublicKey)
                    .filter(publicKey -> leadingByte == publicKey[0])
                    .reduce(null, OnlineAccountsManager::xorByteArrayInPlace);

            currentOnlineAccountsHashes.computeIfAbsent(timestamp, k -> new ConcurrentHashMap<>()).put(leadingByte, pubkeyHash);

            LOGGER.trace(() -> String.format("Rebuilt hash %s for timestamp %d and leading byte %02x using %d public keys",
                    HashCode.fromBytes(pubkeyHash),
                    timestamp,
                    leadingByte,
                    currentOnlineAccounts.get(timestamp).stream()
                            .map(OnlineAccountData::getPublicKey)
                            .filter(publicKey -> leadingByte == publicKey[0])
                            .count()
            ));
        }
    }

    LOGGER.debug(String.format("we have online accounts for timestamps: %s", String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", ")))));

    return true;
}

private boolean addAccount(OnlineAccountData onlineAccountData) {
    byte[] rewardSharePublicKey = onlineAccountData.getPublicKey();
    long onlineAccountTimestamp = onlineAccountData.getTimestamp();

    Set<OnlineAccountData> onlineAccounts = this.currentOnlineAccounts.computeIfAbsent(onlineAccountTimestamp, k -> ConcurrentHashMap.newKeySet());
    boolean isNewEntry = onlineAccounts.add(onlineAccountData);

    if (isNewEntry)
        LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp));
    else
        LOGGER.trace(() -> String.format("Not updating existing online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp));

    return isNewEntry;
}

/**
 * Expire old entries.
 */
private void expireOldOnlineAccounts() {
    final Long now = NTP.getTime();
    if (now == null)
        return;

    // Expire old entries
    final long lastSeenExpiryPeriod = (getOnlineTimestampModulus() * 2) + (1 * 60 * 1000L);
    final long cutoffThreshold = now - lastSeenExpiryPeriod;
    synchronized (this.onlineAccounts) {
        Iterator<OnlineAccountData> iterator = this.onlineAccounts.iterator();
        while (iterator.hasNext()) {
            OnlineAccountData onlineAccountData = iterator.next();

            if (onlineAccountData.getTimestamp() < cutoffThreshold) {
                iterator.remove();

                LOGGER.trace(() -> {
                    PublicKeyAccount otherAccount = new PublicKeyAccount(null, onlineAccountData.getPublicKey());
                    return String.format("Removed expired online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp());
                });
            }
        }
    final long cutoffThreshold = now - MAX_CACHED_TIMESTAMP_SETS * getOnlineTimestampModulus();
    this.currentOnlineAccounts.keySet().removeIf(timestamp -> timestamp < cutoffThreshold);
    this.currentOnlineAccountsHashes.keySet().removeIf(timestamp -> timestamp < cutoffThreshold);
}

// Request data from other peers?
if ((this.onlineAccountsTasksTimestamp % ONLINE_ACCOUNTS_BROADCAST_INTERVAL) < ONLINE_ACCOUNTS_TASKS_INTERVAL) {
    List<OnlineAccountData> safeOnlineAccounts;
    synchronized (this.onlineAccounts) {
        safeOnlineAccounts = new ArrayList<>(this.onlineAccounts);
    }
/**
 * Request data from other peers. (Pre-V3)
 */
private void requestLegacyRemoteOnlineAccounts() {
    final Long now = NTP.getTime();
    if (now == null)
        return;

    Message messageV1 = new GetOnlineAccountsMessage(safeOnlineAccounts);
    Message messageV2 = new GetOnlineAccountsV2Message(safeOnlineAccounts);
    // Don't bother if we're not up to date
    if (!Controller.getInstance().isUpToDate())
        return;

    List<OnlineAccountData> mergedOnlineAccounts = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList());

    Message messageV2 = new GetOnlineAccountsV2Message(mergedOnlineAccounts);

    Network.getInstance().broadcast(peer ->
            peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1
            peer.getPeersVersion() < ONLINE_ACCOUNTS_V3_PEER_VERSION
                    ? messageV2
                    : null
    );
}

/**
 * Request data from other peers. V3+
 */
private void requestRemoteOnlineAccounts() {
    final Long now = NTP.getTime();
    if (now == null)
        return;

    // Don't bother if we're not up to date
    if (!Controller.getInstance().isUpToDate())
        return;

    Message messageV3 = new GetOnlineAccountsV3Message(currentOnlineAccountsHashes);

    Network.getInstance().broadcast(peer ->
            peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION
                    ? messageV3
                    : null
    );
}

/**
 * Send online accounts that are minting on this node.
 */
private void sendOurOnlineAccountsInfo() {
    final Long now = NTP.getTime();
    // 'current' timestamp
    final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
    if (onlineAccountsTimestamp == null)
        return;

    Long now = NTP.getTime();
    if (now == null) {
        return;
    }

    // Don't submit if we're more than 2 hours out of sync (unless we're in recovery mode)
    final Long minLatestBlockTimestamp = now - (2 * 60 * 60 * 1000L);
    if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp) && !Synchronizer.getInstance().getRecoveryMode()) {
        return;
    }

    List<MintingAccountData> mintingAccounts;
    try (final Repository repository = RepositoryManager.getRepository()) {
        mintingAccounts = repository.getAccountRepository().getMintingAccounts();

        // We have no accounts, but don't reset timestamp
        // We have no accounts to send
        if (mintingAccounts.isEmpty())
            return;

        // Only reward-share accounts allowed
        // Only active reward-shares allowed
        Iterator<MintingAccountData> iterator = mintingAccounts.iterator();
        int i = 0;
        while (iterator.hasNext()) {
            MintingAccountData mintingAccountData = iterator.next();

@ -316,107 +412,138 @@ public class OnlineAccountsManager extends Thread {
                iterator.remove();
                continue;
            }

            if (++i > 1+1) {
                iterator.remove();
                continue;
            }
        }
    } catch (DataException e) {
        LOGGER.warn(String.format("Repository issue trying to fetch minting accounts: %s", e.getMessage()));
        return;
    }

    // 'current' timestamp
    final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now);
    boolean hasInfoChanged = false;
    final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp();

    byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);
    List<OnlineAccountData> ourOnlineAccounts = new ArrayList<>();

    MINTING_ACCOUNTS:
    for (MintingAccountData mintingAccountData : mintingAccounts) {
        PrivateKeyAccount mintingAccount = new PrivateKeyAccount(null, mintingAccountData.getPrivateKey());
        byte[] privateKey = mintingAccountData.getPrivateKey();
        byte[] publicKey = Crypto.toPublicKey(privateKey);

        byte[] signature = mintingAccount.sign(timestampBytes);
        byte[] publicKey = mintingAccount.getPublicKey();
        byte[] signature = useAggregateCompatibleSignature
                ? Qortal25519Extras.signForAggregation(privateKey, timestampBytes)
                : Crypto.sign(privateKey, timestampBytes);

        // Our account is online
        OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey);
        synchronized (this.onlineAccounts) {
            Iterator<OnlineAccountData> iterator = this.onlineAccounts.iterator();
            while (iterator.hasNext()) {
                OnlineAccountData existingOnlineAccountData = iterator.next();

                if (Arrays.equals(existingOnlineAccountData.getPublicKey(), ourOnlineAccountData.getPublicKey())) {
                    // If our online account is already present, with same timestamp, then move on to next mintingAccount
                    if (existingOnlineAccountData.getTimestamp() == onlineAccountsTimestamp)
                        continue MINTING_ACCOUNTS;

                    // If our online account is already present, but with older timestamp, then remove it
                    iterator.remove();
                    break;
                }
            }

            this.onlineAccounts.add(ourOnlineAccountData);
        }

        LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", mintingAccount.getAddress(), onlineAccountsTimestamp));
        ourOnlineAccounts.add(ourOnlineAccountData);
        hasInfoChanged = true;
    }

    this.hasOurOnlineAccounts = !ourOnlineAccounts.isEmpty();

    boolean hasInfoChanged = addAccounts(ourOnlineAccounts);

    if (!hasInfoChanged)
        return;

    Message messageV1 = new OnlineAccountsMessage(ourOnlineAccounts);
    Message messageV2 = new OnlineAccountsV2Message(ourOnlineAccounts);
    Message messageV3 = new OnlineAccountsV2Message(ourOnlineAccounts); // TODO: V3 message

    Network.getInstance().broadcast(peer ->
            peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1
            peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION
                    ? messageV3
                    : peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION
                            ? messageV2
                            : messageV1
    );

    LOGGER.trace(() -> String.format("Broadcasted %d online account%s with timestamp %d", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp));
    LOGGER.debug("Broadcasted {} online account{} with timestamp {}", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp);
}

public static long toOnlineAccountTimestamp(long timestamp) {
    return (timestamp / getOnlineTimestampModulus()) * getOnlineTimestampModulus();
/**
 * Returns whether online accounts manager has any online accounts with timestamp recent enough to be considered currently online.
 */
// BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not
public boolean hasOnlineAccounts() {
    // 'current' timestamp
    final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
    if (onlineAccountsTimestamp == null)
        return false;

    return this.currentOnlineAccounts.containsKey(onlineAccountsTimestamp);
}
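
The caches above are keyed on a 'current' online-account timestamp produced by toOnlineAccountTimestamp(), which rounds an NTP time down to a multiple of getOnlineTimestampModulus(). A hedged, self-contained illustration of that quantisation follows; the modulus value below is only an example, not the value configured by this commit:

public class OnlineTimestampSketch {
    // Example modulus only: 30 minutes in milliseconds. The real value comes from getOnlineTimestampModulus().
    static final long EXAMPLE_MODULUS = 30 * 60 * 1000L;

    // Same shape as OnlineAccountsManager.toOnlineAccountTimestamp(): round down to the nearest modulus boundary.
    static long toOnlineAccountTimestamp(long timestamp) {
        return (timestamp / EXAMPLE_MODULUS) * EXAMPLE_MODULUS;
    }

    public static void main(String[] args) {
        long t1 = 1_650_000_123_456L;
        long t2 = t1 + 60_000L; // one minute later

        // Both timestamps fall into the same bucket, so they map to the same 'online accounts timestamp'.
        System.out.println(toOnlineAccountTimestamp(t1) == toOnlineAccountTimestamp(t2));
        // A timestamp one full modulus later maps to the next bucket.
        System.out.println(toOnlineAccountTimestamp(t1 + EXAMPLE_MODULUS) == toOnlineAccountTimestamp(t1) + EXAMPLE_MODULUS);
    }
}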

/** Returns list of online accounts with timestamp recent enough to be considered currently online. */
public boolean hasOurOnlineAccounts() {
    return this.hasOurOnlineAccounts;
}

/**
 * Returns list of online accounts matching given timestamp.
 */
// Block::mint() - only wants online accounts with (online) timestamp that matches block's (online) timestamp so they can be added to new block
public List<OnlineAccountData> getOnlineAccounts(long onlineTimestamp) {
    LOGGER.info(String.format("caller's timestamp: %d, our timestamps: %s", onlineTimestamp, String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", ")))));

    return new ArrayList<>(Set.copyOf(this.currentOnlineAccounts.getOrDefault(onlineTimestamp, Collections.emptySet())));
}

/**
 * Returns list of online accounts with timestamp recent enough to be considered currently online.
 */
// API: calls this to return list of online accounts - probably expects ALL timestamps - but going to get 'current' from now on
public List<OnlineAccountData> getOnlineAccounts() {
    final long onlineTimestamp = toOnlineAccountTimestamp(NTP.getTime());
    // 'current' timestamp
    final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp();
    if (onlineAccountsTimestamp == null)
        return Collections.emptyList();

    synchronized (this.onlineAccounts) {
        return this.onlineAccounts.stream().filter(account -> account.getTimestamp() == onlineTimestamp).collect(Collectors.toList());
    }
    return getOnlineAccounts(onlineAccountsTimestamp);
}

// Block processing

/** Returns cached, unmodifiable list of latest block's online accounts. */
public List<OnlineAccountData> getLatestBlocksOnlineAccounts() {
    synchronized (this.latestBlocksOnlineAccounts) {
        return this.latestBlocksOnlineAccounts.peekFirst();
    }
/**
 * Removes previously validated entries from block's online accounts.
 * <p>
 * Checks both 'current' and block caches.
 * <p>
 * Typically called by {@link Block#areOnlineAccountsValid()}
 */
public void removeKnown(Set<OnlineAccountData> blocksOnlineAccounts, Long timestamp) {
    Set<OnlineAccountData> onlineAccounts = this.currentOnlineAccounts.get(timestamp);

    // If not 'current' timestamp - try block cache instead
    if (onlineAccounts == null)
        onlineAccounts = this.latestBlocksOnlineAccounts.get(timestamp);

    if (onlineAccounts != null)
        blocksOnlineAccounts.removeAll(onlineAccounts);
}

/** Caches list of latest block's online accounts. Typically called by Block.process() */
public void pushLatestBlocksOnlineAccounts(List<OnlineAccountData> latestBlocksOnlineAccounts) {
    synchronized (this.latestBlocksOnlineAccounts) {
        if (this.latestBlocksOnlineAccounts.size() == MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS)
            this.latestBlocksOnlineAccounts.pollLast();

        this.latestBlocksOnlineAccounts.addFirst(latestBlocksOnlineAccounts == null
                ? Collections.emptyList()
                : Collections.unmodifiableList(latestBlocksOnlineAccounts));
    }
/**
 * Adds block's online accounts to one of OnlineAccountManager's caches.
 * <p>
 * It is assumed that the online accounts have been verified.
 * <p>
 * Typically called by {@link Block#areOnlineAccountsValid()}
 */
public void addBlocksOnlineAccounts(Set<OnlineAccountData> blocksOnlineAccounts, Long timestamp) {
    // We want to add to 'current' in preference if possible
    if (this.currentOnlineAccounts.containsKey(timestamp)) {
        addAccounts(blocksOnlineAccounts);
        return;
    }

/** Reverts list of latest block's online accounts. Typically called by Block.orphan() */
public void popLatestBlocksOnlineAccounts() {
    synchronized (this.latestBlocksOnlineAccounts) {
        this.latestBlocksOnlineAccounts.pollFirst();
    // Add to block cache instead
    this.latestBlocksOnlineAccounts.computeIfAbsent(timestamp, k -> ConcurrentHashMap.newKeySet())
            .addAll(blocksOnlineAccounts);

    // If block cache has grown too large then we need to trim.
    if (this.latestBlocksOnlineAccounts.size() > MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) {
        // However, be careful to trim the opposite end to the entry we just added!
        Long firstKey = this.latestBlocksOnlineAccounts.firstKey();
        if (!firstKey.equals(timestamp))
            this.latestBlocksOnlineAccounts.remove(firstKey);
        else
            this.latestBlocksOnlineAccounts.remove(this.latestBlocksOnlineAccounts.lastKey());
    }
}

@ -429,45 +556,48 @@ public class OnlineAccountsManager extends Thread {
    List<OnlineAccountData> excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts();

    // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts
    List<OnlineAccountData> accountsToSend;
    synchronized (this.onlineAccounts) {
        accountsToSend = new ArrayList<>(this.onlineAccounts);
    }
    List<OnlineAccountData> accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList());
    int prefilterSize = accountsToSend.size();

    Iterator<OnlineAccountData> iterator = accountsToSend.iterator();

    SEND_ITERATOR:
    while (iterator.hasNext()) {
        OnlineAccountData onlineAccountData = iterator.next();

        for (int i = 0; i < excludeAccounts.size(); ++i) {
            OnlineAccountData excludeAccountData = excludeAccounts.get(i);

        for (OnlineAccountData excludeAccountData : excludeAccounts) {
            if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) {
                iterator.remove();
                continue SEND_ITERATOR;
                break;
            }
        }
    }

    if (accountsToSend.isEmpty())
        return;

    Message onlineAccountsMessage = new OnlineAccountsMessage(accountsToSend);
    peer.sendMessage(onlineAccountsMessage);

    LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer));
    LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer);
}

public void onNetworkOnlineAccountsMessage(Peer peer, Message message) {
    OnlineAccountsMessage onlineAccountsMessage = (OnlineAccountsMessage) message;

    List<OnlineAccountData> peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts();
    LOGGER.trace(() -> String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer));
    LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer);

    try (final Repository repository = RepositoryManager.getRepository()) {
        for (OnlineAccountData onlineAccountData : peersOnlineAccounts)
            this.verifyAndAddAccount(repository, onlineAccountData);
    } catch (DataException e) {
        LOGGER.error(String.format("Repository issue while verifying online accounts from peer %s", peer), e);
    int importCount = 0;

    // Add any online accounts to the queue that aren't already present
    for (OnlineAccountData onlineAccountData : peersOnlineAccounts) {
        boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData);

        if (isNewEntry)
            importCount++;
    }

    if (importCount > 0)
        LOGGER.debug("Added {} online accounts to queue", importCount);
}

public void onNetworkGetOnlineAccountsV2Message(Peer peer, Message message) {
@ -476,58 +606,106 @@ public class OnlineAccountsManager extends Thread {
    List<OnlineAccountData> excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts();

    // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts
    List<OnlineAccountData> accountsToSend;
    synchronized (this.onlineAccounts) {
        accountsToSend = new ArrayList<>(this.onlineAccounts);
    }
    List<OnlineAccountData> accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList());
    int prefilterSize = accountsToSend.size();

    Iterator<OnlineAccountData> iterator = accountsToSend.iterator();

    SEND_ITERATOR:
    while (iterator.hasNext()) {
        OnlineAccountData onlineAccountData = iterator.next();

        for (int i = 0; i < excludeAccounts.size(); ++i) {
            OnlineAccountData excludeAccountData = excludeAccounts.get(i);

        for (OnlineAccountData excludeAccountData : excludeAccounts) {
            if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) {
                iterator.remove();
                continue SEND_ITERATOR;
                break;
            }
        }
    }

    if (accountsToSend.isEmpty())
        return;

    Message onlineAccountsMessage = new OnlineAccountsV2Message(accountsToSend);
    peer.sendMessage(onlineAccountsMessage);

    LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer));
    LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer);
}

public void onNetworkOnlineAccountsV2Message(Peer peer, Message message) {
    OnlineAccountsV2Message onlineAccountsMessage = (OnlineAccountsV2Message) message;

    List<OnlineAccountData> peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts();
    LOGGER.debug(String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer));
    LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer);

    int importCount = 0;

    // Add any online accounts to the queue that aren't already present
    for (OnlineAccountData onlineAccountData : peersOnlineAccounts) {
        boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData);

        // Do we already know about this online account data?
        if (onlineAccounts.contains(onlineAccountData)) {
            continue;
        }

        // Is it already in the import queue?
        if (onlineAccountsImportQueue.contains(onlineAccountData)) {
            continue;
        }

        onlineAccountsImportQueue.add(onlineAccountData);
        if (isNewEntry)
            importCount++;
    }

    LOGGER.debug(String.format("Added %d online accounts to queue", importCount));
    if (importCount > 0)
        LOGGER.debug("Added {} online accounts to queue", importCount);
}

public void onNetworkGetOnlineAccountsV3Message(Peer peer, Message message) {
    GetOnlineAccountsV3Message getOnlineAccountsMessage = (GetOnlineAccountsV3Message) message;

    Map<Long, Map<Byte, byte[]>> peersHashes = getOnlineAccountsMessage.getHashesByTimestampThenByte();
    List<OnlineAccountData> outgoingOnlineAccounts = new ArrayList<>();

    // Warning: no double-checking/fetching - we must be ConcurrentMap compatible!
    // So no contains()-then-get() or multiple get()s on the same key/map.
    // We also use getOrDefault() with emptySet() on currentOnlineAccounts in case corresponding timestamp entry isn't there.
    for (var ourOuterMapEntry : currentOnlineAccountsHashes.entrySet()) {
        Long timestamp = ourOuterMapEntry.getKey();

        var ourInnerMap = ourOuterMapEntry.getValue();
        var peersInnerMap = peersHashes.get(timestamp);

        if (peersInnerMap == null) {
            // Peer doesn't have this timestamp, so if it's valid (i.e. not too old) then we'd have to send all of ours
            Set<OnlineAccountData> timestampsOnlineAccounts = this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet());
            outgoingOnlineAccounts.addAll(timestampsOnlineAccounts);

            LOGGER.debug(() -> String.format("Going to send all %d online accounts for timestamp %d", timestampsOnlineAccounts.size(), timestamp));
        } else {
            // Quick cache of which leading bytes to send so we only have to filter once
            Set<Byte> outgoingLeadingBytes = new HashSet<>();

            // We have entries for this timestamp so compare against peer's entries
            for (var ourInnerMapEntry : ourInnerMap.entrySet()) {
                Byte leadingByte = ourInnerMapEntry.getKey();
                byte[] peersHash = peersInnerMap.get(leadingByte);

                if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) {
                    // For this leading byte: hashes don't match or peer doesn't have entry
                    // Send all online accounts for this timestamp and leading byte
                    outgoingLeadingBytes.add(leadingByte);
                }
            }

            int beforeAddSize = outgoingOnlineAccounts.size();

            this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()).stream()
                    .filter(account -> outgoingLeadingBytes.contains(account.getPublicKey()[0]))
                    .forEach(outgoingOnlineAccounts::add);

            if (outgoingOnlineAccounts.size() > beforeAddSize)
                LOGGER.debug(String.format("Going to send %d online accounts for timestamp %d and leading bytes %s",
                        outgoingOnlineAccounts.size() - beforeAddSize,
                        timestamp,
                        outgoingLeadingBytes.stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", "))
                    )
                );
        }
    }

    Message onlineAccountsMessage = new OnlineAccountsV2Message(outgoingOnlineAccounts); // TODO: V3 message
    peer.sendMessage(onlineAccountsMessage);

    LOGGER.debug("Sent {} online accounts to {}", outgoingOnlineAccounts.size(), peer);
}
}
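
The V3 handler above implements a simple set-reconciliation scheme: each side summarises its online accounts as one XOR digest per (timestamp, leading public-key byte) bucket, and only buckets whose digests differ are sent in full. A compact sketch of the comparison step, written independently of the Qortal classes (names below are illustrative only):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class BucketReconciliationSketch {
    /**
     * Returns the leading bytes whose digests differ between our buckets and the peer's buckets.
     * A missing peer entry counts as a mismatch, mirroring the !Arrays.equals(ours, peersHash) test above.
     */
    static Set<Byte> bucketsToSend(Map<Byte, byte[]> ourHashes, Map<Byte, byte[]> peerHashes) {
        Set<Byte> mismatched = new HashSet<>();
        for (Map.Entry<Byte, byte[]> entry : ourHashes.entrySet())
            if (!Arrays.equals(entry.getValue(), peerHashes.get(entry.getKey())))
                mismatched.add(entry.getKey());
        return mismatched;
    }

    public static void main(String[] args) {
        Map<Byte, byte[]> ours = new HashMap<>();
        ours.put((byte) 0x1c, new byte[] {1, 2, 3});
        ours.put((byte) 0x2d, new byte[] {4, 5, 6});

        Map<Byte, byte[]> peers = new HashMap<>();
        peers.put((byte) 0x1c, new byte[] {1, 2, 3}); // identical digest - nothing to send for 0x1c
        // peer has no entry for 0x2d - that bucket must be sent in full

        System.out.println(bucketsToSend(ours, peers)); // [45], i.e. leading byte 0x2d
    }
}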

@ -26,14 +26,7 @@ import org.qortal.event.Event;
import org.qortal.event.EventBus;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.BlockMessage;
import org.qortal.network.message.BlockSummariesMessage;
import org.qortal.network.message.GetBlockMessage;
import org.qortal.network.message.GetBlockSummariesMessage;
import org.qortal.network.message.GetSignaturesV2Message;
import org.qortal.network.message.Message;
import org.qortal.network.message.SignaturesMessage;
import org.qortal.network.message.MessageType;
import org.qortal.network.message.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
@ -88,7 +81,7 @@ public class Synchronizer extends Thread {
private boolean syncRequestPending = false;

// Keep track of invalid blocks so that we don't keep trying to sync them
private Map<String, Long> invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
private Map<ByteArray, Long> invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
public Long timeValidBlockLastReceived = null;
public Long timeInvalidBlockLastReceived = null;

@ -178,8 +171,8 @@ public class Synchronizer extends Thread {

public Integer getSyncPercent() {
    synchronized (this.syncLock) {
        // Report as 100% synced if the latest block is within the last 30 mins
        final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L);
        // Report as 100% synced if the latest block is within the last 60 mins
        final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
        if (Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) {
            return 100;
        }
@ -624,7 +617,7 @@ public class Synchronizer extends Thread {
// We have already determined that the correct chain diverged from a lower height. We are safe to skip these peers.
for (Peer peer : peersSharingCommonBlock) {
    LOGGER.debug(String.format("Peer %s has common block at height %d but the superior chain is at height %d. Removing it from this round.", peer, commonBlockSummary.getHeight(), dropPeersAfterCommonBlockHeight));
    this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
    //this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
}
continue;
}
@ -635,7 +628,9 @@ public class Synchronizer extends Thread {
int minChainLength = this.calculateMinChainLengthOfPeers(peersSharingCommonBlock, commonBlockSummary);

// Fetch block summaries from each peer
for (Peer peer : peersSharingCommonBlock) {
Iterator peersSharingCommonBlockIterator = peersSharingCommonBlock.iterator();
while (peersSharingCommonBlockIterator.hasNext()) {
    Peer peer = (Peer) peersSharingCommonBlockIterator.next();

    // If we're shutting down, just return the latest peer list
    if (Controller.isStopping())
@ -692,6 +687,8 @@ public class Synchronizer extends Thread {
    if (this.containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) {
        LOGGER.debug("Ignoring peer %s because it holds an invalid block", peer);
        peers.remove(peer);
        peersSharingCommonBlockIterator.remove();
        continue;
    }

    // Reduce minChainLength if needed. If we don't have any blocks, this peer will be excluded from chain weight comparisons later in the process, so we shouldn't update minChainLength
@ -847,6 +844,10 @@ public class Synchronizer extends Thread {

/* Invalid block signature tracking */

public Map<ByteArray, Long> getInvalidBlockSignatures() {
    return this.invalidBlockSignatures;
}

private void addInvalidBlockSignature(byte[] signature) {
    Long now = NTP.getTime();
    if (now == null) {
@ -854,8 +855,7 @@ public class Synchronizer extends Thread {
    }

    // Add or update existing entry
    String sig58 = Base58.encode(signature);
    invalidBlockSignatures.put(sig58, now);
    invalidBlockSignatures.put(ByteArray.wrap(signature), now);
}
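
The change above swaps Base58-encoded String keys for ByteArray-wrapped keys. A raw byte[] cannot be used directly as a HashMap key because arrays inherit identity-based equals()/hashCode(); a small wrapper with value semantics (of which Qortal's ByteArray is one example) fixes that without the cost of Base58 encoding and decoding. A self-contained sketch of the idea, using a hypothetical wrapper rather than the project's own class:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class ByteArrayKeySketch {
    // Hypothetical stand-in for a ByteArray-style wrapper: equality and hashing by content, not identity.
    static final class Bytes {
        final byte[] value;
        Bytes(byte[] value) { this.value = value; }

        @Override public boolean equals(Object o) {
            return o instanceof Bytes && Arrays.equals(((Bytes) o).value, this.value);
        }
        @Override public int hashCode() { return Arrays.hashCode(this.value); }
    }

    public static void main(String[] args) {
        byte[] sig1 = {10, 20, 30};
        byte[] sig2 = {10, 20, 30}; // same content, different array instance

        Map<byte[], Long> rawKeyed = new HashMap<>();
        rawKeyed.put(sig1, 1L);
        System.out.println(rawKeyed.containsKey(sig2)); // false - identity-based lookup misses

        Map<Bytes, Long> wrapped = new HashMap<>();
        wrapped.put(new Bytes(sig1), 1L);
        System.out.println(wrapped.containsKey(new Bytes(sig2))); // true - content-based lookup hits
    }
}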

private void deleteOlderInvalidSignatures(Long now) {
    if (now == null) {
@ -874,17 +874,16 @@ public class Synchronizer extends Thread {
        }
    }
}
private boolean containsInvalidBlockSummary(List<BlockSummaryData> blockSummaries) {
public boolean containsInvalidBlockSummary(List<BlockSummaryData> blockSummaries) {
    if (blockSummaries == null || invalidBlockSignatures == null) {
        return false;
    }

    // Loop through our known invalid blocks and check each one against supplied block summaries
    for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
        byte[] invalidSignature = Base58.decode(invalidSignature58);
    for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) {
        for (BlockSummaryData blockSummary : blockSummaries) {
            byte[] signature = blockSummary.getSignature();
            if (Arrays.equals(signature, invalidSignature)) {
            if (Arrays.equals(signature, invalidSignature.value)) {
                return true;
            }
        }
@ -897,10 +896,9 @@ public class Synchronizer extends Thread {
    }

    // Loop through our known invalid blocks and check each one against supplied block signatures
    for (String invalidSignature58 : invalidBlockSignatures.keySet()) {
        byte[] invalidSignature = Base58.decode(invalidSignature58);
    for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) {
        for (byte[] signature : blockSignatures) {
            if (Arrays.equals(signature, invalidSignature)) {
            if (Arrays.equals(signature, invalidSignature.value)) {
                return true;
            }
        }
@ -1579,14 +1577,25 @@ public class Synchronizer extends Thread {
    Message getBlockMessage = new GetBlockMessage(signature);

    Message message = peer.getResponse(getBlockMessage);
    if (message == null || message.getType() != MessageType.BLOCK)
    if (message == null)
        return null;

    switch (message.getType()) {
        case BLOCK: {
            BlockMessage blockMessage = (BlockMessage) message;

            return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates());
        }

        case BLOCK_V2: {
            BlockV2Message blockMessage = (BlockV2Message) message;
            return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStatesHash());
        }

        default:
            return null;
    }
}

public void populateBlockSummariesMinterLevels(Repository repository, List<BlockSummaryData> blockSummaries) throws DataException {
    final int firstBlockHeight = blockSummaries.get(0).getHeight();

@ -67,6 +67,9 @@ public class ArbitraryDataFileListManager {
/** Maximum number of hops that a file list relay request is allowed to make */
public static int RELAY_REQUEST_MAX_HOPS = 4;

/** Minimum peer version to use relay */
public static String RELAY_MIN_PEER_VERSION = "3.4.0";


private ArbitraryDataFileListManager() {
}
@ -524,6 +527,7 @@ public class ArbitraryDataFileListManager {
    forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops,
            arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible());
}
forwardArbitraryDataFileListMessage.setId(message.getId());

// Forward to requesting peer
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
@ -690,12 +694,14 @@ public class ArbitraryDataFileListManager {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast

Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer);
relayGetArbitraryDataFileListMessage.setId(message.getId());

LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
        broadcastPeer -> broadcastPeer == peer ||
                Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost())
                ? null : relayGetArbitraryDataFileListMessage);
        broadcastPeer ->
                !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
                        broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage
);

}
else {

@ -22,8 +22,7 @@ import org.qortal.utils.Triple;
import java.io.IOException;
import java.util.*;

import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.RELAY_REQUEST_MAX_DURATION;
import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.RELAY_REQUEST_MAX_HOPS;
import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.*;

public class ArbitraryMetadataManager {

@ -339,6 +338,7 @@ public class ArbitraryMetadataManager {
if (requestingPeer != null) {

    ArbitraryMetadataMessage forwardArbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, arbitraryMetadataMessage.getArbitraryMetadataFile());
    forwardArbitraryMetadataMessage.setId(arbitraryMetadataMessage.getId());

    // Forward to requesting peer
    LOGGER.debug("Forwarding metadata to requesting peer: {}", requestingPeer);
@ -434,12 +434,13 @@ public class ArbitraryMetadataManager {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast

Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops);
relayGetArbitraryMetadataMessage.setId(message.getId());

LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
        broadcastPeer -> broadcastPeer == peer ||
                Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost())
                ? null : relayGetArbitraryMetadataMessage);
        broadcastPeer ->
                !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null :
                        broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage);

}
else {

@ -242,8 +242,8 @@ public class TradeBot implements Listener {
if (!(event instanceof Synchronizer.NewChainTipEvent))
    return;

// Don't process trade bots or broadcast presence timestamps if our chain is more than 30 minutes old
final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L);
// Don't process trade bots or broadcast presence timestamps if our chain is more than 60 minutes old
final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L);
if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp))
    return;

@ -292,7 +292,7 @@ public class TradeBot implements Listener {
}

public static byte[] deriveTradeNativePublicKey(byte[] privateKey) {
    return PrivateKeyAccount.toPublicKey(privateKey);
    return Crypto.toPublicKey(privateKey);
}

public static byte[] deriveTradeForeignPublicKey(byte[] privateKey) {

@ -375,7 +375,7 @@ public abstract class Bitcoiny implements ForeignBlockchain {

public Long getWalletBalanceFromTransactions(String key58) throws ForeignBlockchainException {
    long balance = 0;
    Comparator<SimpleTransaction> oldestTimestampFirstComparator = Comparator.comparingInt(SimpleTransaction::getTimestamp);
    Comparator<SimpleTransaction> oldestTimestampFirstComparator = Comparator.comparingLong(SimpleTransaction::getTimestamp);
    List<SimpleTransaction> transactions = getWalletTransactions(key58).stream().sorted(oldestTimestampFirstComparator).collect(Collectors.toList());
    for (SimpleTransaction transaction : transactions) {
        balance += transaction.getTotalAmount();
@ -455,7 +455,7 @@ public abstract class Bitcoiny implements ForeignBlockchain {
    // Process new keys
} while (true);

Comparator<SimpleTransaction> newestTimestampFirstComparator = Comparator.comparingInt(SimpleTransaction::getTimestamp).reversed();
Comparator<SimpleTransaction> newestTimestampFirstComparator = Comparator.comparingLong(SimpleTransaction::getTimestamp).reversed();

// Update cache and return
transactionsCacheTimestamp = NTP.getTime();
@ -537,7 +537,8 @@ public abstract class Bitcoiny implements ForeignBlockchain {
    // All inputs and outputs relate to this wallet, so the balance should be unaffected
    amount = 0;
}
return new SimpleTransaction(t.txHash, t.timestamp, amount, fee, inputs, outputs);
long timestampMillis = t.timestamp * 1000L;
return new SimpleTransaction(t.txHash, timestampMillis, amount, fee, inputs, outputs);
}

/**

@ -7,7 +7,7 @@ import java.util.List;
@XmlAccessorType(XmlAccessType.FIELD)
public class SimpleTransaction {
    private String txHash;
    private Integer timestamp;
    private Long timestamp;
    private long totalAmount;
    private long feeAmount;
    private List<Input> inputs;
@ -74,7 +74,7 @@ public class SimpleTransaction {
    public SimpleTransaction() {
    }

    public SimpleTransaction(String txHash, Integer timestamp, long totalAmount, long feeAmount, List<Input> inputs, List<Output> outputs) {
    public SimpleTransaction(String txHash, Long timestamp, long totalAmount, long feeAmount, List<Input> inputs, List<Output> outputs) {
        this.txHash = txHash;
        this.timestamp = timestamp;
        this.totalAmount = totalAmount;
@ -87,7 +87,7 @@ public class SimpleTransaction {
        return txHash;
    }

    public Integer getTimestamp() {
    public Long getTimestamp() {
        return timestamp;
    }
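
SimpleTransaction's timestamp changes from Integer to Long above, and Bitcoiny now multiplies the foreign-chain transaction timestamp, reported in seconds, by 1000 before constructing the object, so the field carries milliseconds like the rest of the codebase. A minimal sketch of that conversion and why a 32-bit value is too small for it (values below are illustrative only):

public class TimestampUnitsSketch {
    public static void main(String[] args) {
        long timestampSeconds = 1_650_000_000L;          // a plausible Unix timestamp in seconds
        long timestampMillis = timestampSeconds * 1000L; // same instant in milliseconds

        // Milliseconds since epoch no longer fit in 32 bits, so an Integer field would overflow here.
        System.out.println(timestampMillis);                     // 1650000000000
        System.out.println(timestampMillis > Integer.MAX_VALUE); // true
        System.out.println((int) timestampMillis);               // a meaningless value if forced into an int
    }
}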

@ -1,99 +0,0 @@
package org.qortal.crypto;

import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Arrays;

import org.bouncycastle.crypto.Digest;
import org.bouncycastle.math.ec.rfc7748.X25519;
import org.bouncycastle.math.ec.rfc7748.X25519Field;
import org.bouncycastle.math.ec.rfc8032.Ed25519;

/** Additions to BouncyCastle providing Ed25519 to X25519 key conversion. */
public class BouncyCastle25519 {

    private static final Class<?> pointAffineClass;
    private static final Constructor<?> pointAffineCtor;
    private static final Method decodePointVarMethod;
    private static final Field yField;

    static {
        try {
            Class<?> ed25519Class = Ed25519.class;
            pointAffineClass = Arrays.stream(ed25519Class.getDeclaredClasses()).filter(clazz -> clazz.getSimpleName().equals("PointAffine")).findFirst().get();
            if (pointAffineClass == null)
                throw new ClassNotFoundException("Can't locate PointExt inner class inside Ed25519");

            decodePointVarMethod = ed25519Class.getDeclaredMethod("decodePointVar", byte[].class, int.class, boolean.class, pointAffineClass);
            decodePointVarMethod.setAccessible(true);

            pointAffineCtor = pointAffineClass.getDeclaredConstructors()[0];
            pointAffineCtor.setAccessible(true);

            yField = pointAffineClass.getDeclaredField("y");
            yField.setAccessible(true);
        } catch (NoSuchMethodException | SecurityException | IllegalArgumentException | NoSuchFieldException | ClassNotFoundException e) {
            throw new RuntimeException("Can't initialize BouncyCastle25519 shim", e);
        }
    }

    private static int[] obtainYFromPublicKey(byte[] ed25519PublicKey) {
        try {
            Object pA = pointAffineCtor.newInstance();

            Boolean result = (Boolean) decodePointVarMethod.invoke(null, ed25519PublicKey, 0, true, pA);
            if (result == null || !result)
                return null;

            return (int[]) yField.get(pA);
        } catch (SecurityException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
            throw new RuntimeException("Can't reflect into BouncyCastle", e);
        }
    }

    public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) {
        int[] one = new int[X25519Field.SIZE];
        X25519Field.one(one);

        int[] y = obtainYFromPublicKey(ed25519PublicKey);

        int[] oneMinusY = new int[X25519Field.SIZE];
        X25519Field.sub(one, y, oneMinusY);

        int[] onePlusY = new int[X25519Field.SIZE];
        X25519Field.add(one, y, onePlusY);

        int[] oneMinusYInverted = new int[X25519Field.SIZE];
        X25519Field.inv(oneMinusY, oneMinusYInverted);

        int[] u = new int[X25519Field.SIZE];
        X25519Field.mul(onePlusY, oneMinusYInverted, u);

        X25519Field.normalize(u);

        byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE];
        X25519Field.encode(u, x25519PublicKey, 0);

        return x25519PublicKey;
    }

    public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) {
        Digest d = Ed25519.createPrehash();
        byte[] h = new byte[d.getDigestSize()];

        d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length);
        d.doFinal(h, 0);

        byte[] s = new byte[X25519.SCALAR_SIZE];

        System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE);
        s[0] &= 0xF8;
        s[X25519.SCALAR_SIZE - 1] &= 0x7F;
        s[X25519.SCALAR_SIZE - 1] |= 0x40;

        return s;
    }

}

1427 src/main/java/org/qortal/crypto/BouncyCastleEd25519.java (new file; diff suppressed because it is too large)

@ -253,6 +253,10 @@ public abstract class Crypto {
    return false;
}

public static byte[] toPublicKey(byte[] privateKey) {
    return new Ed25519PrivateKeyParameters(privateKey, 0).generatePublicKey().getEncoded();
}

public static boolean verify(byte[] publicKey, byte[] signature, byte[] message) {
    try {
        return Ed25519.verify(signature, 0, publicKey, 0, message, 0, message.length);
@ -264,16 +268,24 @@ public abstract class Crypto {
public static byte[] sign(Ed25519PrivateKeyParameters edPrivateKeyParams, byte[] message) {
    byte[] signature = new byte[SIGNATURE_LENGTH];

    edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519, edPrivateKeyParams.generatePublicKey(), null, message, 0, message.length, signature, 0);
    edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519, null, message, 0, message.length, signature, 0);

    return signature;
}

public static byte[] sign(byte[] privateKey, byte[] message) {
    byte[] signature = new byte[SIGNATURE_LENGTH];

    new Ed25519PrivateKeyParameters(privateKey, 0).sign(Ed25519.Algorithm.Ed25519, null, message, 0, message.length, signature, 0);

    return signature;
}

public static byte[] getSharedSecret(byte[] privateKey, byte[] publicKey) {
    byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(privateKey);
    byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(privateKey);
    X25519PrivateKeyParameters xPrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0);

    byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(publicKey);
    byte[] x25519PublicKey = Qortal25519Extras.toX25519PublicKey(publicKey);
    X25519PublicKeyParameters xPublicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0);

    byte[] sharedSecret = new byte[SHARED_SECRET_LENGTH];
@ -281,5 +293,4 @@ public abstract class Crypto {

    return sharedSecret;
}

}

234 src/main/java/org/qortal/crypto/Qortal25519Extras.java (new file)
@ -0,0 +1,234 @@
package org.qortal.crypto;

import org.bouncycastle.crypto.Digest;
import org.bouncycastle.crypto.digests.SHA512Digest;
import org.bouncycastle.math.ec.rfc7748.X25519;
import org.bouncycastle.math.ec.rfc7748.X25519Field;
import org.bouncycastle.math.ec.rfc8032.Ed25519;
import org.bouncycastle.math.raw.Nat256;

import java.security.SecureRandom;
import java.util.Arrays;
import java.util.Collection;

/**
 * Additions to BouncyCastle providing:
 * <p></p>
 * <ul>
 * <li>Ed25519 to X25519 key conversion</li>
 * <li>Aggregate public keys</li>
 * <li>Aggregate signatures</li>
 * </ul>
 */
public abstract class Qortal25519Extras extends BouncyCastleEd25519 {

    private static final SecureRandom SECURE_RANDOM = new SecureRandom();

    public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) {
        int[] one = new int[X25519Field.SIZE];
        X25519Field.one(one);

        PointAffine pA = new PointAffine();
        if (!decodePointVar(ed25519PublicKey, 0, true, pA))
            return null;

        int[] y = pA.y;

        int[] oneMinusY = new int[X25519Field.SIZE];
        X25519Field.sub(one, y, oneMinusY);

        int[] onePlusY = new int[X25519Field.SIZE];
        X25519Field.add(one, y, onePlusY);

        int[] oneMinusYInverted = new int[X25519Field.SIZE];
        X25519Field.inv(oneMinusY, oneMinusYInverted);

        int[] u = new int[X25519Field.SIZE];
        X25519Field.mul(onePlusY, oneMinusYInverted, u);

        X25519Field.normalize(u);

        byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE];
        X25519Field.encode(u, x25519PublicKey, 0);

        return x25519PublicKey;
    }

    public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) {
        Digest d = Ed25519.createPrehash();
        byte[] h = new byte[d.getDigestSize()];

        d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length);
        d.doFinal(h, 0);

        byte[] s = new byte[X25519.SCALAR_SIZE];

        System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE);
        s[0] &= 0xF8;
        s[X25519.SCALAR_SIZE - 1] &= 0x7F;
        s[X25519.SCALAR_SIZE - 1] |= 0x40;

        return s;
    }

    // Mostly for test support
    public static PointAccum newPointAccum() {
        return new PointAccum();
    }

    public static byte[] aggregatePublicKeys(Collection<byte[]> publicKeys) {
        PointAccum rAccum = null;

        for (byte[] publicKey : publicKeys) {
            PointAffine pA = new PointAffine();
            if (!decodePointVar(publicKey, 0, false, pA))
                // Failed to decode
                return null;

            if (rAccum == null) {
                rAccum = new PointAccum();
                pointCopy(pA, rAccum);
            } else {
                pointAdd(pointCopy(pA), rAccum);
            }
        }

        byte[] publicKey = new byte[SCALAR_BYTES];
        if (0 == encodePoint(rAccum, publicKey, 0))
            // Failed to encode
            return null;

        return publicKey;
    }

    public static byte[] aggregateSignatures(Collection<byte[]> signatures) {
        // Signatures are (R, s)
        // R is a point
        // s is a scalar
        PointAccum rAccum = null;
        int[] sAccum = new int[SCALAR_INTS];

        byte[] rEncoded = new byte[POINT_BYTES];
        int[] sPart = new int[SCALAR_INTS];
        for (byte[] signature : signatures) {
            System.arraycopy(signature, 0, rEncoded, 0, rEncoded.length);

            PointAffine pA = new PointAffine();
            if (!decodePointVar(rEncoded, 0, false, pA))
                // Failed to decode
                return null;

            if (rAccum == null) {
                rAccum = new PointAccum();
                pointCopy(pA, rAccum);

                decode32(signature, rEncoded.length, sAccum, 0, SCALAR_INTS);
            } else {
                pointAdd(pointCopy(pA), rAccum);

                decode32(signature, rEncoded.length, sPart, 0, SCALAR_INTS);
                Nat256.addTo(sPart, sAccum);

                // "mod L" on sAccum
                if (Nat256.gte(sAccum, L))
                    Nat256.subFrom(L, sAccum);
            }
        }

        byte[] signature = new byte[SIGNATURE_SIZE];
        if (0 == encodePoint(rAccum, signature, 0))
            // Failed to encode
            return null;

        for (int i = 0; i < sAccum.length; ++i) {
            encode32(sAccum[i], signature, POINT_BYTES + i * 4);
        }

        return signature;
    }

    public static byte[] signForAggregation(byte[] privateKey, byte[] message) {
        // Very similar to BouncyCastle's implementation except we use secure random nonce and different hash
        Digest d = new SHA512Digest();
        byte[] h = new byte[d.getDigestSize()];

        d.reset();
        d.update(privateKey, 0, privateKey.length);
        d.doFinal(h, 0);

        byte[] sH = new byte[SCALAR_BYTES];
        pruneScalar(h, 0, sH);

        byte[] publicKey = new byte[SCALAR_BYTES];
        scalarMultBaseEncoded(sH, publicKey, 0);

        byte[] rSeed = new byte[d.getDigestSize()];
        SECURE_RANDOM.nextBytes(rSeed);

        byte[] r = new byte[SCALAR_BYTES];
        pruneScalar(rSeed, 0, r);

        byte[] R = new byte[POINT_BYTES];
        scalarMultBaseEncoded(r, R, 0);

        d.reset();
        d.update(message, 0, message.length);
        d.doFinal(h, 0);
        byte[] k = reduceScalar(h);

        byte[] s = calculateS(r, k, sH);

        byte[] signature = new byte[SIGNATURE_SIZE];
        System.arraycopy(R, 0, signature, 0, POINT_BYTES);
        System.arraycopy(s, 0, signature, POINT_BYTES, SCALAR_BYTES);

        return signature;
    }

    public static boolean verifyAggregated(byte[] publicKey, byte[] signature, byte[] message) {
        byte[] R = Arrays.copyOfRange(signature, 0, POINT_BYTES);

        byte[] s = Arrays.copyOfRange(signature, POINT_BYTES, POINT_BYTES + SCALAR_BYTES);

        if (!checkPointVar(R))
            // R out of bounds
            return false;

        if (!checkScalarVar(s))
            // s out of bounds
            return false;

        byte[] S = new byte[POINT_BYTES];
        scalarMultBaseEncoded(s, S, 0);

        PointAffine pA = new PointAffine();
        if (!decodePointVar(publicKey, 0, true, pA))
            // Failed to decode
            return false;

        Digest d = new SHA512Digest();
        byte[] h = new byte[d.getDigestSize()];

        d.update(message, 0, message.length);
        d.doFinal(h, 0);

        byte[] k = reduceScalar(h);

        int[] nS = new int[SCALAR_INTS];
        decodeScalar(s, 0, nS);

        int[] nA = new int[SCALAR_INTS];
        decodeScalar(k, 0, nA);

        /*PointAccum*/
        PointAccum pR = new PointAccum();
        scalarMultStrausVar(nS, nA, pA, pR);

        byte[] check = new byte[POINT_BYTES];
        if (0 == encodePoint(pR, check, 0))
            // Failed to encode
            return false;

        return Arrays.equals(check, R);
    }
}
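
A rough sketch of how the new Qortal25519Extras primitives are intended to compose: each party signs the same message with signForAggregation(), the signatures and public keys are each folded into one value, and the aggregate signature is checked against the aggregate public key. This is an illustrative usage outline only; key handling below is simplified and the real callers live in OnlineAccountsManager and Block.

import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.List;

import org.qortal.crypto.Crypto;
import org.qortal.crypto.Qortal25519Extras;

public class AggregateSignatureSketch {
    public static void main(String[] args) {
        byte[] message = "example message".getBytes();
        SecureRandom random = new SecureRandom();

        List<byte[]> publicKeys = new ArrayList<>();
        List<byte[]> signatures = new ArrayList<>();

        // Three parties each sign the SAME message with an aggregate-compatible signature.
        for (int i = 0; i < 3; i++) {
            byte[] privateKey = new byte[32];
            random.nextBytes(privateKey);

            publicKeys.add(Crypto.toPublicKey(privateKey));
            signatures.add(Qortal25519Extras.signForAggregation(privateKey, message));
        }

        // An individual signature still verifies against its own public key.
        System.out.println(Qortal25519Extras.verifyAggregated(publicKeys.get(0), signatures.get(0), message));

        // Fold everything down to a single public key and a single signature.
        byte[] aggregatePublicKey = Qortal25519Extras.aggregatePublicKeys(publicKeys);
        byte[] aggregateSignature = Qortal25519Extras.aggregateSignatures(signatures);

        // The aggregate signature verifies against the aggregate public key.
        System.out.println(Qortal25519Extras.verifyAggregated(aggregatePublicKey, aggregateSignature, message));
    }
}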

@ -5,6 +5,7 @@ import java.util.Arrays;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlTransient;

import org.qortal.account.PublicKeyAccount;

@ -16,6 +17,9 @@ public class OnlineAccountData {
    protected byte[] signature;
    protected byte[] publicKey;

    @XmlTransient
    private int hash;

    // Constructors

    // necessary for JAXB serialization
@ -62,20 +66,23 @@ public class OnlineAccountData {
        if (otherOnlineAccountData.timestamp != this.timestamp)
            return false;

        // Signature more likely to be unique than public key
        if (!Arrays.equals(otherOnlineAccountData.signature, this.signature))
            return false;

        if (!Arrays.equals(otherOnlineAccountData.publicKey, this.publicKey))
            return false;

        // We don't compare signature because it's not our remit to verify and newer aggregate signatures use random nonces

        return true;
    }

    @Override
    public int hashCode() {
        // Pretty lazy implementation
        return (int) this.timestamp;
        int h = this.hash;
        if (h == 0) {
            this.hash = h = Long.hashCode(this.timestamp)
                    ^ Arrays.hashCode(this.publicKey);
            // We don't use signature because newer aggregate signatures use random nonces
        }
        return h;
    }

}
|
||||
|
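The equals/hashCode rework above stops comparing signatures (newer aggregate signatures use random nonces, so otherwise-equal entries can carry different signature bytes) and caches the hash lazily. A minimal standalone sketch of the same contract; the class name and fields are illustrative stand-ins, not the project's, and a computed hash of 0 is simply recomputed, as with String.hashCode:

import java.util.Arrays;

// Sketch only: equality is timestamp + public key, signature is deliberately excluded.
public class OnlineAccountSketch {
    private final long timestamp;
    private final byte[] publicKey;

    private int hash; // lazily cached; 0 means "not computed yet"

    public OnlineAccountSketch(long timestamp, byte[] publicKey) {
        this.timestamp = timestamp;
        this.publicKey = publicKey;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other)
            return true;
        if (!(other instanceof OnlineAccountSketch))
            return false;

        OnlineAccountSketch o = (OnlineAccountSketch) other;
        return this.timestamp == o.timestamp && Arrays.equals(this.publicKey, o.publicKey);
    }

    @Override
    public int hashCode() {
        int h = this.hash;
        if (h == 0)
            this.hash = h = Long.hashCode(this.timestamp) ^ Arrays.hashCode(this.publicKey);
        return h;
    }
}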
@ -469,6 +469,8 @@ public class Network {
|
||||
|
||||
class NetworkProcessor extends ExecuteProduceConsume {
|
||||
|
||||
private final Logger LOGGER = LogManager.getLogger(NetworkProcessor.class);
|
||||
|
||||
private final AtomicLong nextConnectTaskTimestamp = new AtomicLong(0L); // ms - try first connect once NTP syncs
|
||||
private final AtomicLong nextBroadcastTimestamp = new AtomicLong(0L); // ms - try first broadcast once NTP syncs
|
||||
|
||||
@ -1373,17 +1375,26 @@ public class Network {
|
||||
// We attempted to connect within the last day
|
||||
// but we last managed to connect over a week ago.
|
||||
Predicate<PeerData> isNotOldPeer = peerData -> {
|
||||
if (peerData.getLastAttempted() == null
|
||||
|| peerData.getLastAttempted() < now - OLD_PEER_ATTEMPTED_PERIOD) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (peerData.getLastConnected() == null
|
||||
|| peerData.getLastConnected() > now - OLD_PEER_CONNECTION_PERIOD) {
|
||||
|
||||
// First check if there was a connection attempt within the last day
|
||||
if (peerData.getLastAttempted() != null
|
||||
&& peerData.getLastAttempted() > now - OLD_PEER_ATTEMPTED_PERIOD) {
|
||||
|
||||
// There was, so now check if we had a successful connection in the last 7 days
|
||||
if (peerData.getLastConnected() != null
|
||||
&& peerData.getLastConnected() > now - OLD_PEER_CONNECTION_PERIOD) {
|
||||
|
||||
// We did, so this is NOT an 'old' peer
|
||||
return true;
|
||||
}
|
||||
|
||||
// Last successful connection was more than 1 week ago - this is an 'old' peer
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
// Best to wait until we have a connection attempt - assume not an 'old' peer until then
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
// Disregard peers that are NOT 'old'
|
||||
|
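The rewritten predicate above treats a peer as "old" only when a connection was attempted within the last day yet the last successful connection is more than a week in the past; peers with no recent attempt are kept until there is evidence either way. A standalone sketch of that rule, with a stand-in data holder and example period constants rather than the node's actual values:

import java.util.function.Predicate;

// Sketch only: true means "keep this peer" (it is NOT considered old).
public class OldPeerFilterSketch {
    static final long DAY_MS = 24 * 60 * 60 * 1000L;
    static final long OLD_PEER_ATTEMPTED_PERIOD = DAY_MS;       // attempted within last day
    static final long OLD_PEER_CONNECTION_PERIOD = 7 * DAY_MS;  // connected within last week

    static class PeerInfo {
        Long lastAttempted;  // null = never attempted
        Long lastConnected;  // null = never connected
    }

    static Predicate<PeerInfo> isNotOldPeer(long now) {
        return peer -> {
            // No attempt within the last day? Wait for one before judging.
            if (peer.lastAttempted == null || peer.lastAttempted <= now - OLD_PEER_ATTEMPTED_PERIOD)
                return true;

            // Attempted recently: keep only if it also connected within the last week.
            return peer.lastConnected != null && peer.lastConnected > now - OLD_PEER_CONNECTION_PERIOD;
        };
    }
}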
@ -9,6 +9,7 @@ import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformation;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
@ -46,12 +47,12 @@ public class BlockMessage extends Message {
|
||||
try {
|
||||
int height = byteBuffer.getInt();
|
||||
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = BlockTransformer.fromByteBuffer(byteBuffer);
|
||||
BlockTransformation blockTransformation = BlockTransformer.fromByteBuffer(byteBuffer);
|
||||
|
||||
BlockData blockData = blockInfo.getA();
|
||||
BlockData blockData = blockTransformation.getBlockData();
|
||||
blockData.setHeight(height);
|
||||
|
||||
return new BlockMessage(id, blockData, blockInfo.getB(), blockInfo.getC());
|
||||
return new BlockMessage(id, blockData, blockTransformation.getTransactions(), blockTransformation.getAtStates());
|
||||
} catch (TransformationException e) {
|
||||
LOGGER.info(String.format("Received garbled BLOCK message: %s", e.getMessage()));
|
||||
throw new MessageException(e.getMessage(), e);
|
||||
|
src/main/java/org/qortal/network/message/BlockV2Message.java (new file, 87 lines)
@ -0,0 +1,87 @@
|
||||
package org.qortal.network.message;
|
||||
|
||||
import com.google.common.primitives.Ints;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformation;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
|
||||
public class BlockV2Message extends Message {
|
||||
|
||||
private static final Logger LOGGER = LogManager.getLogger(BlockV2Message.class);
|
||||
public static final long MIN_PEER_VERSION = 0x300030003L; // 3.3.3
|
||||
|
||||
private BlockData blockData;
|
||||
private List<TransactionData> transactions;
|
||||
private byte[] atStatesHash;
|
||||
|
||||
public BlockV2Message(Block block) throws TransformationException {
|
||||
super(MessageType.BLOCK_V2);
|
||||
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
|
||||
|
||||
try {
|
||||
bytes.write(Ints.toByteArray(block.getBlockData().getHeight()));
|
||||
|
||||
bytes.write(BlockTransformer.toBytesV2(block));
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream");
|
||||
}
|
||||
|
||||
this.dataBytes = bytes.toByteArray();
|
||||
this.checksumBytes = Message.generateChecksum(this.dataBytes);
|
||||
}
|
||||
|
||||
public BlockV2Message(byte[] cachedBytes) {
|
||||
super(MessageType.BLOCK_V2);
|
||||
|
||||
this.dataBytes = cachedBytes;
|
||||
this.checksumBytes = Message.generateChecksum(this.dataBytes);
|
||||
}
|
||||
|
||||
private BlockV2Message(int id, BlockData blockData, List<TransactionData> transactions, byte[] atStatesHash) {
|
||||
super(id, MessageType.BLOCK_V2);
|
||||
|
||||
this.blockData = blockData;
|
||||
this.transactions = transactions;
|
||||
this.atStatesHash = atStatesHash;
|
||||
}
|
||||
|
||||
public BlockData getBlockData() {
|
||||
return this.blockData;
|
||||
}
|
||||
|
||||
public List<TransactionData> getTransactions() {
|
||||
return this.transactions;
|
||||
}
|
||||
|
||||
public byte[] getAtStatesHash() {
|
||||
return this.atStatesHash;
|
||||
}
|
||||
|
||||
public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) throws MessageException {
|
||||
try {
|
||||
int height = byteBuffer.getInt();
|
||||
|
||||
BlockTransformation blockTransformation = BlockTransformer.fromByteBufferV2(byteBuffer);
|
||||
|
||||
BlockData blockData = blockTransformation.getBlockData();
|
||||
blockData.setHeight(height);
|
||||
|
||||
return new BlockV2Message(id, blockData, blockTransformation.getTransactions(), blockTransformation.getAtStatesHash());
|
||||
} catch (TransformationException e) {
|
||||
LOGGER.info(String.format("Received garbled BLOCK_V2 message: %s", e.getMessage()));
|
||||
throw new MessageException(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
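MIN_PEER_VERSION above packs version 3.3.3 into 16-bit fields, giving 0x300030003L. A hedged, standalone sketch of that packing and of gating BLOCK_V2 on it; the helper and the decision point are illustrative, since this hunk does not show where the node actually chooses between BLOCK and BLOCK_V2:

// Sketch only: each version component occupies 16 bits of the packed long.
public class BlockV2GateSketch {
    static long packVersion(int major, int minor, int patch) {
        return ((long) major << 32) | ((long) minor << 16) | patch;
    }

    static final long MIN_PEER_VERSION = packVersion(3, 3, 3); // == 0x300030003L

    // Decide which block message type could be sent to a peer advertising 'peerVersion'.
    static String chooseBlockMessageType(long peerVersion) {
        return peerVersion >= MIN_PEER_VERSION ? "BLOCK_V2" : "BLOCK";
    }

    public static void main(String[] args) {
        System.out.println(chooseBlockMessageType(packVersion(3, 3, 2))); // BLOCK
        System.out.println(chooseBlockMessageType(packVersion(3, 4, 0))); // BLOCK_V2
    }
}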
@ -0,0 +1,112 @@
|
||||
package org.qortal.network.message;
|
||||
|
||||
import com.google.common.primitives.Longs;
|
||||
import org.qortal.transform.Transformer;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.*;
|
||||
|
||||
/**
 * For requesting online accounts info from remote peer, given our list of online accounts.
 * <p></p>
 * Different format to V1 and V2:<br>
 * <ul>
 * <li>V1 is: number of entries, then timestamp + pubkey for each entry</li>
 * <li>V2 is: groups of: number of entries, timestamp, then pubkey for each entry</li>
 * <li>V3 is: groups of: timestamp, number of entries (one per leading byte), then hash(pubkeys) for each entry</li>
 * </ul>
 * <p></p>
 * End
 */
|
||||
public class GetOnlineAccountsV3Message extends Message {
|
||||
|
||||
private static final Map<Long, Map<Byte, byte[]>> EMPTY_ONLINE_ACCOUNTS = Collections.emptyMap();
|
||||
private Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte;
|
||||
|
||||
public GetOnlineAccountsV3Message(Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte) {
|
||||
super(MessageType.GET_ONLINE_ACCOUNTS_V3);
|
||||
|
||||
// If we don't have ANY online accounts then it's an easier construction...
|
||||
if (hashesByTimestampThenByte.isEmpty()) {
|
||||
this.dataBytes = EMPTY_DATA_BYTES;
|
||||
return;
|
||||
}
|
||||
|
||||
// We should know exactly how many bytes to allocate now
|
||||
int byteSize = hashesByTimestampThenByte.size() * (Transformer.TIMESTAMP_LENGTH + Transformer.BYTE_LENGTH);
|
||||
|
||||
byteSize += hashesByTimestampThenByte.values()
|
||||
.stream()
|
||||
.mapToInt(map -> map.size() * Transformer.PUBLIC_KEY_LENGTH)
|
||||
.sum();
|
||||
|
||||
ByteArrayOutputStream bytes = new ByteArrayOutputStream(byteSize);
|
||||
|
||||
// Warning: no double-checking/fetching! We must be ConcurrentMap compatible.
|
||||
// So no contains() then get() or multiple get()s on the same key/map.
|
||||
try {
|
||||
for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) {
|
||||
bytes.write(Longs.toByteArray(outerMapEntry.getKey()));
|
||||
|
||||
var innerMap = outerMapEntry.getValue();
|
||||
|
||||
// Number of entries: 1 - 256, where 256 is represented by 0
|
||||
bytes.write(innerMap.size() & 0xFF);
|
||||
|
||||
for (byte[] hashBytes : innerMap.values()) {
|
||||
bytes.write(hashBytes);
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream");
|
||||
}
|
||||
|
||||
this.dataBytes = bytes.toByteArray();
|
||||
this.checksumBytes = Message.generateChecksum(this.dataBytes);
|
||||
}
|
||||
|
||||
private GetOnlineAccountsV3Message(int id, Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte) {
|
||||
super(id, MessageType.GET_ONLINE_ACCOUNTS_V3);
|
||||
|
||||
this.hashesByTimestampThenByte = hashesByTimestampThenByte;
|
||||
}
|
||||
|
||||
public Map<Long, Map<Byte, byte[]>> getHashesByTimestampThenByte() {
|
||||
return this.hashesByTimestampThenByte;
|
||||
}
|
||||
|
||||
public static Message fromByteBuffer(int id, ByteBuffer bytes) {
|
||||
// 'empty' case
|
||||
if (!bytes.hasRemaining()) {
|
||||
return new GetOnlineAccountsV3Message(id, EMPTY_ONLINE_ACCOUNTS);
|
||||
}
|
||||
|
||||
Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte = new HashMap<>();
|
||||
|
||||
while (bytes.hasRemaining()) {
|
||||
long timestamp = bytes.getLong();
|
||||
|
||||
int hashCount = bytes.get();
|
||||
if (hashCount <= 0)
|
||||
// 256 is represented by 0.
|
||||
// Also converts negative signed value (e.g. -1) to proper positive unsigned value (255)
|
||||
hashCount += 256;
|
||||
|
||||
Map<Byte, byte[]> hashesByByte = new HashMap<>();
|
||||
|
||||
for (int i = 0; i < hashCount; ++i) {
|
||||
byte[] publicKeyHash = new byte[Transformer.PUBLIC_KEY_LENGTH];
|
||||
bytes.get(publicKeyHash);
|
||||
|
||||
hashesByByte.put(publicKeyHash[0], publicKeyHash);
|
||||
}
|
||||
|
||||
hashesByTimestampThenByte.put(timestamp, hashesByByte);
|
||||
}
|
||||
|
||||
return new GetOnlineAccountsV3Message(id, hashesByTimestampThenByte);
|
||||
}
|
||||
|
||||
}
|
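The count byte above encodes 1 to 256 entries with 256 wrapping to 0, and the decoder adds 256 back whenever the signed byte reads as zero or negative. A standalone demo of just that convention, separate from the message class itself:

// Sketch only: round-trips the "256 is represented by 0" count byte.
public class CountByteSketch {
    static int encodeCount(int count) {
        if (count < 1 || count > 256)
            throw new IllegalArgumentException("count must be 1..256");
        return count & 0xFF; // 256 -> 0
    }

    static int decodeCount(byte raw) {
        int count = raw;      // signed byte: -128..127
        if (count <= 0)
            count += 256;     // 0 -> 256, negatives back to 129..255
        return count;
    }

    public static void main(String[] args) {
        for (int count : new int[] { 1, 127, 128, 255, 256 })
            System.out.println(count + " -> byte " + encodeCount(count)
                    + " -> decoded " + decodeCount((byte) encodeCount(count)));
    }
}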
@ -46,6 +46,7 @@ public abstract class Message {
|
||||
private static final int MAX_DATA_SIZE = 10 * 1024 * 1024; // 10MB
|
||||
|
||||
protected static final byte[] EMPTY_DATA_BYTES = new byte[0];
|
||||
private static final ByteBuffer EMPTY_READ_ONLY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_DATA_BYTES).asReadOnlyBuffer();
|
||||
|
||||
protected int id;
|
||||
protected final MessageType type;
|
||||
@ -126,7 +127,7 @@ public abstract class Message {
|
||||
if (dataSize > 0 && dataSize + CHECKSUM_LENGTH > readOnlyBuffer.remaining())
|
||||
return null;
|
||||
|
||||
ByteBuffer dataSlice = null;
|
||||
ByteBuffer dataSlice = EMPTY_READ_ONLY_BYTE_BUFFER;
|
||||
if (dataSize > 0) {
|
||||
byte[] expectedChecksum = new byte[CHECKSUM_LENGTH];
|
||||
readOnlyBuffer.get(expectedChecksum);
|
||||
|
@ -34,6 +34,7 @@ public enum MessageType {
|
||||
|
||||
BLOCK(50, BlockMessage::fromByteBuffer),
|
||||
GET_BLOCK(51, GetBlockMessage::fromByteBuffer),
|
||||
BLOCK_V2(52, BlockV2Message::fromByteBuffer),
|
||||
|
||||
SIGNATURES(60, SignaturesMessage::fromByteBuffer),
|
||||
GET_SIGNATURES_V2(61, GetSignaturesV2Message::fromByteBuffer),
|
||||
@ -45,6 +46,8 @@ public enum MessageType {
|
||||
GET_ONLINE_ACCOUNTS(81, GetOnlineAccountsMessage::fromByteBuffer),
|
||||
ONLINE_ACCOUNTS_V2(82, OnlineAccountsV2Message::fromByteBuffer),
|
||||
GET_ONLINE_ACCOUNTS_V2(83, GetOnlineAccountsV2Message::fromByteBuffer),
|
||||
// ONLINE_ACCOUNTS_V3(84, OnlineAccountsV3Message::fromByteBuffer),
|
||||
GET_ONLINE_ACCOUNTS_V3(85, GetOnlineAccountsV3Message::fromByteBuffer),
|
||||
|
||||
ARBITRARY_DATA(90, ArbitraryDataMessage::fromByteBuffer),
|
||||
GET_ARBITRARY_DATA(91, GetArbitraryDataMessage::fromByteBuffer),
|
||||
|
@ -9,6 +9,7 @@ import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformation;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
@ -66,7 +67,7 @@ public class BlockArchiveReader {
|
||||
this.fileListCache = Map.copyOf(map);
|
||||
}
|
||||
|
||||
public Triple<BlockData, List<TransactionData>, List<ATStateData>> fetchBlockAtHeight(int height) {
|
||||
public BlockTransformation fetchBlockAtHeight(int height) {
|
||||
if (this.fileListCache == null) {
|
||||
this.fetchFileList();
|
||||
}
|
||||
@ -77,13 +78,13 @@ public class BlockArchiveReader {
|
||||
}
|
||||
|
||||
ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes);
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = null;
|
||||
BlockTransformation blockInfo = null;
|
||||
try {
|
||||
blockInfo = BlockTransformer.fromByteBuffer(byteBuffer);
|
||||
if (blockInfo != null && blockInfo.getA() != null) {
|
||||
if (blockInfo != null && blockInfo.getBlockData() != null) {
|
||||
// Block height is stored outside of the main serialized bytes, so it
|
||||
// won't be set automatically.
|
||||
blockInfo.getA().setHeight(height);
|
||||
blockInfo.getBlockData().setHeight(height);
|
||||
}
|
||||
} catch (TransformationException e) {
|
||||
return null;
|
||||
@ -91,8 +92,7 @@ public class BlockArchiveReader {
|
||||
return blockInfo;
|
||||
}
|
||||
|
||||
public Triple<BlockData, List<TransactionData>, List<ATStateData>> fetchBlockWithSignature(
|
||||
byte[] signature, Repository repository) {
|
||||
public BlockTransformation fetchBlockWithSignature(byte[] signature, Repository repository) {
|
||||
|
||||
if (this.fileListCache == null) {
|
||||
this.fetchFileList();
|
||||
@ -105,13 +105,12 @@ public class BlockArchiveReader {
|
||||
return null;
|
||||
}
|
||||
|
||||
public List<Triple<BlockData, List<TransactionData>, List<ATStateData>>> fetchBlocksFromRange(
|
||||
int startHeight, int endHeight) {
|
||||
public List<BlockTransformation> fetchBlocksFromRange(int startHeight, int endHeight) {
|
||||
|
||||
List<Triple<BlockData, List<TransactionData>, List<ATStateData>>> blockInfoList = new ArrayList<>();
|
||||
List<BlockTransformation> blockInfoList = new ArrayList<>();
|
||||
|
||||
for (int height = startHeight; height <= endHeight; height++) {
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = this.fetchBlockAtHeight(height);
|
||||
BlockTransformation blockInfo = this.fetchBlockAtHeight(height);
|
||||
if (blockInfo == null) {
|
||||
return blockInfoList;
|
||||
}
|
||||
|
@ -1,16 +1,13 @@
|
||||
package org.qortal.repository.hsqldb;
|
||||
|
||||
import org.qortal.api.ApiError;
|
||||
import org.qortal.api.ApiExceptionFactory;
|
||||
import org.qortal.api.model.BlockSignerSummary;
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.data.block.BlockArchiveData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.block.BlockSummaryData;
|
||||
import org.qortal.repository.BlockArchiveReader;
|
||||
import org.qortal.repository.BlockArchiveRepository;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.utils.Triple;
|
||||
import org.qortal.transform.block.BlockTransformation;
|
||||
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
@ -29,11 +26,11 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository {
|
||||
|
||||
@Override
|
||||
public BlockData fromSignature(byte[] signature) throws DataException {
|
||||
Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository);
|
||||
if (blockInfo != null) {
|
||||
return (BlockData) blockInfo.getA();
|
||||
}
|
||||
BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository);
|
||||
if (blockInfo == null)
|
||||
return null;
|
||||
|
||||
return blockInfo.getBlockData();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -47,11 +44,11 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository {
|
||||
|
||||
@Override
|
||||
public BlockData fromHeight(int height) throws DataException {
|
||||
Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height);
|
||||
if (blockInfo != null) {
|
||||
return (BlockData) blockInfo.getA();
|
||||
}
|
||||
BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height);
|
||||
if (blockInfo == null)
|
||||
return null;
|
||||
|
||||
return blockInfo.getBlockData();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -79,9 +76,9 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository {
|
||||
int height = referenceBlock.getHeight();
|
||||
if (height > 0) {
|
||||
// Request the block at height + 1
|
||||
Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1);
|
||||
BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1);
|
||||
if (blockInfo != null) {
|
||||
return (BlockData) blockInfo.getA();
|
||||
return blockInfo.getBlockData();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -964,6 +964,11 @@ public class HSQLDBDatabaseUpdates {
|
||||
stmt.execute("DROP TABLE ArbitraryPeers");
|
||||
break;
|
||||
|
||||
case 42:
|
||||
// We need more space for online accounts
|
||||
stmt.execute("ALTER TABLE Blocks ALTER COLUMN online_accounts SET DATA TYPE VARBINARY(10240)");
|
||||
break;
|
||||
|
||||
default:
|
||||
// nothing to do
|
||||
return false;
|
||||
|
@ -23,7 +23,6 @@ import java.util.stream.Stream;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.globalization.Translator;
|
||||
import org.qortal.gui.SysTray;
|
||||
@ -1003,7 +1002,7 @@ public class HSQLDBRepository implements Repository {
|
||||
if (privateKey == null)
|
||||
return null;
|
||||
|
||||
return PrivateKeyAccount.toPublicKey(privateKey);
|
||||
return Crypto.toPublicKey(privateKey);
|
||||
}
|
||||
|
||||
public static String ed25519PublicKeyToAddress(byte[] publicKey) {
|
||||
|
@ -203,7 +203,7 @@ public class Settings {
|
||||
private int maxRetries = 2;
|
||||
|
||||
/** Minimum peer version number required in order to sync with them */
|
||||
private String minPeerVersion = "3.1.0";
|
||||
private String minPeerVersion = "3.3.7";
|
||||
/** Whether to allow connections with peers below minPeerVersion
|
||||
* If true, we won't sync with them but they can still sync with us, and will show in the peers list
|
||||
* If false, sync will be blocked both ways, and they will not appear in the peers list */
|
||||
|
@ -6,6 +6,7 @@ import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataManager;
|
||||
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
|
||||
import org.qortal.crypto.Crypto;
|
||||
@ -19,6 +20,7 @@ import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.arbitrary.ArbitraryDataFile;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.Transformer;
|
||||
import org.qortal.transform.transaction.ArbitraryTransactionTransformer;
|
||||
import org.qortal.transform.transaction.TransactionTransformer;
|
||||
import org.qortal.utils.ArbitraryTransactionUtils;
|
||||
@ -86,6 +88,14 @@ public class ArbitraryTransaction extends Transaction {
|
||||
@Override
|
||||
public boolean hasValidReference() throws DataException {
|
||||
// We shouldn't really get this far, but just in case:
|
||||
|
||||
// Disable reference checking after feature trigger timestamp
|
||||
if (this.arbitraryTransactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) {
|
||||
// Allow any value as long as it is the correct length
|
||||
return this.arbitraryTransactionData.getReference() != null &&
|
||||
this.arbitraryTransactionData.getReference().length == Transformer.SIGNATURE_LENGTH;
|
||||
}
|
||||
|
||||
if (this.arbitraryTransactionData.getReference() == null) {
|
||||
return false;
|
||||
}
|
||||
|
@ -5,6 +5,7 @@ import java.util.List;
|
||||
|
||||
import org.qortal.account.Account;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.asset.AssetData;
|
||||
import org.qortal.data.transaction.ATTransactionData;
|
||||
@ -12,6 +13,7 @@ import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.Transformer;
|
||||
import org.qortal.transform.transaction.AtTransactionTransformer;
|
||||
import org.qortal.utils.Amounts;
|
||||
|
||||
@ -75,6 +77,13 @@ public class AtTransaction extends Transaction {
|
||||
|
||||
@Override
|
||||
public boolean hasValidReference() throws DataException {
|
||||
// Disable reference checking after feature trigger timestamp
|
||||
if (this.atTransactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) {
|
||||
// Allow any value as long as it is the correct length
|
||||
return this.atTransactionData.getReference() != null &&
|
||||
this.atTransactionData.getReference().length == Transformer.SIGNATURE_LENGTH;
|
||||
}
|
||||
|
||||
// Check reference is correct, using AT account, not transaction creator which is null account
|
||||
Account atAccount = getATAccount();
|
||||
return Arrays.equals(atAccount.getLastReference(), atTransactionData.getReference());
|
||||
|
@ -8,6 +8,7 @@ import org.qortal.account.Account;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.account.PublicKeyAccount;
|
||||
import org.qortal.asset.Asset;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.crypto.MemoryPoW;
|
||||
import org.qortal.data.PaymentData;
|
||||
@ -20,6 +21,7 @@ import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.GroupRepository;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.Transformer;
|
||||
import org.qortal.transform.transaction.ChatTransactionTransformer;
|
||||
import org.qortal.transform.transaction.MessageTransactionTransformer;
|
||||
import org.qortal.transform.transaction.TransactionTransformer;
|
||||
@ -163,6 +165,14 @@ public class MessageTransaction extends Transaction {
|
||||
@Override
|
||||
public boolean hasValidReference() throws DataException {
|
||||
// We shouldn't really get this far, but just in case:
|
||||
|
||||
// Disable reference checking after feature trigger timestamp
|
||||
if (this.messageTransactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) {
|
||||
// Allow any value as long as it is the correct length
|
||||
return this.messageTransactionData.getReference() != null &&
|
||||
this.messageTransactionData.getReference().length == Transformer.SIGNATURE_LENGTH;
|
||||
}
|
||||
|
||||
if (this.messageTransactionData.getReference() == null)
|
||||
return false;
|
||||
|
||||
|
@ -31,6 +31,7 @@ import org.qortal.repository.GroupRepository;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.settings.Settings;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.Transformer;
|
||||
import org.qortal.transform.transaction.TransactionTransformer;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
@ -905,6 +906,13 @@ public abstract class Transaction {
|
||||
* @throws DataException
|
||||
*/
|
||||
public boolean hasValidReference() throws DataException {
|
||||
// Disable reference checking after feature trigger timestamp
|
||||
if (this.transactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) {
|
||||
// Allow any value as long as it is the correct length
|
||||
return this.transactionData.getReference() != null &&
|
||||
this.transactionData.getReference().length == Transformer.SIGNATURE_LENGTH;
|
||||
}
|
||||
|
||||
Account creator = getCreator();
|
||||
|
||||
return Arrays.equals(transactionData.getReference(), creator.getLastReference());
|
||||
|
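The hasValidReference overrides above and this base implementation all follow the same pattern: after the disableReferenceTimestamp feature trigger only the reference length is checked, while before it the reference must match the last reference on record. A condensed standalone sketch, using the trigger value from the blockchain.json hunk further down and assuming Transformer.SIGNATURE_LENGTH is 64 bytes:

import java.util.Arrays;

// Sketch only: constants are stand-ins for BlockChain.getDisableReferenceTimestamp()
// and Transformer.SIGNATURE_LENGTH.
public class ReferenceCheckSketch {
    static final long DISABLE_REFERENCE_TIMESTAMP = 1655222400000L;
    static final int SIGNATURE_LENGTH = 64;

    static boolean hasValidReference(long txTimestamp, byte[] reference, byte[] lastReference) {
        // After the feature trigger: any value is accepted as long as the length is right.
        if (txTimestamp >= DISABLE_REFERENCE_TIMESTAMP)
            return reference != null && reference.length == SIGNATURE_LENGTH;

        // Before the trigger: reference must match the last reference exactly.
        return Arrays.equals(reference, lastReference);
    }
}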
@ -0,0 +1,44 @@
|
||||
package org.qortal.transform.block;
|
||||
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public class BlockTransformation {
|
||||
private final BlockData blockData;
|
||||
private final List<TransactionData> transactions;
|
||||
private final List<ATStateData> atStates;
|
||||
private final byte[] atStatesHash;
|
||||
|
||||
/*package*/ BlockTransformation(BlockData blockData, List<TransactionData> transactions, List<ATStateData> atStates) {
|
||||
this.blockData = blockData;
|
||||
this.transactions = transactions;
|
||||
this.atStates = atStates;
|
||||
this.atStatesHash = null;
|
||||
}
|
||||
|
||||
/*package*/ BlockTransformation(BlockData blockData, List<TransactionData> transactions, byte[] atStatesHash) {
|
||||
this.blockData = blockData;
|
||||
this.transactions = transactions;
|
||||
this.atStates = null;
|
||||
this.atStatesHash = atStatesHash;
|
||||
}
|
||||
|
||||
public BlockData getBlockData() {
|
||||
return blockData;
|
||||
}
|
||||
|
||||
public List<TransactionData> getTransactions() {
|
||||
return transactions;
|
||||
}
|
||||
|
||||
public List<ATStateData> getAtStates() {
|
||||
return atStates;
|
||||
}
|
||||
|
||||
public byte[] getAtStatesHash() {
|
||||
return atStatesHash;
|
||||
}
|
||||
}
|
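BlockTransformation carries either a full per-AT state list (V1 form) or just an aggregate hash (V2 form), and exactly one of the two is non-null. A standalone sketch of how a caller can branch on the form; the holder type and wording are simplified stand-ins, not the project's classes:

import java.util.List;

// Sketch only: distinguishes the two forms by which field is populated.
public class BlockFormSketch {
    static class BlockInfo {
        List<byte[]> atStates;   // non-null for V1
        byte[] atStatesHash;     // non-null for V2
    }

    static String describe(BlockInfo blockInfo) {
        if (blockInfo.atStates != null)
            return "V1 block: " + blockInfo.atStates.size() + " per-AT state entries";

        if (blockInfo.atStatesHash != null)
            return "V2 block: single " + blockInfo.atStatesHash.length + "-byte AT states hash";

        return "block with no AT info";
    }
}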
@ -3,12 +3,14 @@ package org.qortal.transform.block;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.at.ATStateData;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
@ -20,7 +22,6 @@ import org.qortal.transform.Transformer;
|
||||
import org.qortal.transform.transaction.TransactionTransformer;
|
||||
import org.qortal.utils.Base58;
|
||||
import org.qortal.utils.Serialization;
|
||||
import org.qortal.utils.Triple;
|
||||
|
||||
import com.google.common.primitives.Ints;
|
||||
import com.google.common.primitives.Longs;
|
||||
@ -45,14 +46,13 @@ public class BlockTransformer extends Transformer {
|
||||
|
||||
protected static final int AT_BYTES_LENGTH = INT_LENGTH;
|
||||
protected static final int AT_FEES_LENGTH = AMOUNT_LENGTH;
|
||||
protected static final int AT_LENGTH = AT_FEES_LENGTH + AT_BYTES_LENGTH;
|
||||
|
||||
protected static final int ONLINE_ACCOUNTS_COUNT_LENGTH = INT_LENGTH;
|
||||
protected static final int ONLINE_ACCOUNTS_SIZE_LENGTH = INT_LENGTH;
|
||||
protected static final int ONLINE_ACCOUNTS_TIMESTAMP_LENGTH = TIMESTAMP_LENGTH;
|
||||
protected static final int ONLINE_ACCOUNTS_SIGNATURES_COUNT_LENGTH = INT_LENGTH;
|
||||
|
||||
protected static final int AT_ENTRY_LENGTH = ADDRESS_LENGTH + SHA256_LENGTH + AMOUNT_LENGTH;
|
||||
public static final int AT_ENTRY_LENGTH = ADDRESS_LENGTH + SHA256_LENGTH + AMOUNT_LENGTH;
|
||||
|
||||
/**
|
||||
* Extract block data and transaction data from serialized bytes.
|
||||
@ -61,7 +61,7 @@ public class BlockTransformer extends Transformer {
|
||||
* @return BlockData and a List of transactions.
|
||||
* @throws TransformationException
|
||||
*/
|
||||
public static Triple<BlockData, List<TransactionData>, List<ATStateData>> fromBytes(byte[] bytes) throws TransformationException {
|
||||
public static BlockTransformation fromBytes(byte[] bytes) throws TransformationException {
|
||||
if (bytes == null)
|
||||
return null;
|
||||
|
||||
@ -76,28 +76,40 @@ public class BlockTransformer extends Transformer {
|
||||
/**
|
||||
* Extract block data and transaction data from serialized bytes containing a single block.
|
||||
*
|
||||
* @param bytes
|
||||
* @param byteBuffer source of serialized block bytes
|
||||
* @return BlockData and a List of transactions.
|
||||
* @throws TransformationException
|
||||
*/
|
||||
public static Triple<BlockData, List<TransactionData>, List<ATStateData>> fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException {
|
||||
public static BlockTransformation fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException {
|
||||
return BlockTransformer.fromByteBuffer(byteBuffer, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract block data and transaction data from serialized bytes containing a single block.
|
||||
*
|
||||
* @param byteBuffer source of serialized block bytes
|
||||
* @return BlockData and a List of transactions.
|
||||
* @throws TransformationException
|
||||
*/
|
||||
public static BlockTransformation fromByteBufferV2(ByteBuffer byteBuffer) throws TransformationException {
|
||||
return BlockTransformer.fromByteBuffer(byteBuffer, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract block data and transaction data from serialized bytes containing one or more blocks.
|
||||
* Extract block data and transaction data from serialized bytes containing a single block, in one of two forms.
|
||||
*
|
||||
* @param bytes
|
||||
* @param byteBuffer source of serialized block bytes
|
||||
* @param isV2 set to true if AT state info is represented by a single hash, false if serialized as per-AT address+state hash+fees
|
||||
* @return the next block's BlockData and a List of transactions.
|
||||
* @throws TransformationException
|
||||
*/
|
||||
public static Triple<BlockData, List<TransactionData>, List<ATStateData>> fromByteBuffer(ByteBuffer byteBuffer, boolean finalBlockInBuffer) throws TransformationException {
|
||||
private static BlockTransformation fromByteBuffer(ByteBuffer byteBuffer, boolean isV2) throws TransformationException {
|
||||
int version = byteBuffer.getInt();
|
||||
|
||||
if (finalBlockInBuffer && byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH)
|
||||
if (byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH)
|
||||
throw new TransformationException("Byte data too short for Block");
|
||||
|
||||
if (finalBlockInBuffer && byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize())
|
||||
if (byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize())
|
||||
throw new TransformationException("Byte data too long for Block");
|
||||
|
||||
long timestamp = byteBuffer.getLong();
|
||||
@ -117,20 +129,29 @@ public class BlockTransformer extends Transformer {
|
||||
|
||||
int atCount = 0;
|
||||
long atFees = 0;
|
||||
List<ATStateData> atStates = new ArrayList<>();
|
||||
byte[] atStatesHash = null;
|
||||
List<ATStateData> atStates = null;
|
||||
|
||||
if (isV2) {
|
||||
// Simply: AT count, AT total fees, hash(all AT states)
|
||||
atCount = byteBuffer.getInt();
|
||||
atFees = byteBuffer.getLong();
|
||||
atStatesHash = new byte[Transformer.SHA256_LENGTH];
|
||||
byteBuffer.get(atStatesHash);
|
||||
} else {
|
||||
// V1: AT info byte length, then per-AT entries of AT address + state hash + fees
|
||||
int atBytesLength = byteBuffer.getInt();
|
||||
|
||||
if (atBytesLength > BlockChain.getInstance().getMaxBlockSize())
|
||||
throw new TransformationException("Byte data too long for Block's AT info");
|
||||
|
||||
ByteBuffer atByteBuffer = byteBuffer.slice();
|
||||
atByteBuffer.limit(atBytesLength);
|
||||
|
||||
// Read AT-address, SHA256 hash and fees
|
||||
if (atBytesLength % AT_ENTRY_LENGTH != 0)
|
||||
throw new TransformationException("AT byte data not a multiple of AT entry length");
|
||||
|
||||
ByteBuffer atByteBuffer = byteBuffer.slice();
|
||||
atByteBuffer.limit(atBytesLength);
|
||||
|
||||
atStates = new ArrayList<>();
|
||||
while (atByteBuffer.hasRemaining()) {
|
||||
byte[] atAddressBytes = new byte[ADDRESS_LENGTH];
|
||||
atByteBuffer.get(atAddressBytes);
|
||||
@ -152,6 +173,7 @@ public class BlockTransformer extends Transformer {
|
||||
|
||||
// AT count to reflect the number of states we have
|
||||
atCount = atStates.size();
|
||||
}
|
||||
|
||||
// Add AT fees to totalFees
|
||||
totalFees += atFees;
|
||||
@ -221,16 +243,15 @@ public class BlockTransformer extends Transformer {
|
||||
byteBuffer.get(onlineAccountsSignatures);
|
||||
}
|
||||
|
||||
// We should only complain about excess byte data if we aren't expecting more blocks in this ByteBuffer
|
||||
if (finalBlockInBuffer && byteBuffer.hasRemaining())
|
||||
throw new TransformationException("Excess byte data found after parsing Block");
|
||||
|
||||
// We don't have a height!
|
||||
Integer height = null;
|
||||
BlockData blockData = new BlockData(version, reference, transactionCount, totalFees, transactionsSignature, height, timestamp,
|
||||
minterPublicKey, minterSignature, atCount, atFees, encodedOnlineAccounts, onlineAccountsCount, onlineAccountsTimestamp, onlineAccountsSignatures);
|
||||
|
||||
return new Triple<>(blockData, transactions, atStates);
|
||||
if (isV2)
|
||||
return new BlockTransformation(blockData, transactions, atStatesHash);
|
||||
else
|
||||
return new BlockTransformation(blockData, transactions, atStates);
|
||||
}
|
||||
|
||||
public static int getDataLength(Block block) throws TransformationException {
|
||||
@ -266,6 +287,14 @@ public class BlockTransformer extends Transformer {
|
||||
}
|
||||
|
||||
public static byte[] toBytes(Block block) throws TransformationException {
|
||||
return toBytes(block, false);
|
||||
}
|
||||
|
||||
public static byte[] toBytesV2(Block block) throws TransformationException {
|
||||
return toBytes(block, true);
|
||||
}
|
||||
|
||||
private static byte[] toBytes(Block block, boolean isV2) throws TransformationException {
|
||||
BlockData blockData = block.getBlockData();
|
||||
|
||||
try {
|
||||
@ -279,6 +308,26 @@ public class BlockTransformer extends Transformer {
|
||||
bytes.write(blockData.getMinterSignature());
|
||||
|
||||
int atBytesLength = blockData.getATCount() * AT_ENTRY_LENGTH;
|
||||
if (isV2) {
|
||||
ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength);
|
||||
long atFees = 0;
|
||||
|
||||
for (ATStateData atStateData : block.getATStates()) {
|
||||
// Skip initial states generated by DEPLOY_AT transactions in the same block
|
||||
if (atStateData.isInitial())
|
||||
continue;
|
||||
|
||||
atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8));
|
||||
atHashBytes.write(atStateData.getStateHash());
|
||||
atHashBytes.write(Longs.toByteArray(atStateData.getFees()));
|
||||
|
||||
atFees += atStateData.getFees();
|
||||
}
|
||||
|
||||
bytes.write(Ints.toByteArray(blockData.getATCount()));
|
||||
bytes.write(Longs.toByteArray(atFees));
|
||||
bytes.write(Crypto.digest(atHashBytes.toByteArray()));
|
||||
} else {
|
||||
bytes.write(Ints.toByteArray(atBytesLength));
|
||||
|
||||
for (ATStateData atStateData : block.getATStates()) {
|
||||
@ -290,6 +339,7 @@ public class BlockTransformer extends Transformer {
|
||||
bytes.write(atStateData.getStateHash());
|
||||
bytes.write(Longs.toByteArray(atStateData.getFees()));
|
||||
}
|
||||
}
|
||||
|
||||
// Transactions
|
||||
bytes.write(Ints.toByteArray(blockData.getTransactionCount()));
|
||||
|
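The V2 branch above replaces the per-AT entries with three fields: AT count, total AT fees, and a SHA-256 digest over the concatenated address + state hash + fees entries. A standalone sketch of that summary computation, using java.security.MessageDigest in place of the project's Crypto.digest helper and an illustrative AtEntry holder; the real code also skips initial states created by DEPLOY_AT transactions in the same block:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;

// Sketch only: AtEntry and Summary are stand-ins, not ATStateData / block fields.
public class AtStatesSummarySketch {
    static class AtEntry {
        String atAddress;
        byte[] stateHash; // 32 bytes in the real format
        long fees;
    }

    static class Summary {
        int atCount;
        long totalFees;
        byte[] atStatesHash;
    }

    static Summary summarize(List<AtEntry> atEntries) throws IOException, NoSuchAlgorithmException {
        ByteArrayOutputStream concatenated = new ByteArrayOutputStream();
        Summary summary = new Summary();

        for (AtEntry entry : atEntries) {
            // Same per-entry layout as V1: address bytes + state hash + fees as 8-byte long
            concatenated.write(entry.atAddress.getBytes(StandardCharsets.UTF_8));
            concatenated.write(entry.stateHash);
            concatenated.write(ByteBuffer.allocate(8).putLong(entry.fees).array());

            summary.totalFees += entry.fees;
        }

        summary.atCount = atEntries.size();
        summary.atStatesHash = MessageDigest.getInstance("SHA-256").digest(concatenated.toByteArray());
        return summary;
    }
}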
@ -6,6 +6,7 @@ import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.BlockArchiveReader;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.transform.block.BlockTransformation;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
@ -33,8 +34,7 @@ public class BlockArchiveUtils {
|
||||
repository.discardChanges();
|
||||
final int requestedRange = endHeight+1-startHeight;
|
||||
|
||||
List<Triple<BlockData, List<TransactionData>, List<ATStateData>>> blockInfoList =
|
||||
BlockArchiveReader.getInstance().fetchBlocksFromRange(startHeight, endHeight);
|
||||
List<BlockTransformation> blockInfoList = BlockArchiveReader.getInstance().fetchBlocksFromRange(startHeight, endHeight);
|
||||
|
||||
// Ensure that we have received all of the requested blocks
|
||||
if (blockInfoList == null || blockInfoList.isEmpty()) {
|
||||
@ -43,27 +43,26 @@ public class BlockArchiveUtils {
|
||||
if (blockInfoList.size() != requestedRange) {
|
||||
throw new IllegalStateException("Non matching block count when importing from archive");
|
||||
}
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> firstBlock = blockInfoList.get(0);
|
||||
if (firstBlock == null || firstBlock.getA().getHeight() != startHeight) {
|
||||
BlockTransformation firstBlock = blockInfoList.get(0);
|
||||
if (firstBlock == null || firstBlock.getBlockData().getHeight() != startHeight) {
|
||||
throw new IllegalStateException("Non matching first block when importing from archive");
|
||||
}
|
||||
if (blockInfoList.size() > 0) {
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> lastBlock =
|
||||
blockInfoList.get(blockInfoList.size() - 1);
|
||||
if (lastBlock == null || lastBlock.getA().getHeight() != endHeight) {
|
||||
BlockTransformation lastBlock = blockInfoList.get(blockInfoList.size() - 1);
|
||||
if (lastBlock == null || lastBlock.getBlockData().getHeight() != endHeight) {
|
||||
throw new IllegalStateException("Non matching last block when importing from archive");
|
||||
}
|
||||
}
|
||||
|
||||
// Everything seems okay, so go ahead with the import
|
||||
for (Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo : blockInfoList) {
|
||||
for (BlockTransformation blockInfo : blockInfoList) {
|
||||
try {
|
||||
// Save block
|
||||
repository.getBlockRepository().save(blockInfo.getA());
|
||||
repository.getBlockRepository().save(blockInfo.getBlockData());
|
||||
|
||||
// Save AT state data hashes
|
||||
for (ATStateData atStateData : blockInfo.getC()) {
|
||||
atStateData.setHeight(blockInfo.getA().getHeight());
|
||||
for (ATStateData atStateData : blockInfo.getAtStates()) {
|
||||
atStateData.setHeight(blockInfo.getBlockData().getHeight());
|
||||
repository.getATRepository().save(atStateData);
|
||||
}
|
||||
|
||||
|
@ -60,7 +60,9 @@
|
||||
"shareBinFix": 399000,
|
||||
"calcChainWeightTimestamp": 1620579600000,
|
||||
"transactionV5Timestamp": 1642176000000,
|
||||
"transactionV6Timestamp": 9999999999999
|
||||
"transactionV6Timestamp": 9999999999999,
|
||||
"disableReferenceTimestamp": 1655222400000,
|
||||
"aggregateSignatureTimestamp": 1656864000000
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -6,15 +6,15 @@
|
||||
### Common ###
|
||||
JSON = JSON Nachricht konnte nicht geparst werden
|
||||
|
||||
INSUFFICIENT_BALANCE = insufficient balance
|
||||
INSUFFICIENT_BALANCE = unzureichendes Guthaben
|
||||
|
||||
UNAUTHORIZED = API-Aufruf nicht autorisiert
|
||||
|
||||
REPOSITORY_ISSUE = Repository-Fehler
|
||||
|
||||
NON_PRODUCTION = this API call is not permitted for production systems
|
||||
NON_PRODUCTION = Dieser API-Aufruf ist für Produktionssysteme nicht gestattet
|
||||
|
||||
BLOCKCHAIN_NEEDS_SYNC = blockchain needs to synchronize first
|
||||
BLOCKCHAIN_NEEDS_SYNC = Blockchain muss sich erst synchronisieren
|
||||
|
||||
NO_TIME_SYNC = noch keine Uhrensynchronisation
|
||||
|
||||
@ -68,16 +68,16 @@ ORDER_UNKNOWN = unbekannte asset order ID
|
||||
GROUP_UNKNOWN = Gruppe unbekannt
|
||||
|
||||
### Foreign Blockchain ###
|
||||
FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue
|
||||
FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = fremde Blockchain oder ElectrumX Netzwerk Problem
|
||||
|
||||
FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain
|
||||
FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = unzureichendes Guthaben auf fremder Blockchain
|
||||
|
||||
FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time)
|
||||
FOREIGN_BLOCKCHAIN_TOO_SOON = zu früh um fremde Blockchain-Transaktionen zu übertragen (Sperrzeit/mittlere Blockzeit)
|
||||
|
||||
### Trade Portal ###
|
||||
ORDER_SIZE_TOO_SMALL = order amount too low
|
||||
ORDER_SIZE_TOO_SMALL = Bestellmenge zu niedrig
|
||||
|
||||
### Data ###
|
||||
FILE_NOT_FOUND = Datei nicht gefunden
|
||||
|
||||
NO_REPLY = peer did not reply with data
|
||||
NO_REPLY = Peer hat nicht mit Daten geantwortet
|
||||
|
src/main/resources/i18n/ApiError_ko.properties (new file, 83 lines)
@ -0,0 +1,83 @@
|
||||
#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/)
|
||||
# Keys are from api.ApiError enum
|
||||
|
||||
# "localeLang": "ko",
|
||||
|
||||
### Common ###
|
||||
JSON = JSON 메시지를 구문 분석하지 못했습니다.
|
||||
|
||||
INSUFFICIENT_BALANCE = 잔고 부족
|
||||
|
||||
UNAUTHORIZED = 승인되지 않은 API 호출
|
||||
|
||||
REPOSITORY_ISSUE = 리포지토리 오류
|
||||
|
||||
NON_PRODUCTION = 이 API 호출은 프로덕션 시스템에 허용되지 않습니다.
|
||||
|
||||
BLOCKCHAIN_NEEDS_SYNC = 블록체인이 먼저 동기화되어야 함
|
||||
|
||||
NO_TIME_SYNC = 아직 동기화가 없습니다.
|
||||
|
||||
### Validation ###
|
||||
INVALID_SIGNATURE = 무효 서명
|
||||
|
||||
INVALID_ADDRESS = 잘못된 주소
|
||||
|
||||
INVALID_PUBLIC_KEY = 잘못된 공개 키
|
||||
|
||||
INVALID_DATA = 잘못된 데이터
|
||||
|
||||
INVALID_NETWORK_ADDRESS = 잘못된 네트워크 주소
|
||||
|
||||
ADDRESS_UNKNOWN = 계정 주소 알 수 없음
|
||||
|
||||
INVALID_CRITERIA = 잘못된 검색 기준
|
||||
|
||||
INVALID_REFERENCE = 무효 참조
|
||||
|
||||
TRANSFORMATION_ERROR = JSON을 트랜잭션으로 변환할 수 없습니다.
|
||||
|
||||
INVALID_PRIVATE_KEY = 잘못된 개인 키
|
||||
|
||||
INVALID_HEIGHT = 잘못된 블록 높이
|
||||
|
||||
CANNOT_MINT = 계정을 만들 수 없습니다.
|
||||
|
||||
### Blocks ###
|
||||
BLOCK_UNKNOWN = 알 수 없는 블록
|
||||
|
||||
### Transactions ###
|
||||
TRANSACTION_UNKNOWN = 알 수 없는 거래
|
||||
|
||||
PUBLIC_KEY_NOT_FOUND = 공개 키를 찾을 수 없음
|
||||
|
||||
# this one is special in that caller expected to pass two additional strings, hence the two %s
|
||||
TRANSACTION_INVALID = 유효하지 않은 거래: %s (%s)
|
||||
|
||||
### Naming ###
|
||||
NAME_UNKNOWN = 이름 미상
|
||||
|
||||
### Asset ###
|
||||
INVALID_ASSET_ID = 잘못된 자산 ID
|
||||
|
||||
INVALID_ORDER_ID = 자산 주문 ID가 잘못되었습니다.
|
||||
|
||||
ORDER_UNKNOWN = 알 수 없는 자산 주문 ID
|
||||
|
||||
### Groups ###
|
||||
GROUP_UNKNOWN = 알 수 없는 그룹
|
||||
|
||||
### Foreign Blockchain ###
|
||||
FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = 외부 블록체인 또는 일렉트럼X 네트워크 문제
|
||||
|
||||
FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = 외부 블록체인 잔액 부족
|
||||
|
||||
FOREIGN_BLOCKCHAIN_TOO_SOON = 외부 블록체인 트랜잭션을 브로드캐스트하기에는 너무 빠릅니다(LockTime/중앙 블록 시간).
|
||||
|
||||
### Trade Portal ###
|
||||
ORDER_SIZE_TOO_SMALL = 주문량이 너무 적다
|
||||
|
||||
### Data ###
|
||||
FILE_NOT_FOUND = 파일을 찾을 수 없음
|
||||
|
||||
NO_REPLY = 피어가 허용된 시간 내에 응답하지 않음
|
src/main/resources/i18n/ApiError_ro.properties (new file, 83 lines)
@ -0,0 +1,83 @@
|
||||
#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/)
|
||||
# Keys are from api.ApiError enum
|
||||
|
||||
# "localeLang": "ro",
|
||||
|
||||
### Comun ###
|
||||
JSON = nu s-a reusit analizarea mesajului JSON
|
||||
|
||||
INSUFFICIENT_BALANCE = fonduri insuficiente
|
||||
|
||||
UNAUTHORIZED = Solicitare API neautorizata
|
||||
|
||||
REPOSITORY_ISSUE = eroare a depozitarului
|
||||
|
||||
NON_PRODUCTION = aceasta solictare API nu este permisa pentru sistemele de productie
|
||||
|
||||
BLOCKCHAIN_NEEDS_SYNC = blockchain-ul trebuie sa se sincronizeze mai intai
|
||||
|
||||
NO_TIME_SYNC = nu exista inca o sincronizare a ceasului
|
||||
|
||||
### Validation ###
|
||||
INVALID_SIGNATURE = semnatura invalida
|
||||
|
||||
INVALID_ADDRESS = adresa invalida
|
||||
|
||||
INVALID_PUBLIC_KEY = cheie publica invalid
|
||||
|
||||
INVALID_DATA = date invalida
|
||||
|
||||
INVALID_NETWORK_ADDRESS = invalid network address
|
||||
|
||||
ADDRESS_UNKNOWN = adresa contului necunoscuta
|
||||
|
||||
INVALID_CRITERIA = criteriu de cautare invalid
|
||||
|
||||
INVALID_REFERENCE = referinta invalida
|
||||
|
||||
TRANSFORMATION_ERROR = nu s-a putut transforma JSON in tranzactie
|
||||
|
||||
INVALID_PRIVATE_KEY = invalid private key
|
||||
|
||||
INVALID_HEIGHT = dimensiunea blocului invalida
|
||||
|
||||
CANNOT_MINT = contul nu poate produce moneda
|
||||
|
||||
### Blocks ###
|
||||
BLOCK_UNKNOWN = bloc necunoscut
|
||||
|
||||
### Transactions ###
|
||||
TRANSACTION_UNKNOWN = tranzactie necunoscuta
|
||||
|
||||
PUBLIC_KEY_NOT_FOUND = nu s-a gasit cheia publica
|
||||
|
||||
# this one is special in that caller expected to pass two additional strings, hence the two %s
|
||||
TRANSACTION_INVALID = tranzactie invalida: %s (%s)
|
||||
|
||||
### Naming ###
|
||||
NAME_UNKNOWN = nume necunoscut
|
||||
|
||||
### Asset ###
|
||||
INVALID_ASSET_ID = ID active invalid
|
||||
|
||||
INVALID_ORDER_ID = ID-ul de comanda al activului invalid
|
||||
|
||||
ORDER_UNKNOWN = ID necunoscut al comenzii activului
|
||||
|
||||
### Groups ###
|
||||
GROUP_UNKNOWN = grup necunoscut
|
||||
|
||||
### Foreign Blockchain ###
|
||||
FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = problema de blockchain strain sau de retea ElectrumX
|
||||
|
||||
FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = sold insuficient pe blockchain strain
|
||||
|
||||
FOREIGN_BLOCKCHAIN_TOO_SOON = prea devreme pentru a difuza o tranzactie blockchain straina (LockTime/median block time)
|
||||
|
||||
### Trade Portal ###
|
||||
ORDER_SIZE_TOO_SMALL = valoarea tranzactiei este prea mica
|
||||
|
||||
### Data ###
|
||||
FILE_NOT_FOUND = nu s-a gasit fisierul
|
||||
|
||||
NO_REPLY = omologul nu a raspuns in termenul stabilit
|
src/main/resources/i18n/SysTray_ko.properties (new file, 46 lines)
@ -0,0 +1,46 @@
|
||||
#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/)
|
||||
# SysTray pop-up menu
|
||||
|
||||
APPLYING_UPDATE_AND_RESTARTING = 자동 업데이트를 적용하고 다시 시작하는 중...
|
||||
|
||||
AUTO_UPDATE = 자동 업데이트
|
||||
|
||||
BLOCK_HEIGHT = 높이
|
||||
|
||||
BUILD_VERSION = 빌드 버전
|
||||
|
||||
CHECK_TIME_ACCURACY = 시간 정확도 점검
|
||||
|
||||
CONNECTING = 연결하는
|
||||
|
||||
CONNECTION = 연결
|
||||
|
||||
CONNECTIONS = 연결
|
||||
|
||||
CREATING_BACKUP_OF_DB_FILES = 데이터베이스 파일의 백업을 만드는 중...
|
||||
|
||||
DB_BACKUP = Database Backup
|
||||
|
||||
DB_CHECKPOINT = Database Checkpoint
|
||||
|
||||
DB_MAINTENANCE = 데이터베이스 유지 관리
|
||||
|
||||
EXIT = 종료
|
||||
|
||||
LITE_NODE = 라이트 노드
|
||||
|
||||
MINTING_DISABLED = 민팅중이 아님
|
||||
|
||||
MINTING_ENABLED = \u2714 민팅
|
||||
|
||||
OPEN_UI = UI 열기
|
||||
|
||||
PERFORMING_DB_CHECKPOINT = 커밋되지 않은 데이터베이스 변경 내용을 저장하는 중...
|
||||
|
||||
PERFORMING_DB_MAINTENANCE = 예약된 유지 관리 수행 중...
|
||||
|
||||
SYNCHRONIZE_CLOCK = 시간 동기화
|
||||
|
||||
SYNCHRONIZING_BLOCKCHAIN = 동기화중
|
||||
|
||||
SYNCHRONIZING_CLOCK = 시간 동기화
|
src/main/resources/i18n/SysTray_ro.properties (new file, 46 lines)
@ -0,0 +1,46 @@
|
||||
#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/)
|
||||
# SysTray pop-up menu
|
||||
|
||||
APPLYING_UPDATE_AND_RESTARTING = Aplicarea actualizarii automate si repornire...
|
||||
|
||||
AUTO_UPDATE = Actualizare automata
|
||||
|
||||
BLOCK_HEIGHT = dimensiune
|
||||
|
||||
BUILD_VERSION = versiunea compilatiei
|
||||
|
||||
CHECK_TIME_ACCURACY = verificare exactitate ora
|
||||
|
||||
CONNECTING = Se conecteaza
|
||||
|
||||
CONNECTION = conexiune
|
||||
|
||||
CONNECTIONS = conexiuni
|
||||
|
||||
CREATING_BACKUP_OF_DB_FILES = Se creaza copia bazei de date
|
||||
|
||||
DB_BACKUP = Copie baza de date
|
||||
|
||||
DB_CHECKPOINT = Punct de control al bazei de date
|
||||
|
||||
DB_MAINTENANCE = Database Maintenance
|
||||
|
||||
EXIT = iesire
|
||||
|
||||
LITE_NODE = Nod Lite
|
||||
|
||||
MINTING_DISABLED = nu produce moneda
|
||||
|
||||
MINTING_ENABLED = \u2714 Minting
|
||||
|
||||
OPEN_UI = Deschidere interfata utilizator IU
|
||||
|
||||
PERFORMING_DB_CHECKPOINT = Salvarea modificarilor nerealizate ale bazei de date...
|
||||
|
||||
PERFORMING_DB_MAINTENANCE = Efectuarea intretinerii programate…
|
||||
|
||||
SYNCHRONIZE_CLOCK = Sincronizare ceas
|
||||
|
||||
SYNCHRONIZING_BLOCKCHAIN = Sincronizare
|
||||
|
||||
SYNCHRONIZING_CLOCK = Se sincronizeaza ceasul
|
@ -25,7 +25,7 @@ DB_CHECKPOINT = Databaskontrollpunkt
|
||||
|
||||
DB_MAINTENANCE = Databasunderhåll
|
||||
|
||||
EXIT = Utgång
|
||||
EXIT = Avsluta
|
||||
|
||||
MINTING_DISABLED = Präglar INTE
|
||||
|
||||
|
src/main/resources/i18n/TransactionValidity_ko.properties (new file, 195 lines)
@ -0,0 +1,195 @@
|
||||
#
|
||||
|
||||
ACCOUNT_ALREADY_EXISTS = 계정이 이미 존재합니다.
|
||||
|
||||
ACCOUNT_CANNOT_REWARD_SHARE = 계정이 보상을 공유할 수 없습니다.
|
||||
|
||||
ADDRESS_ABOVE_RATE_LIMIT = 주소가 지정된 속도 제한에 도달했습니다.
|
||||
|
||||
ADDRESS_BLOCKED = 이 주소는 차단되었습니다.
|
||||
|
||||
ALREADY_GROUP_ADMIN = 이미 그룹 관리자
|
||||
|
||||
ALREADY_GROUP_MEMBER = 이미 그룹 맴버
|
||||
|
||||
ALREADY_VOTED_FOR_THAT_OPTION = 이미 그 옵션에 투표했다.
|
||||
|
||||
ASSET_ALREADY_EXISTS = 자산이 이미 있습니다.
|
||||
|
||||
ASSET_DOES_NOT_EXIST = 자산이 존재하지 않습니다.
|
||||
|
||||
ASSET_DOES_NOT_MATCH_AT = 자산이 AT의 자산과 일치하지 않습니다.
|
||||
|
||||
ASSET_NOT_SPENDABLE = 자산을 사용할 수 없습니다.
|
||||
|
||||
AT_ALREADY_EXISTS = AT가 이미 있습니다.
|
||||
|
||||
AT_IS_FINISHED = AT가 완료되었습니다.
|
||||
|
||||
AT_UNKNOWN = 알 수 없는 AT
|
||||
|
||||
BAN_EXISTS = 금지가 이미 있습니다.
|
||||
|
||||
BAN_UNKNOWN = 금지 알 수 없음
|
||||
|
||||
BANNED_FROM_GROUP = 그룹에서 금지
|
||||
|
||||
BUYER_ALREADY_OWNER = 구매자는 이미 소유자입니다
|
||||
|
||||
CLOCK_NOT_SYNCED = 동기화되지 않은 시간
|
||||
|
||||
DUPLICATE_MESSAGE = 주소가 중복 메시지를 보냈습니다.
|
||||
|
||||
DUPLICATE_OPTION = 중복 옵션
|
||||
|
||||
GROUP_ALREADY_EXISTS = 그룹이 이미 존재합니다
|
||||
|
||||
GROUP_APPROVAL_DECIDED = 그룹 승인이 이미 결정되었습니다.
|
||||
|
||||
GROUP_APPROVAL_NOT_REQUIRED = 그룹 승인이 필요하지 않음
|
||||
|
||||
GROUP_DOES_NOT_EXIST = 그룹이 존재하지 않습니다
|
||||
|
||||
GROUP_ID_MISMATCH = 그룹 ID 불일치
|
||||
|
||||
GROUP_OWNER_CANNOT_LEAVE = 그룹 소유자는 그룹을 나갈 수 없습니다
|
||||
|
||||
HAVE_EQUALS_WANT = 소유 자산은 원하는 자산과 동일합니다.
|
||||
|
||||
INCORRECT_NONCE = 잘못된 PoW nonce
|
||||
|
||||
INSUFFICIENT_FEE = 부족한 수수료
|
||||
|
||||
INVALID_ADDRESS = 잘못된 주소
|
||||
|
||||
INVALID_AMOUNT = 유효하지 않은 금액
|
||||
|
||||
INVALID_ASSET_OWNER = 잘못된 자산 소유자
|
||||
|
||||
INVALID_AT_TRANSACTION = 유효하지 않은 AT 거래
|
||||
|
||||
INVALID_AT_TYPE_LENGTH = 잘못된 AT '유형' 길이
|
||||
|
||||
INVALID_BUT_OK = 유효하지 않지만 OK
|
||||
|
||||
INVALID_CREATION_BYTES = 잘못된 생성 바이트
|
||||
|
||||
INVALID_DATA_LENGTH = 잘못된 데이터 길이
|
||||
|
||||
INVALID_DESCRIPTION_LENGTH = 잘못된 설명 길이
|
||||
|
||||
INVALID_GROUP_APPROVAL_THRESHOLD = 잘못된 그룹 승인 임계값
|
||||
|
||||
INVALID_GROUP_BLOCK_DELAY = 잘못된 그룹 승인 차단 지연
|
||||
|
||||
INVALID_GROUP_ID = 잘못된 그룹 ID
|
||||
|
||||
INVALID_GROUP_OWNER = 잘못된 그룹 소유자
|
||||
|
||||
INVALID_LIFETIME = 유효하지 않은 수명
|
||||
|
||||
INVALID_NAME_LENGTH = 잘못된 이름 길이
|
||||
|
||||
INVALID_NAME_OWNER = 잘못된 이름 소유자
|
||||
|
||||
INVALID_OPTION_LENGTH = 잘못된 옵션 길이
|
||||
|
||||
INVALID_OPTIONS_COUNT = 잘못된 옵션 수
|
||||
|
||||
INVALID_ORDER_CREATOR = 잘못된 주문 생성자
|
||||
|
||||
INVALID_PAYMENTS_COUNT = 유효하지 않은 지불 수
|
||||
|
||||
INVALID_PUBLIC_KEY = 잘못된 공개 키
|
||||
|
||||
INVALID_QUANTITY = 유효하지 않은 수량
|
||||
|
||||
INVALID_REFERENCE = 잘못된 참조
|
||||
|
||||
INVALID_RETURN = 무효 반환
|
||||
|
||||
INVALID_REWARD_SHARE_PERCENT = 잘못된 보상 공유 비율
|
||||
|
||||
INVALID_SELLER = 무효 판매자
|
||||
|
||||
INVALID_TAGS_LENGTH = invalid 'tags' length
|
||||
|
||||
INVALID_TIMESTAMP_SIGNATURE = 유효하지 않은 타임스탬프 서명
|
||||
|
||||
INVALID_TX_GROUP_ID = 잘못된 트랜잭션 그룹 ID
|
||||
|
||||
INVALID_VALUE_LENGTH = 잘못된 '값' 길이
|
||||
|
||||
INVITE_UNKNOWN = 알 수 없는 그룹 초대
|
||||
|
||||
JOIN_REQUEST_EXISTS = 그룹 가입 요청이 이미 있습니다.
|
||||
|
||||
MAXIMUM_REWARD_SHARES = 이미 이 계정에 대한 최대 보상 공유 수에 도달했습니다.
|
||||
|
||||
MISSING_CREATOR = 실종된 창작자
|
||||
|
||||
MULTIPLE_NAMES_FORBIDDEN = 계정당 여러 등록 이름은 금지되어 있습니다.
|
||||
|
||||
NAME_ALREADY_FOR_SALE = 이미 판매 중인 이름
|
||||
|
||||
NAME_ALREADY_REGISTERED = 이미 등록된 이름
|
||||
|
||||
NAME_BLOCKED = 이 이름은 차단되었습니다
|
||||
|
||||
NAME_DOES_NOT_EXIST = 이름이 존재하지 않습니다
|
||||
|
||||
NAME_NOT_FOR_SALE = 이름은 판매용이 아닙니다
|
||||
|
||||
NAME_NOT_NORMALIZED = 유니코드 '정규화된' 형식이 아닌 이름
|
||||
|
||||
NEGATIVE_AMOUNT = 유효하지 않은/음수 금액
|
||||
|
||||
NEGATIVE_FEE = 무효/음수 수수료
|
||||
|
||||
NEGATIVE_PRICE = 유효하지 않은/음수 가격
|
||||
|
||||
NO_BALANCE = 잔액 불충분
|
||||
|
||||
NO_BLOCKCHAIN_LOCK = 노드의 블록체인이 현재 사용 중입니다.
|
||||
|
||||
NO_FLAG_PERMISSION = 계정에 해당 권한이 없습니다
|
||||
|
||||
NOT_GROUP_ADMIN = 계정은 그룹 관리자가 아닙니다.
|
||||
|
||||
NOT_GROUP_MEMBER = 계정이 그룹 구성원이 아닙니다.
|
||||
|
||||
NOT_MINTING_ACCOUNT = 계정은 발행할 수 없습니다
|
||||
|
||||
NOT_YET_RELEASED = 아직 출시되지 않은 기능
|
||||
|
||||
OK = OK
|
||||
|
||||
ORDER_ALREADY_CLOSED = 자산 거래 주문이 이미 종료되었습니다
|
||||
|
||||
ORDER_DOES_NOT_EXIST = 자산 거래 주문이 존재하지 않습니다
|
||||
|
||||
POLL_ALREADY_EXISTS = 설문조사가 이미 존재합니다
|
||||
|
||||
POLL_DOES_NOT_EXIST = 설문조사가 존재하지 않습니다
|
||||
|
||||
POLL_OPTION_DOES_NOT_EXIST = 투표 옵션이 존재하지 않습니다
|
||||
|
||||
PUBLIC_KEY_UNKNOWN = 공개 키 알 수 없음
|
||||
|
||||
REWARD_SHARE_UNKNOWN = 알 수 없는 보상 공유
|
||||
|
||||
SELF_SHARE_EXISTS = 자체 공유(보상 공유)가 이미 존재합니다.
|
||||
|
||||
TIMESTAMP_TOO_NEW = 타임스탬프가 너무 새롭습니다.
|
||||
|
||||
TIMESTAMP_TOO_OLD = 너무 오래된 타임스탬프
|
||||
|
||||
TOO_MANY_UNCONFIRMED = 계정에 보류 중인 확인되지 않은 거래가 너무 많습니다.
|
||||
|
||||
TRANSACTION_ALREADY_CONFIRMED = 거래가 이미 확인되었습니다
|
||||
|
||||
TRANSACTION_ALREADY_EXISTS = 거래가 이미 존재합니다
|
||||
|
||||
TRANSACTION_UNKNOWN = 알 수 없는 거래
|
||||
|
||||
TX_GROUP_ID_MISMATCH = 트랜잭션의 그룹 ID가 일치하지 않습니다
|
src/main/resources/i18n/TransactionValidity_ro.properties (new file, 195 lines)
@ -0,0 +1,195 @@
|
||||
#
|
||||
|
||||
ACCOUNT_ALREADY_EXISTS = contul exista deja
|
||||
|
||||
ACCOUNT_CANNOT_REWARD_SHARE = contul nu poate genera reward-share
|
||||
|
||||
ADDRESS_ABOVE_RATE_LIMIT = adresa a atins limita specificata
|
||||
|
||||
ADDRESS_BLOCKED = aceasta adresa este blocata
|
||||
|
||||
ALREADY_GROUP_ADMIN = sunteti deja admin
|
||||
|
||||
ALREADY_GROUP_MEMBER = sunteti deja membru
|
||||
|
||||
ALREADY_VOTED_FOR_THAT_OPTION = deja ati votat pentru aceasta optiune
|
||||
|
||||
ASSET_ALREADY_EXISTS = activul deja exista
|
||||
|
||||
ASSET_DOES_NOT_EXIST = activul nu exista
|
||||
|
||||
ASSET_DOES_NOT_MATCH_AT = activul nu se potriveste cu activul TA
|
||||
|
||||
ASSET_NOT_SPENDABLE = activul nu poate fi utilizat
|
||||
|
||||
AT_ALREADY_EXISTS = TA exista deja
|
||||
|
||||
AT_IS_FINISHED = TA s-a terminat
|
||||
|
||||
AT_UNKNOWN = TA necunoscuta
|
||||
|
||||
BAN_EXISTS = ban-ul este deja folosit
|
||||
|
||||
BAN_UNKNOWN = ban necunoscut
|
||||
|
||||
BANNED_FROM_GROUP = accesul la grup a fost blocat
|
||||
|
||||
BUYER_ALREADY_OWNER = cumparatorul este deja detinator
|
||||
|
||||
CLOCK_NOT_SYNCED = ceasul nu este sincronizat
|
||||
|
||||
DUPLICATE_MESSAGE = adresa a trimis mesaje duplicate
|
||||
|
||||
DUPLICATE_OPTION = optiune duplicata
|
||||
|
||||
GROUP_ALREADY_EXISTS = grupul deja exista
|
||||
|
||||
GROUP_APPROVAL_DECIDED = aprobarea grupului a fost deja decisa
|
||||
|
||||
GROUP_APPROVAL_NOT_REQUIRED = aprobarea grupului nu este solicitata
|
||||
|
||||
GROUP_DOES_NOT_EXIST = grupul nu exista
|
||||
|
||||
GROUP_ID_MISMATCH = ID-ul grupului incorect
|
||||
|
||||
GROUP_OWNER_CANNOT_LEAVE = proprietarul grupului nu poate parasi grupul
|
||||
|
||||
HAVE_EQUALS_WANT = a avea un obiect este acelasi lucru cu a vrea un obiect
|
||||
|
||||
INCORRECT_NONCE = numar PoW incorect
|
||||
|
||||
INSUFFICIENT_FEE = taxa insuficienta
|
||||
|
||||
INVALID_ADDRESS = adresa invalida
|
||||
|
||||
INVALID_AMOUNT = suma invalida
|
||||
|
||||
INVALID_ASSET_OWNER = proprietar al activului invalid
|
||||
|
||||
INVALID_AT_TRANSACTION = tranzactie automata invalida
|
||||
|
||||
INVALID_AT_TYPE_LENGTH = lungimea 'tipului' TA invalida
|
||||
|
||||
INVALID_BUT_OK = invalid dar OK
|
||||
|
||||
INVALID_CREATION_BYTES = octeti de creatie invalizi
|
||||
|
||||
INVALID_DATA_LENGTH = lungimea datelor invalida
|
||||
|
||||
INVALID_DESCRIPTION_LENGTH = lungimea descrierii invalida
|
||||
|
||||
INVALID_GROUP_APPROVAL_THRESHOLD = prag de aprobare a grupului invalid
|
||||
|
||||
INVALID_GROUP_BLOCK_DELAY = intarziere invalida a blocului de aprobare a grupului
|
||||
|
||||
INVALID_GROUP_ID = ID de grup invalid
|
||||
|
||||
INVALID_GROUP_OWNER = proprietar de grup invalid
|
||||
|
||||
INVALID_LIFETIME = durata de viata invalida
|
||||
|
||||
INVALID_NAME_LENGTH = lungimea numelui invalida
|
||||
|
||||
INVALID_NAME_OWNER = numele proprietarului invalid
|
||||
|
||||
INVALID_OPTION_LENGTH = lungimea optiunii invalida
|
||||
|
||||
INVALID_OPTIONS_COUNT = contor de optiuni invalid
|
||||
|
||||
INVALID_ORDER_CREATOR = creator de ordine invalid
|
||||
|
||||
INVALID_PAYMENTS_COUNT = contor de plati invalid
|
||||
|
||||
INVALID_PUBLIC_KEY = cheie publica invalida
|
||||
|
||||
INVALID_QUANTITY = cantitate invalida
|
||||
|
||||
INVALID_REFERENCE = referinta invalida
|
||||
|
||||
INVALID_RETURN = returnare invalida
|
||||
|
||||
INVALID_REWARD_SHARE_PERCENT = procentaj al cotei de recompensa invalid
|
||||
|
||||
INVALID_SELLER = vanzator invalid
|
||||
|
||||
INVALID_TAGS_LENGTH = lungime a tagurilor invalida
|
||||
|
||||
INVALID_TIMESTAMP_SIGNATURE = semnatura timestamp invalida
|
||||
|
||||
INVALID_TX_GROUP_ID = ID-ul grupului de tranzactii invalid
|
||||
|
||||
INVALID_VALUE_LENGTH = lungimea "valorii" invalida
|
||||
|
||||
INVITE_UNKNOWN = invitatie de grup necunoscuta
|
||||
|
||||
JOIN_REQUEST_EXISTS = cererea de aderare la grup exista deja
|
||||
|
||||
MAXIMUM_REWARD_SHARES = ati ajuns deja la numarul maxim de cote de recompensa pentru acest cont
|
||||
|
||||
MISSING_CREATOR = creator lipsa
|
||||
|
||||
MULTIPLE_NAMES_FORBIDDEN = este interzisa folosirea mai multor nume inregistrate pe cont
|
||||
|
||||
NAME_ALREADY_FOR_SALE = numele este deja de vanzare
|
||||
|
||||
NAME_ALREADY_REGISTERED = nume deja inregistrat
|
||||
|
||||
NAME_BLOCKED = numele este blocat
|
||||
|
||||
NAME_DOES_NOT_EXIST = numele nu exista
|
||||
|
||||
NAME_NOT_FOR_SALE = numele nu este de vanzare
|
||||
|
||||
NAME_NOT_NORMALIZED = numele nu este in forma "normalizata" Unicode
|
||||
|
||||
NEGATIVE_AMOUNT = suma invalida/negativa
|
||||
|
||||
NEGATIVE_FEE = taxa invalida/negativa
|
||||
|
||||
NEGATIVE_PRICE = pret invalid/negativ
|
||||
|
||||
NO_BALANCE = fonduri insuficiente
|
||||
|
||||
NO_BLOCKCHAIN_LOCK = blockchain-ul nodului este momentan ocupat
|
||||
|
||||
NO_FLAG_PERMISSION = contul nu are aceasta permisiune
|
||||
|
||||
NOT_GROUP_ADMIN = contul nu este un administrator de grup
|
||||
|
||||
NOT_GROUP_MEMBER = contul nu este un membru al grupului
|
||||
|
||||
NOT_MINTING_ACCOUNT = contul nu poate genera moneda Qort
|
||||
|
||||
NOT_YET_RELEASED = caracteristica nu este inca disponibila
|
||||
|
||||
OK = OK
|
||||
|
||||
ORDER_ALREADY_CLOSED = ordinul de tranzactionare a activului este deja inchis
|
||||
|
||||
ORDER_DOES_NOT_EXIST = ordinul de comercializare a activului nu exista
|
||||
|
||||
POLL_ALREADY_EXISTS = sondajul exista deja
|
||||
|
||||
POLL_DOES_NOT_EXIST = sondajul nu exista
|
||||
|
||||
POLL_OPTION_DOES_NOT_EXIST = optiunea de sondaj nu exista
|
||||
|
||||
PUBLIC_KEY_UNKNOWN = cheie publica necunoscuta
|
||||
|
||||
REWARD_SHARE_UNKNOWN = cheie de cota de recompensa necunoscuta
|
||||
|
||||
SELF_SHARE_EXISTS = cota personala (cota de recompensa) exista deja
|
||||
|
||||
TIMESTAMP_TOO_NEW = timestamp prea nou
|
||||
|
||||
TIMESTAMP_TOO_OLD = timestamp prea vechi
|
||||
|
||||
TOO_MANY_UNCONFIRMED = contul are prea multe tranzactii neconfirmate in asteptare
|
||||
|
||||
TRANSACTION_ALREADY_CONFIRMED = tranzactia a fost deja confirmata
|
||||
|
||||
TRANSACTION_ALREADY_EXISTS = tranzactia exista deja
|
||||
|
||||
TRANSACTION_UNKNOWN = tranzactie necunoscuta
|
||||
|
||||
TX_GROUP_ID_MISMATCH = ID-ul de grup al tranzactiei nu se potriveste
|
@ -20,6 +20,7 @@ import org.qortal.test.common.Common;
|
||||
import org.qortal.transaction.DeployAtTransaction;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformation;
|
||||
import org.qortal.utils.BlockArchiveUtils;
|
||||
import org.qortal.utils.NTP;
|
||||
import org.qortal.utils.Triple;
|
||||
@ -123,8 +124,8 @@ public class BlockArchiveTests extends Common {
|
||||
|
||||
// Read block 2 from the archive
|
||||
BlockArchiveReader reader = BlockArchiveReader.getInstance();
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> block2Info = reader.fetchBlockAtHeight(2);
|
||||
BlockData block2ArchiveData = block2Info.getA();
|
||||
BlockTransformation block2Info = reader.fetchBlockAtHeight(2);
|
||||
BlockData block2ArchiveData = block2Info.getBlockData();
|
||||
|
||||
// Read block 2 from the repository
|
||||
BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2);
|
||||
@ -137,8 +138,8 @@ public class BlockArchiveTests extends Common {
|
||||
assertEquals(1, block2ArchiveData.getOnlineAccountsCount());
|
||||
|
||||
// Read block 900 from the archive
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> block900Info = reader.fetchBlockAtHeight(900);
|
||||
BlockData block900ArchiveData = block900Info.getA();
|
||||
BlockTransformation block900Info = reader.fetchBlockAtHeight(900);
|
||||
BlockData block900ArchiveData = block900Info.getBlockData();
|
||||
|
||||
// Read block 900 from the repository
|
||||
BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900);
|
||||
@ -200,10 +201,10 @@ public class BlockArchiveTests extends Common {
|
||||
|
||||
// Read a block from the archive
|
||||
BlockArchiveReader reader = BlockArchiveReader.getInstance();
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = reader.fetchBlockAtHeight(testHeight);
|
||||
BlockData archivedBlockData = blockInfo.getA();
|
||||
ATStateData archivedAtStateData = blockInfo.getC().isEmpty() ? null : blockInfo.getC().get(0);
|
||||
List<TransactionData> archivedTransactions = blockInfo.getB();
|
||||
BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight);
|
||||
BlockData archivedBlockData = blockInfo.getBlockData();
|
||||
ATStateData archivedAtStateData = blockInfo.getAtStates().isEmpty() ? null : blockInfo.getAtStates().get(0);
|
||||
List<TransactionData> archivedTransactions = blockInfo.getTransactions();
|
||||
|
||||
// Read the same block from the repository
|
||||
BlockData repositoryBlockData = repository.getBlockRepository().fromHeight(testHeight);
|
||||
@ -255,7 +256,7 @@ public class BlockArchiveTests extends Common {
|
||||
|
||||
// Check block 10 (unarchived)
|
||||
BlockArchiveReader reader = BlockArchiveReader.getInstance();
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = reader.fetchBlockAtHeight(10);
|
||||
BlockTransformation blockInfo = reader.fetchBlockAtHeight(10);
|
||||
assertNull(blockInfo);
|
||||
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import org.qortal.test.common.TransactionUtils;
|
||||
import org.qortal.transaction.Transaction;
|
||||
import org.qortal.transaction.Transaction.TransactionType;
|
||||
import org.qortal.transform.TransformationException;
|
||||
import org.qortal.transform.block.BlockTransformation;
|
||||
import org.qortal.transform.block.BlockTransformer;
|
||||
import org.qortal.transform.transaction.TransactionTransformer;
|
||||
import org.qortal.utils.Base58;
|
||||
@ -121,10 +122,10 @@ public class BlockTests extends Common {
|
||||
|
||||
assertEquals(BlockTransformer.getDataLength(block), bytes.length);
|
||||
|
||||
Triple<BlockData, List<TransactionData>, List<ATStateData>> blockInfo = BlockTransformer.fromBytes(bytes);
|
||||
BlockTransformation blockInfo = BlockTransformer.fromBytes(bytes);
|
||||
|
||||
// Compare transactions
|
||||
List<TransactionData> deserializedTransactions = blockInfo.getB();
|
||||
List<TransactionData> deserializedTransactions = blockInfo.getTransactions();
|
||||
assertEquals("Transaction count differs", blockData.getTransactionCount(), deserializedTransactions.size());
|
||||
|
||||
for (int i = 0; i < blockData.getTransactionCount(); ++i) {
|
||||
|
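The two test hunks above swap the generic Triple<BlockData, List<TransactionData>, List<ATStateData>> return type for the named BlockTransformation holder, so callers read blocks through getBlockData(), getTransactions() and getAtStates() instead of getA()/getB()/getC(). A minimal sketch of the migration pattern, using only the accessors visible in these hunks; the package names for ATStateData and BlockArchiveReader are assumptions, the other imports appear in the diff:

import java.util.List;

import org.qortal.data.at.ATStateData;                 // assumed package
import org.qortal.data.block.BlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.BlockArchiveReader;       // assumed package
import org.qortal.transform.block.BlockTransformation;

public class ArchiveReadSketch {

    // Before this commit:
    //   Triple<BlockData, List<TransactionData>, List<ATStateData>> info = reader.fetchBlockAtHeight(height);
    //   BlockData blockData = info.getA();
    //   List<TransactionData> transactions = info.getB();
    //   List<ATStateData> atStates = info.getC();

    static BlockData readArchivedBlock(int height) {
        BlockArchiveReader reader = BlockArchiveReader.getInstance();

        BlockTransformation blockInfo = reader.fetchBlockAtHeight(height);
        if (blockInfo == null)
            return null; // height not present in the archive

        List<TransactionData> transactions = blockInfo.getTransactions();
        List<ATStateData> atStates = blockInfo.getAtStates();
        // ... assert against repository data as the tests above do ...

        return blockInfo.getBlockData();
    }
}

The same rename applies to BlockTransformer.fromBytes(bytes) in the BlockTests hunk below.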
@ -4,7 +4,7 @@ import org.junit.Test;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.block.BlockChain;
|
||||
import org.qortal.crypto.AES;
|
||||
import org.qortal.crypto.BouncyCastle25519;
|
||||
import org.qortal.crypto.Qortal25519Extras;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.test.common.Common;
|
||||
import org.qortal.utils.Base58;
|
||||
@ -123,14 +123,14 @@ public class CryptoTests extends Common {
|
||||
random.nextBytes(ed25519PrivateKey);
|
||||
PrivateKeyAccount account = new PrivateKeyAccount(null, ed25519PrivateKey);
|
||||
|
||||
byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(account.getPrivateKey());
|
||||
byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(account.getPrivateKey());
|
||||
X25519PrivateKeyParameters x25519PrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0);
|
||||
|
||||
// Derive X25519 public key from X25519 private key
|
||||
byte[] x25519PublicKeyFromPrivate = x25519PrivateKeyParams.generatePublicKey().getEncoded();
|
||||
|
||||
// Derive X25519 public key from Ed25519 public key
|
||||
byte[] x25519PublicKeyFromEd25519 = BouncyCastle25519.toX25519PublicKey(account.getPublicKey());
|
||||
byte[] x25519PublicKeyFromEd25519 = Qortal25519Extras.toX25519PublicKey(account.getPublicKey());
|
||||
|
||||
assertEquals(String.format("Public keys do not match, from private key %s", Base58.encode(ed25519PrivateKey)), Base58.encode(x25519PublicKeyFromPrivate), Base58.encode(x25519PublicKeyFromEd25519));
|
||||
}
|
||||
@ -162,10 +162,10 @@ public class CryptoTests extends Common {
|
||||
}
|
||||
|
||||
private static byte[] calcBCSharedSecret(byte[] ed25519PrivateKey, byte[] ed25519PublicKey) {
|
||||
byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ed25519PrivateKey);
|
||||
byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ed25519PrivateKey);
|
||||
X25519PrivateKeyParameters privateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0);
|
||||
|
||||
byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(ed25519PublicKey);
|
||||
byte[] x25519PublicKey = Qortal25519Extras.toX25519PublicKey(ed25519PublicKey);
|
||||
X25519PublicKeyParameters publicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0);
|
||||
|
||||
byte[] sharedSecret = new byte[32];
|
||||
@ -186,10 +186,10 @@ public class CryptoTests extends Common {
|
||||
final String expectedTheirX25519PublicKey = "ANjnZLRSzW9B1aVamiYGKP3XtBooU9tGGDjUiibUfzp2";
|
||||
final String expectedSharedSecret = "DTMZYG96x8XZuGzDvHFByVLsXedimqtjiXHhXPVe58Ap";
|
||||
|
||||
byte[] ourX25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ourPrivateKey);
|
||||
byte[] ourX25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ourPrivateKey);
|
||||
assertEquals("X25519 private key incorrect", expectedOurX25519PrivateKey, Base58.encode(ourX25519PrivateKey));
|
||||
|
||||
byte[] theirX25519PublicKey = BouncyCastle25519.toX25519PublicKey(theirPublicKey);
|
||||
byte[] theirX25519PublicKey = Qortal25519Extras.toX25519PublicKey(theirPublicKey);
|
||||
assertEquals("X25519 public key incorrect", expectedTheirX25519PublicKey, Base58.encode(theirX25519PublicKey));
|
||||
|
||||
byte[] sharedSecret = calcBCSharedSecret(ourPrivateKey, theirPublicKey);
|
||||
|
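For background on the toX25519PrivateKey / toX25519PublicKey conversions exercised above (this is the standard Curve25519 relationship, not a claim about how Qortal25519Extras implements it): X25519 operates on the Montgomery u-coordinate while Ed25519 operates on the twisted-Edwards y-coordinate, and the two are related by a birational map over the field of 2^255 - 19 elements:

    u \equiv (1 + y)\,(1 - y)^{-1} \pmod{2^{255} - 19}

On the private-key side, the X25519 scalar is conventionally the clamped lower 32 bytes of SHA-512 over the Ed25519 seed, which is what lets the test derive the same X25519 public key either from the converted private key or from the converted Ed25519 public key.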
190
src/test/java/org/qortal/test/SchnorrTests.java
Normal file
@ -0,0 +1,190 @@
|
||||
package org.qortal.test;
|
||||
|
||||
import com.google.common.hash.HashCode;
|
||||
import com.google.common.primitives.Bytes;
|
||||
import com.google.common.primitives.Longs;
|
||||
import org.bouncycastle.jce.provider.BouncyCastleProvider;
|
||||
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
|
||||
import org.junit.Test;
|
||||
import org.qortal.crypto.Qortal25519Extras;
|
||||
import org.qortal.data.network.OnlineAccountData;
|
||||
import org.qortal.transform.Transformer;
|
||||
|
||||
import java.math.BigInteger;
|
||||
import java.security.SecureRandom;
|
||||
import java.security.Security;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
public class SchnorrTests extends Qortal25519Extras {
|
||||
|
||||
static {
|
||||
// This must go before any calls to LogManager/Logger
|
||||
System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
|
||||
|
||||
Security.insertProviderAt(new BouncyCastleProvider(), 0);
|
||||
Security.insertProviderAt(new BouncyCastleJsseProvider(), 1);
|
||||
}
|
||||
|
||||
private static final SecureRandom SECURE_RANDOM = new SecureRandom();
|
||||
|
||||
@Test
|
||||
public void testConversion() {
|
||||
// Scalar form
|
||||
byte[] scalarA = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes();
|
||||
System.out.printf("a: %s%n", HashCode.fromBytes(scalarA));
|
||||
|
||||
byte[] pointA = HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes();
|
||||
|
||||
BigInteger expectedY = new BigInteger("46316835694926478169428394003475163141307993866256225615783033603165251855960");
|
||||
|
||||
PointAccum pointAccum = Qortal25519Extras.newPointAccum();
|
||||
scalarMultBase(scalarA, pointAccum);
|
||||
|
||||
byte[] encoded = new byte[POINT_BYTES];
|
||||
if (0 == encodePoint(pointAccum, encoded, 0))
|
||||
fail("Point encoding failed");
|
||||
|
||||
System.out.printf("aG: %s%n", HashCode.fromBytes(encoded));
|
||||
assertArrayEquals(pointA, encoded);
|
||||
|
||||
byte[] yBytes = new byte[POINT_BYTES];
|
||||
System.arraycopy(encoded,0, yBytes, 0, encoded.length);
|
||||
Bytes.reverse(yBytes);
|
||||
|
||||
System.out.printf("yBytes: %s%n", HashCode.fromBytes(yBytes));
|
||||
BigInteger yBI = new BigInteger(yBytes);
|
||||
|
||||
System.out.printf("aG y: %s%n", yBI);
|
||||
assertEquals(expectedY, yBI);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAddition() {
|
||||
/*
|
||||
* 1G: b'5866666666666666666666666666666666666666666666666666666666666666'
|
||||
* 2G: b'c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022'
|
||||
* 3G: b'd4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712'
|
||||
*/
|
||||
|
||||
// Scalar form
|
||||
byte[] s1 = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes();
|
||||
byte[] s2 = HashCode.fromString("0200000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes();
|
||||
|
||||
// Point form
|
||||
byte[] g1 = HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes();
|
||||
byte[] g2 = HashCode.fromString("c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022".toLowerCase()).asBytes();
|
||||
byte[] g3 = HashCode.fromString("d4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712".toLowerCase()).asBytes();
|
||||
|
||||
PointAccum p1 = Qortal25519Extras.newPointAccum();
|
||||
scalarMultBase(s1, p1);
|
||||
|
||||
PointAccum p2 = Qortal25519Extras.newPointAccum();
|
||||
scalarMultBase(s2, p2);
|
||||
|
||||
pointAdd(pointCopy(p1), p2);
|
||||
|
||||
byte[] encoded = new byte[POINT_BYTES];
|
||||
if (0 == encodePoint(p2, encoded, 0))
|
||||
fail("Point encoding failed");
|
||||
|
||||
System.out.printf("sum: %s%n", HashCode.fromBytes(encoded));
|
||||
assertArrayEquals(g3, encoded);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSimpleSign() {
|
||||
byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes();
|
||||
byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes();
|
||||
|
||||
byte[] signature = signForAggregation(privateKey, message);
|
||||
System.out.printf("signature: %s%n", HashCode.fromBytes(signature));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSimpleVerify() {
|
||||
byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes();
|
||||
byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes();
|
||||
byte[] signature = HashCode.fromString("13e58e88f3df9e06637d2d5bbb814c028e3ba135494530b9d3b120bdb31168d62c70a37ae9cfba816fe6038ee1ce2fb521b95c4a91c7ff0bb1dd2e67733f2b0d".toLowerCase()).asBytes();
|
||||
|
||||
byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH];
|
||||
Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0);
|
||||
|
||||
assertTrue(verifyAggregated(publicKey, signature, message));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSimpleSignAndVerify() {
|
||||
byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes();
|
||||
byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes();
|
||||
|
||||
byte[] signature = signForAggregation(privateKey, message);
|
||||
|
||||
byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH];
|
||||
Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0);
|
||||
|
||||
assertTrue(verifyAggregated(publicKey, signature, message));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSimpleAggregate() {
|
||||
List<OnlineAccountData> onlineAccounts = generateOnlineAccounts(1);
|
||||
|
||||
byte[] aggregatePublicKey = aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList()));
|
||||
System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey));
|
||||
|
||||
byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList()));
|
||||
System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature));
|
||||
|
||||
OnlineAccountData onlineAccount = onlineAccounts.get(0);
|
||||
|
||||
assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getPublicKey()), HashCode.fromBytes(aggregatePublicKey)), onlineAccount.getPublicKey(), aggregatePublicKey);
|
||||
assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getSignature()), HashCode.fromBytes(aggregateSignature)), onlineAccount.getSignature(), aggregateSignature);
|
||||
|
||||
// This is the crucial test:
|
||||
long timestamp = onlineAccount.getTimestamp();
|
||||
byte[] timestampBytes = Longs.toByteArray(timestamp);
|
||||
assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMultipleAggregate() {
|
||||
List<OnlineAccountData> onlineAccounts = generateOnlineAccounts(5000);
|
||||
|
||||
byte[] aggregatePublicKey = aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList()));
|
||||
System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey));
|
||||
|
||||
byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList()));
|
||||
System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature));
|
||||
|
||||
OnlineAccountData onlineAccount = onlineAccounts.get(0);
|
||||
|
||||
// This is the crucial test:
|
||||
long timestamp = onlineAccount.getTimestamp();
|
||||
byte[] timestampBytes = Longs.toByteArray(timestamp);
|
||||
assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes));
|
||||
}
|
||||
|
||||
private List<OnlineAccountData> generateOnlineAccounts(int numAccounts) {
|
||||
List<OnlineAccountData> onlineAccounts = new ArrayList<>();
|
||||
|
||||
long timestamp = System.currentTimeMillis();
|
||||
byte[] timestampBytes = Longs.toByteArray(timestamp);
|
||||
|
||||
for (int a = 0; a < numAccounts; ++a) {
|
||||
byte[] privateKey = new byte[Transformer.PUBLIC_KEY_LENGTH];
|
||||
SECURE_RANDOM.nextBytes(privateKey);
|
||||
|
||||
byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH];
|
||||
Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0);
|
||||
|
||||
byte[] signature = signForAggregation(privateKey, timestampBytes);
|
||||
|
||||
onlineAccounts.add(new OnlineAccountData(timestamp, signature, publicKey));
|
||||
}
|
||||
|
||||
return onlineAccounts;
|
||||
}
|
||||
}
|
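A note on the constants used in testConversion above: the scalar 0100...00 is 1 in little-endian form, so scalarMultBase produces the Ed25519 base point itself; the encoded value 5866...66 is its 32-byte little-endian y-coordinate (the top bit of the final byte carries the sign of x), and expectedY is the standard base-point y-coordinate, defined as 4/5 in the field:

    y_B \equiv 4 \cdot 5^{-1} \equiv 46316835694926478169428394003475163141307993866256225615783033603165251855960 \pmod{2^{255} - 19}

Reversing the encoded bytes before constructing the BigInteger, as the test does, converts that little-endian encoding back into this integer.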
165
src/test/java/org/qortal/test/TransactionReferenceTests.java
Normal file
@ -0,0 +1,165 @@
|
||||
package org.qortal.test;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.data.transaction.PaymentTransactionData;
|
||||
import org.qortal.data.transaction.TransactionData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.test.common.Common;
|
||||
import org.qortal.test.common.TransactionUtils;
|
||||
import org.qortal.test.common.transaction.TestTransaction;
|
||||
import org.qortal.transaction.Transaction;
|
||||
|
||||
import java.util.Random;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class TransactionReferenceTests extends Common {
|
||||
|
||||
@Before
|
||||
public void beforeTest() throws DataException {
|
||||
Common.useDefaultSettings();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInvalidRandomReferenceBeforeFeatureTrigger() throws DataException {
|
||||
Random random = new Random();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
|
||||
|
||||
byte[] randomPrivateKey = new byte[32];
|
||||
random.nextBytes(randomPrivateKey);
|
||||
PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey);
|
||||
|
||||
// Create payment transaction data
|
||||
TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L);
|
||||
|
||||
// Set random reference
|
||||
byte[] randomReference = new byte[64];
|
||||
random.nextBytes(randomReference);
|
||||
paymentTransactionData.setReference(randomReference);
|
||||
|
||||
Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData);
|
||||
|
||||
// Transaction should be invalid due to random reference
|
||||
Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed();
|
||||
assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testValidRandomReferenceAfterFeatureTrigger() throws DataException {
|
||||
Common.useSettings("test-settings-v2-disable-reference.json");
|
||||
Random random = new Random();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
|
||||
|
||||
byte[] randomPrivateKey = new byte[32];
|
||||
random.nextBytes(randomPrivateKey);
|
||||
PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey);
|
||||
|
||||
// Create payment transaction data
|
||||
TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L);
|
||||
|
||||
// Set random reference
|
||||
byte[] randomReference = new byte[64];
|
||||
random.nextBytes(randomReference);
|
||||
paymentTransactionData.setReference(randomReference);
|
||||
|
||||
Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData);
|
||||
|
||||
// Transaction should be valid, even with random reference, because reference checking is now disabled
|
||||
Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed();
|
||||
assertEquals(Transaction.ValidationResult.OK, validationResult);
|
||||
TransactionUtils.signAndImportValid(repository, paymentTransactionData, alice);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNullReferenceAfterFeatureTrigger() throws DataException {
|
||||
Common.useSettings("test-settings-v2-disable-reference.json");
|
||||
Random random = new Random();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
|
||||
|
||||
byte[] randomPrivateKey = new byte[32];
|
||||
random.nextBytes(randomPrivateKey);
|
||||
PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey);
|
||||
|
||||
// Create payment transaction data
|
||||
TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L);
|
||||
|
||||
// Set null reference
|
||||
paymentTransactionData.setReference(null);
|
||||
|
||||
Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData);
|
||||
|
||||
// Transaction should be invalid, as we require a non-null reference
|
||||
Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed();
|
||||
assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testShortReferenceAfterFeatureTrigger() throws DataException {
|
||||
Common.useSettings("test-settings-v2-disable-reference.json");
|
||||
Random random = new Random();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
|
||||
|
||||
byte[] randomPrivateKey = new byte[32];
|
||||
random.nextBytes(randomPrivateKey);
|
||||
PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey);
|
||||
|
||||
// Create payment transaction data
|
||||
TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L);
|
||||
|
||||
// Set a 63-byte reference (one byte too short)
|
||||
byte[] randomByte = new byte[63];
|
||||
random.nextBytes(randomByte);
|
||||
paymentTransactionData.setReference(randomByte);
|
||||
|
||||
Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData);
|
||||
|
||||
// Transaction should be invalid, as reference isn't long enough
|
||||
Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed();
|
||||
assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLongReferenceAfterFeatureTrigger() throws DataException {
|
||||
Common.useSettings("test-settings-v2-disable-reference.json");
|
||||
Random random = new Random();
|
||||
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
PrivateKeyAccount alice = Common.getTestAccount(repository, "alice");
|
||||
|
||||
byte[] randomPrivateKey = new byte[32];
|
||||
random.nextBytes(randomPrivateKey);
|
||||
PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey);
|
||||
|
||||
// Create payment transaction data
|
||||
TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L);
|
||||
|
||||
// Set a 65-byte reference (one byte too long)
|
||||
byte[] randomByte = new byte[65];
|
||||
random.nextBytes(randomByte);
|
||||
paymentTransactionData.setReference(randomByte);
|
||||
|
||||
Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData);
|
||||
|
||||
// Transaction should be invalid, as reference is too long
|
||||
Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed();
|
||||
assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -6,6 +6,7 @@ import org.bouncycastle.jce.provider.BouncyCastleProvider;
|
||||
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.account.PublicKeyAccount;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.utils.Base58;
|
||||
|
||||
public class RewardShareKeys {
|
||||
@ -28,7 +29,7 @@ public class RewardShareKeys {
|
||||
PublicKeyAccount recipientAccount = new PublicKeyAccount(null, args.length > 1 ? Base58.decode(args[1]) : minterAccount.getPublicKey());
|
||||
|
||||
byte[] rewardSharePrivateKey = minterAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey());
|
||||
byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey);
|
||||
byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey);
|
||||
|
||||
System.out.println(String.format("Minter account: %s", minterAccount.getAddress()));
|
||||
System.out.println(String.format("Minter's public key: %s", Base58.encode(minterAccount.getPublicKey())));
|
||||
|
@ -6,6 +6,7 @@ import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.qortal.account.PrivateKeyAccount;
|
||||
import org.qortal.crypto.Crypto;
|
||||
import org.qortal.data.transaction.BaseTransactionData;
|
||||
import org.qortal.data.transaction.PaymentTransactionData;
|
||||
import org.qortal.data.transaction.RewardShareTransactionData;
|
||||
@ -45,7 +46,7 @@ public class AccountUtils {
|
||||
long timestamp = repository.getTransactionRepository().fromSignature(reference).getTimestamp() + 1;
|
||||
|
||||
byte[] rewardSharePrivateKey = mintingAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey());
|
||||
byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey);
|
||||
byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey);
|
||||
|
||||
BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, mintingAccount.getPublicKey(), fee, null);
|
||||
TransactionData transactionData = new RewardShareTransactionData(baseTransactionData, recipientAccount.getAddress(), rewardSharePublicKey, sharePercent);
|
||||
|
@ -61,6 +61,7 @@ public class Common {
|
||||
|
||||
|
||||
public static final String testSettingsFilename = "test-settings-v2.json";
|
||||
public static boolean shouldRetainRepositoryAfterTest = false;
|
||||
|
||||
static {
|
||||
// Load/check settings, which potentially sets up blockchain config, etc.
|
||||
@ -126,6 +127,7 @@ public class Common {
|
||||
|
||||
public static void useSettings(String settingsFilename) throws DataException {
|
||||
Common.useSettingsAndDb(settingsFilename, true);
|
||||
setShouldRetainRepositoryAfterTest(false);
|
||||
}
|
||||
|
||||
public static void useDefaultSettings() throws DataException {
|
||||
@ -207,7 +209,16 @@ public class Common {
|
||||
RepositoryManager.setRepositoryFactory(repositoryFactory);
|
||||
}
|
||||
|
||||
public static void setShouldRetainRepositoryAfterTest(boolean shouldRetain) {
|
||||
shouldRetainRepositoryAfterTest = shouldRetain;
|
||||
}
|
||||
|
||||
public static void deleteTestRepository() throws DataException {
|
||||
if (shouldRetainRepositoryAfterTest) {
|
||||
// Don't delete if we've requested to keep the db intact
|
||||
return;
|
||||
}
|
||||
|
||||
// Delete repository directory if exists
|
||||
Path repositoryPath = Paths.get(Settings.getInstance().getRepositoryPath());
|
||||
try {
|
||||
|
@ -0,0 +1,63 @@
|
||||
package org.qortal.test.minting;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.qortal.block.Block;
|
||||
import org.qortal.data.block.BlockData;
|
||||
import org.qortal.repository.DataException;
|
||||
import org.qortal.repository.Repository;
|
||||
import org.qortal.repository.RepositoryManager;
|
||||
import org.qortal.test.common.BlockUtils;
|
||||
import org.qortal.test.common.Common;
|
||||
import org.qortal.transform.Transformer;
|
||||
import org.qortal.utils.NTP;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
public class BlockTimestampTests extends Common {
|
||||
|
||||
private static class BlockTimestampDataPoint {
|
||||
public byte[] minterPublicKey;
|
||||
public int minterAccountLevel;
|
||||
public long blockTimestamp;
|
||||
}
|
||||
|
||||
private static final Random RANDOM = new Random();
|
||||
|
||||
@Before
|
||||
public void beforeTest() throws DataException {
|
||||
Common.useSettings("test-settings-v2-block-timestamps.json");
|
||||
NTP.setFixedOffset(0L);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTimestamps() throws DataException {
|
||||
try (final Repository repository = RepositoryManager.getRepository()) {
|
||||
Block parentBlock = BlockUtils.mintBlock(repository);
|
||||
BlockData parentBlockData = parentBlock.getBlockData();
|
||||
|
||||
// Generate lots of test minters
|
||||
List<BlockTimestampDataPoint> dataPoints = new ArrayList<>();
|
||||
for (int i = 0; i < 20; i++) {
|
||||
BlockTimestampDataPoint dataPoint = new BlockTimestampDataPoint();
|
||||
|
||||
dataPoint.minterPublicKey = new byte[Transformer.PUBLIC_KEY_LENGTH];
|
||||
RANDOM.nextBytes(dataPoint.minterPublicKey);
|
||||
|
||||
dataPoint.minterAccountLevel = RANDOM.nextInt(5) + 5;
|
||||
|
||||
dataPoint.blockTimestamp = Block.calcTimestamp(parentBlockData, dataPoint.minterPublicKey, dataPoint.minterAccountLevel);
|
||||
|
||||
System.out.printf("[%d] level %d, blockTimestamp %d - parentTimestamp %d = %d%n",
|
||||
i,
|
||||
dataPoint.minterAccountLevel,
|
||||
dataPoint.blockTimestamp,
|
||||
parentBlockData.getTimestamp(),
|
||||
dataPoint.blockTimestamp - parentBlockData.getTimestamp()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -509,6 +509,7 @@ public class IntegrityTests extends Common {
|
||||
@Ignore("Checks 'live' repository")
|
||||
@Test
|
||||
public void testRepository() throws DataException {
|
||||
Common.setShouldRetainRepositoryAfterTest(true);
|
||||
Settings.fileInstance("settings.json"); // use 'live' settings
|
||||
|
||||
String repositoryUrlTemplate = "jdbc:hsqldb:file:%s" + File.separator + "blockchain;create=false;hsqldb.full_log_replay=true";
|
||||
|
210
src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java
Normal file
@ -0,0 +1,210 @@
|
||||
package org.qortal.test.network;
|
||||
|
||||
import org.bouncycastle.jce.provider.BouncyCastleProvider;
|
||||
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import org.qortal.controller.OnlineAccountsManager;
|
||||
import org.qortal.data.network.OnlineAccountData;
|
||||
import org.qortal.network.message.*;
|
||||
import org.qortal.transform.Transformer;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.security.Security;
|
||||
import java.util.*;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
public class OnlineAccountsV3Tests {
|
||||
|
||||
private static final Random RANDOM = new Random();
|
||||
static {
|
||||
// This must go before any calls to LogManager/Logger
|
||||
System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
|
||||
|
||||
Security.insertProviderAt(new BouncyCastleProvider(), 0);
|
||||
Security.insertProviderAt(new BouncyCastleJsseProvider(), 1);
|
||||
}
|
||||
|
||||
@Ignore("For informational use")
|
||||
@Test
|
||||
public void compareV2ToV3() throws MessageException {
|
||||
List<OnlineAccountData> onlineAccounts = generateOnlineAccounts(false);
|
||||
|
||||
// How many of each timestamp and leading byte (of public key)
|
||||
Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte = convertToHashMaps(onlineAccounts);
|
||||
|
||||
byte[] v3DataBytes = new GetOnlineAccountsV3Message(hashesByTimestampThenByte).toBytes();
|
||||
int v3ByteSize = v3DataBytes.length;
|
||||
|
||||
byte[] v2DataBytes = new GetOnlineAccountsV2Message(onlineAccounts).toBytes();
|
||||
int v2ByteSize = v2DataBytes.length;
|
||||
|
||||
int numTimestamps = hashesByTimestampThenByte.size();
|
||||
System.out.printf("For %d accounts split across %d timestamp%s: V2 size %d vs V3 size %d%n",
|
||||
onlineAccounts.size(),
|
||||
numTimestamps,
|
||||
numTimestamps != 1 ? "s" : "",
|
||||
v2ByteSize,
|
||||
v3ByteSize
|
||||
);
|
||||
|
||||
for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) {
|
||||
long timestamp = outerMapEntry.getKey();
|
||||
|
||||
var innerMap = outerMapEntry.getValue();
|
||||
|
||||
System.out.printf("For timestamp %d: %d / 256 slots used.%n",
|
||||
timestamp,
|
||||
innerMap.size()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private Map<Long, Map<Byte, byte[]>> convertToHashMaps(List<OnlineAccountData> onlineAccounts) {
|
||||
// How many of each timestamp and leading byte (of public key)
|
||||
Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte = new HashMap<>();
|
||||
|
||||
for (OnlineAccountData onlineAccountData : onlineAccounts) {
|
||||
Long timestamp = onlineAccountData.getTimestamp();
|
||||
Byte leadingByte = onlineAccountData.getPublicKey()[0];
|
||||
|
||||
hashesByTimestampThenByte
|
||||
.computeIfAbsent(timestamp, k -> new HashMap<>())
|
||||
.compute(leadingByte, (k, v) -> OnlineAccountsManager.xorByteArrayInPlace(v, onlineAccountData.getPublicKey()));
|
||||
}
|
||||
|
||||
return hashesByTimestampThenByte;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOnGetOnlineAccountsV3() {
|
||||
List<OnlineAccountData> ourOnlineAccounts = generateOnlineAccounts(false);
|
||||
List<OnlineAccountData> peersOnlineAccounts = generateOnlineAccounts(false);
|
||||
|
||||
Map<Long, Map<Byte, byte[]>> ourConvertedHashes = convertToHashMaps(ourOnlineAccounts);
|
||||
Map<Long, Map<Byte, byte[]>> peersConvertedHashes = convertToHashMaps(peersOnlineAccounts);
|
||||
|
||||
List<String> mockReply = new ArrayList<>();
|
||||
|
||||
// Warning: no double-checking/fetching - we must be ConcurrentMap compatible!
|
||||
// So no contains()-then-get() or multiple get()s on the same key/map.
|
||||
for (var ourOuterMapEntry : ourConvertedHashes.entrySet()) {
|
||||
Long timestamp = ourOuterMapEntry.getKey();
|
||||
|
||||
var ourInnerMap = ourOuterMapEntry.getValue();
|
||||
var peersInnerMap = peersConvertedHashes.get(timestamp);
|
||||
|
||||
if (peersInnerMap == null) {
|
||||
// Peer doesn't have this timestamp, so if it's valid (i.e. not too old) then we'd have to send all of ours
|
||||
for (Byte leadingByte : ourInnerMap.keySet())
|
||||
mockReply.add(timestamp + ":" + leadingByte);
|
||||
} else {
|
||||
// We have entries for this timestamp so compare against peer's entries
|
||||
for (var ourInnerMapEntry : ourInnerMap.entrySet()) {
|
||||
Byte leadingByte = ourInnerMapEntry.getKey();
|
||||
byte[] peersHash = peersInnerMap.get(leadingByte);
|
||||
|
||||
if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) {
|
||||
// We don't match peer, or peer doesn't have - send all online accounts for this timestamp and leading byte
|
||||
mockReply.add(timestamp + ":" + leadingByte);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int numOurTimestamps = ourConvertedHashes.size();
|
||||
System.out.printf("We have %d accounts split across %d timestamp%s%n",
|
||||
ourOnlineAccounts.size(),
|
||||
numOurTimestamps,
|
||||
numOurTimestamps != 1 ? "s" : ""
|
||||
);
|
||||
|
||||
int numPeerTimestamps = peersConvertedHashes.size();
|
||||
System.out.printf("Peer sent %d accounts split across %d timestamp%s%n",
|
||||
peersOnlineAccounts.size(),
|
||||
numPeerTimestamps,
|
||||
numPeerTimestamps != 1 ? "s" : ""
|
||||
);
|
||||
|
||||
System.out.printf("We need to send: %d%n%s%n", mockReply.size(), String.join(", ", mockReply));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSerialization() throws MessageException {
|
||||
List<OnlineAccountData> onlineAccountsOut = generateOnlineAccounts(true);
|
||||
Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteOut = convertToHashMaps(onlineAccountsOut);
|
||||
|
||||
validateSerialization(hashesByTimestampThenByteOut);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEmptySerialization() throws MessageException {
|
||||
Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteOut = Collections.emptyMap();
|
||||
validateSerialization(hashesByTimestampThenByteOut);
|
||||
|
||||
hashesByTimestampThenByteOut = new HashMap<>();
|
||||
validateSerialization(hashesByTimestampThenByteOut);
|
||||
}
|
||||
|
||||
private void validateSerialization(Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteOut) throws MessageException {
|
||||
Message messageOut = new GetOnlineAccountsV3Message(hashesByTimestampThenByteOut);
|
||||
byte[] messageBytes = messageOut.toBytes();
|
||||
|
||||
ByteBuffer byteBuffer = ByteBuffer.wrap(messageBytes).asReadOnlyBuffer();
|
||||
|
||||
GetOnlineAccountsV3Message messageIn = (GetOnlineAccountsV3Message) Message.fromByteBuffer(byteBuffer);
|
||||
|
||||
Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteIn = messageIn.getHashesByTimestampThenByte();
|
||||
|
||||
Set<Long> timestampsIn = hashesByTimestampThenByteIn.keySet();
|
||||
Set<Long> timestampsOut = hashesByTimestampThenByteOut.keySet();
|
||||
assertEquals("timestamp count mismatch", timestampsOut.size(), timestampsIn.size());
|
||||
assertTrue("timestamps mismatch", timestampsIn.containsAll(timestampsOut));
|
||||
|
||||
for (Long timestamp : timestampsIn) {
|
||||
Map<Byte, byte[]> hashesByByteIn = hashesByTimestampThenByteIn.get(timestamp);
|
||||
Map<Byte, byte[]> hashesByByteOut = hashesByTimestampThenByteOut.get(timestamp);
|
||||
assertNotNull("timestamp entry missing", hashesByByteOut);
|
||||
|
||||
Set<Byte> leadingBytesIn = hashesByByteIn.keySet();
|
||||
Set<Byte> leadingBytesOut = hashesByByteOut.keySet();
|
||||
assertEquals("leading byte entry count mismatch", leadingBytesOut.size(), leadingBytesIn.size());
|
||||
assertTrue("leading byte entry mismatch", leadingBytesIn.containsAll(leadingBytesOut));
|
||||
|
||||
for (Byte leadingByte : leadingBytesOut) {
|
||||
byte[] bytesIn = hashesByByteIn.get(leadingByte);
|
||||
byte[] bytesOut = hashesByByteOut.get(leadingByte);
|
||||
|
||||
assertTrue("pubkey hash mismatch", Arrays.equals(bytesOut, bytesIn));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private List<OnlineAccountData> generateOnlineAccounts(boolean withSignatures) {
|
||||
List<OnlineAccountData> onlineAccounts = new ArrayList<>();
|
||||
|
||||
int numTimestamps = RANDOM.nextInt(2) + 1; // 1 or 2
|
||||
|
||||
for (int t = 0; t < numTimestamps; ++t) {
|
||||
long timestamp = 1 << 31 + (t + 1) << 12;
|
||||
int numAccounts = RANDOM.nextInt(3000);
|
||||
|
||||
for (int a = 0; a < numAccounts; ++a) {
|
||||
byte[] sig = null;
|
||||
if (withSignatures) {
|
||||
sig = new byte[Transformer.SIGNATURE_LENGTH];
|
||||
RANDOM.nextBytes(sig);
|
||||
}
|
||||
|
||||
byte[] pubkey = new byte[Transformer.PUBLIC_KEY_LENGTH];
|
||||
RANDOM.nextBytes(pubkey);
|
||||
|
||||
onlineAccounts.add(new OnlineAccountData(timestamp, sig, pubkey));
|
||||
}
|
||||
}
|
||||
|
||||
return onlineAccounts;
|
||||
}
|
||||
|
||||
}
|
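For context on the Map<Long, Map<Byte, byte[]>> structure exchanged by GetOnlineAccountsV3Message above: convertToHashMaps folds every account's public key into one accumulator per (timestamp, leading byte) pair, so peers can compare fixed-size digests per bucket instead of exchanging every public key, which is the size difference compareV2ToV3 measures. A minimal sketch of that folding, assuming OnlineAccountsManager.xorByteArrayInPlace copies the key when the accumulator is null and otherwise XORs element-wise (the real implementation may differ):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class XorBucketSketch {

    // Assumed semantics of OnlineAccountsManager.xorByteArrayInPlace()
    static byte[] xorByteArrayInPlace(byte[] accumulator, byte[] publicKey) {
        if (accumulator == null)
            return Arrays.copyOf(publicKey, publicKey.length);

        for (int i = 0; i < publicKey.length; ++i)
            accumulator[i] ^= publicKey[i];

        return accumulator;
    }

    public static void main(String[] args) {
        Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte = new HashMap<>();

        long timestamp = 1_660_000_000_000L; // example timestamp
        byte[][] publicKeys = { { 0x11, 0x22 }, { 0x11, 0x33 }, { 0x42, 0x01 } }; // toy two-byte "keys"

        for (byte[] publicKey : publicKeys)
            hashesByTimestampThenByte
                    .computeIfAbsent(timestamp, k -> new HashMap<>())
                    .compute(publicKey[0], (k, v) -> xorByteArrayInPlace(v, publicKey));

        // The two keys sharing leading byte 0x11 collapse into one bucket holding { 0x00, 0x11 }
        hashesByTimestampThenByte.get(timestamp)
                .forEach((leadingByte, hash) -> System.out.printf("%02x -> %s%n", leadingByte, Arrays.toString(hash)));
    }
}

If a bucket's accumulator differs between two peers, the requesting side asks for the full account list for just that timestamp and leading byte, which is the comparison testOnGetOnlineAccountsV3 mocks with its timestamp + ":" + leadingByte reply strings.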
86
src/test/resources/test-chain-v2-block-timestamps.json
Normal file
@ -0,0 +1,86 @@
|
||||
{
|
||||
"isTestChain": true,
|
||||
"blockTimestampMargin": 500,
|
||||
"transactionExpiryPeriod": 86400000,
|
||||
"maxBlockSize": 2097152,
|
||||
"maxBytesPerUnitFee": 1024,
|
||||
"unitFee": "0.1",
|
||||
"nameRegistrationUnitFees": [
|
||||
{ "timestamp": 1645372800000, "fee": "5" }
|
||||
],
|
||||
"requireGroupForApproval": false,
|
||||
"minAccountLevelToRewardShare": 5,
|
||||
"maxRewardSharesPerMintingAccount": 20,
|
||||
"founderEffectiveMintingLevel": 10,
|
||||
"onlineAccountSignaturesMinLifetime": 3600000,
|
||||
"onlineAccountSignaturesMaxLifetime": 86400000,
|
||||
"rewardsByHeight": [
|
||||
{ "height": 1, "reward": 100 },
|
||||
{ "height": 11, "reward": 10 },
|
||||
{ "height": 21, "reward": 1 }
|
||||
],
|
||||
"sharesByLevel": [
|
||||
{ "levels": [ 1, 2 ], "share": 0.05 },
|
||||
{ "levels": [ 3, 4 ], "share": 0.10 },
|
||||
{ "levels": [ 5, 6 ], "share": 0.15 },
|
||||
{ "levels": [ 7, 8 ], "share": 0.20 },
|
||||
{ "levels": [ 9, 10 ], "share": 0.25 }
|
||||
],
|
||||
"qoraHoldersShare": 0.20,
|
||||
"qoraPerQortReward": 250,
|
||||
"blocksNeededByLevel": [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 ],
|
||||
"blockTimingsByHeight": [
|
||||
{ "height": 1, "target": 60000, "deviation": 30000, "power": 0.2 },
|
||||
{ "height": 2, "target": 70000, "deviation": 10000, "power": 0.8 }
|
||||
],
|
||||
"ciyamAtSettings": {
|
||||
"feePerStep": "0.0001",
|
||||
"maxStepsPerRound": 500,
|
||||
"stepsPerFunctionCall": 10,
|
||||
"minutesPerBlock": 1
|
||||
},
|
||||
"featureTriggers": {
|
||||
"messageHeight": 0,
|
||||
"atHeight": 0,
|
||||
"assetsTimestamp": 0,
|
||||
"votingTimestamp": 0,
|
||||
"arbitraryTimestamp": 0,
|
||||
"powfixTimestamp": 0,
|
||||
"qortalTimestamp": 0,
|
||||
"newAssetPricingTimestamp": 0,
|
||||
"groupApprovalTimestamp": 0,
|
||||
"atFindNextTransactionFix": 0,
|
||||
"newBlockSigHeight": 999999,
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 9999999999999,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
"timestamp": 0,
|
||||
"transactions": [
|
||||
{ "type": "ISSUE_ASSET", "assetName": "QORT", "description": "QORT native coin", "data": "", "quantity": 0, "isDivisible": true, "fee": 0 },
|
||||
{ "type": "ISSUE_ASSET", "assetName": "Legacy-QORA", "description": "Representative legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true },
|
||||
{ "type": "ISSUE_ASSET", "assetName": "QORT-from-QORA", "description": "QORT gained from holding legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true },
|
||||
|
||||
{ "type": "GENESIS", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "amount": "1000000000" },
|
||||
{ "type": "GENESIS", "recipient": "QixPbJUwsaHsVEofJdozU9zgVqkK6aYhrK", "amount": "1000000" },
|
||||
{ "type": "GENESIS", "recipient": "QaUpHNhT3Ygx6avRiKobuLdusppR5biXjL", "amount": "1000000" },
|
||||
{ "type": "GENESIS", "recipient": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "amount": "1000000" },
|
||||
|
||||
{ "type": "CREATE_GROUP", "creatorPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "groupName": "dev-group", "description": "developer group", "isOpen": false, "approvalThreshold": "PCT100", "minimumBlockDelay": 0, "maximumBlockDelay": 1440 },
|
||||
|
||||
{ "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "TEST", "description": "test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 },
|
||||
{ "type": "ISSUE_ASSET", "issuerPublicKey": "C6wuddsBV3HzRrXUtezE7P5MoRXp5m3mEDokRDGZB6ry", "assetName": "OTHER", "description": "other test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 },
|
||||
{ "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "GOLD", "description": "gold test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 },
|
||||
|
||||
{ "type": "ACCOUNT_FLAGS", "target": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "andMask": -1, "orMask": 1, "xorMask": 0 },
|
||||
{ "type": "REWARD_SHARE", "minterPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "rewardSharePublicKey": "7PpfnvLSG7y4HPh8hE7KoqAjLCkv7Ui6xw4mKAkbZtox", "sharePercent": "100" },
|
||||
|
||||
{ "type": "ACCOUNT_LEVEL", "target": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "level": 5 }
|
||||
]
|
||||
}
|
||||
}
|
85
src/test/resources/test-chain-v2-disable-reference.json
Normal file
@ -0,0 +1,85 @@
|
||||
{
|
||||
"isTestChain": true,
|
||||
"blockTimestampMargin": 500,
|
||||
"transactionExpiryPeriod": 86400000,
|
||||
"maxBlockSize": 2097152,
|
||||
"maxBytesPerUnitFee": 1024,
|
||||
"unitFee": "0.1",
|
||||
"nameRegistrationUnitFees": [
|
||||
{ "timestamp": 1645372800000, "fee": "5" }
|
||||
],
|
||||
"requireGroupForApproval": false,
|
||||
"minAccountLevelToRewardShare": 5,
|
||||
"maxRewardSharesPerMintingAccount": 20,
|
||||
"founderEffectiveMintingLevel": 10,
|
||||
"onlineAccountSignaturesMinLifetime": 3600000,
|
||||
"onlineAccountSignaturesMaxLifetime": 86400000,
|
||||
"rewardsByHeight": [
|
||||
{ "height": 1, "reward": 100 },
|
||||
{ "height": 11, "reward": 10 },
|
||||
{ "height": 21, "reward": 1 }
|
||||
],
|
||||
"sharesByLevel": [
|
||||
{ "levels": [ 1, 2 ], "share": 0.05 },
|
||||
{ "levels": [ 3, 4 ], "share": 0.10 },
|
||||
{ "levels": [ 5, 6 ], "share": 0.15 },
|
||||
{ "levels": [ 7, 8 ], "share": 0.20 },
|
||||
{ "levels": [ 9, 10 ], "share": 0.25 }
|
||||
],
|
||||
"qoraHoldersShare": 0.20,
|
||||
"qoraPerQortReward": 250,
|
||||
"blocksNeededByLevel": [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 ],
|
||||
"blockTimingsByHeight": [
|
||||
{ "height": 1, "target": 60000, "deviation": 30000, "power": 0.2 }
|
||||
],
|
||||
"ciyamAtSettings": {
|
||||
"feePerStep": "0.0001",
|
||||
"maxStepsPerRound": 500,
|
||||
"stepsPerFunctionCall": 10,
|
||||
"minutesPerBlock": 1
|
||||
},
|
||||
"featureTriggers": {
|
||||
"messageHeight": 0,
|
||||
"atHeight": 0,
|
||||
"assetsTimestamp": 0,
|
||||
"votingTimestamp": 0,
|
||||
"arbitraryTimestamp": 0,
|
||||
"powfixTimestamp": 0,
|
||||
"qortalTimestamp": 0,
|
||||
"newAssetPricingTimestamp": 0,
|
||||
"groupApprovalTimestamp": 0,
|
||||
"atFindNextTransactionFix": 0,
|
||||
"newBlockSigHeight": 999999,
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 0,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
"timestamp": 0,
|
||||
"transactions": [
|
||||
{ "type": "ISSUE_ASSET", "assetName": "QORT", "description": "QORT native coin", "data": "", "quantity": 0, "isDivisible": true, "fee": 0 },
|
||||
{ "type": "ISSUE_ASSET", "assetName": "Legacy-QORA", "description": "Representative legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true },
|
||||
{ "type": "ISSUE_ASSET", "assetName": "QORT-from-QORA", "description": "QORT gained from holding legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true },
|
||||
|
||||
{ "type": "GENESIS", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "amount": "1000000000" },
|
||||
{ "type": "GENESIS", "recipient": "QixPbJUwsaHsVEofJdozU9zgVqkK6aYhrK", "amount": "1000000" },
|
||||
{ "type": "GENESIS", "recipient": "QaUpHNhT3Ygx6avRiKobuLdusppR5biXjL", "amount": "1000000" },
|
||||
{ "type": "GENESIS", "recipient": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "amount": "1000000" },
|
||||
|
||||
{ "type": "CREATE_GROUP", "creatorPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "groupName": "dev-group", "description": "developer group", "isOpen": false, "approvalThreshold": "PCT100", "minimumBlockDelay": 0, "maximumBlockDelay": 1440 },
|
||||
|
||||
{ "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "TEST", "description": "test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 },
|
||||
{ "type": "ISSUE_ASSET", "issuerPublicKey": "C6wuddsBV3HzRrXUtezE7P5MoRXp5m3mEDokRDGZB6ry", "assetName": "OTHER", "description": "other test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 },
|
||||
{ "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "GOLD", "description": "gold test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 },
|
||||
|
||||
{ "type": "ACCOUNT_FLAGS", "target": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "andMask": -1, "orMask": 1, "xorMask": 0 },
|
||||
{ "type": "REWARD_SHARE", "minterPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "rewardSharePublicKey": "7PpfnvLSG7y4HPh8hE7KoqAjLCkv7Ui6xw4mKAkbZtox", "sharePercent": "100" },
|
||||
|
||||
{ "type": "ACCOUNT_LEVEL", "target": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "level": 5 }
|
||||
]
|
||||
}
|
||||
}
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 6,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
@ -54,7 +54,9 @@
|
||||
"shareBinFix": 999999,
|
||||
"calcChainWeightTimestamp": 0,
|
||||
"transactionV5Timestamp": 0,
|
||||
"transactionV6Timestamp": 0
|
||||
"transactionV6Timestamp": 0,
|
||||
"disableReferenceTimestamp": 9999999999999,
|
||||
"aggregateSignatureTimestamp": 0
|
||||
},
|
||||
"genesisInfo": {
|
||||
"version": 4,
|
||||
|
19
src/test/resources/test-settings-v2-block-timestamps.json
Normal file
@ -0,0 +1,19 @@
|
||||
{
|
||||
"repositoryPath": "testdb",
|
||||
"bitcoinNet": "TEST3",
|
||||
"litecoinNet": "TEST3",
|
||||
"restrictedApi": false,
|
||||
"blockchainConfig": "src/test/resources/test-chain-v2-block-timestamps.json",
|
||||
"exportPath": "qortal-backup-test",
|
||||
"bootstrap": false,
|
||||
"wipeUnconfirmedOnStart": false,
|
||||
"testNtpOffset": 0,
|
||||
"minPeers": 0,
|
||||
"pruneBlockLimit": 100,
|
||||
"bootstrapFilenamePrefix": "test-",
|
||||
"dataPath": "data-test",
|
||||
"tempDataPath": "data-test/_temp",
|
||||
"listsPath": "lists-test",
|
||||
"storagePolicy": "FOLLOWED_OR_VIEWED",
|
||||
"maxStorageCapacity": 104857600
|
||||
}
|
11
src/test/resources/test-settings-v2-disable-reference.json
Normal file
@ -0,0 +1,11 @@
|
||||
{
|
||||
"repositoryPath": "testdb",
|
||||
"restrictedApi": false,
|
||||
"blockchainConfig": "src/test/resources/test-chain-v2-disable-reference.json",
|
||||
"exportPath": "qortal-backup-test",
|
||||
"bootstrap": false,
|
||||
"wipeUnconfirmedOnStart": false,
|
||||
"testNtpOffset": 0,
|
||||
"minPeers": 0,
|
||||
"pruneBlockLimit": 100
|
||||
}
|