diff --git a/WindowsInstaller/Qortal.aip b/WindowsInstaller/Qortal.aip index 0e3d5791..06643cca 100755 --- a/WindowsInstaller/Qortal.aip +++ b/WindowsInstaller/Qortal.aip @@ -17,10 +17,10 @@ - + - + @@ -212,7 +212,7 @@ - + diff --git a/pom.xml b/pom.xml index 224640df..e951c7c7 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 org.qortal qortal - 3.3.2 + 3.4.0 jar true diff --git a/src/main/java/org/qortal/account/PrivateKeyAccount.java b/src/main/java/org/qortal/account/PrivateKeyAccount.java index 3b370d12..4b646b4a 100644 --- a/src/main/java/org/qortal/account/PrivateKeyAccount.java +++ b/src/main/java/org/qortal/account/PrivateKeyAccount.java @@ -11,15 +11,15 @@ public class PrivateKeyAccount extends PublicKeyAccount { private final Ed25519PrivateKeyParameters edPrivateKeyParams; /** - * Create PrivateKeyAccount using byte[32] seed. + * Create PrivateKeyAccount using byte[32] private key. * - * @param seed + * @param privateKey * byte[32] used to create private/public key pair * @throws IllegalArgumentException - * if passed invalid seed + * if passed invalid privateKey */ - public PrivateKeyAccount(Repository repository, byte[] seed) { - this(repository, new Ed25519PrivateKeyParameters(seed, 0)); + public PrivateKeyAccount(Repository repository, byte[] privateKey) { + this(repository, new Ed25519PrivateKeyParameters(privateKey, 0)); } private PrivateKeyAccount(Repository repository, Ed25519PrivateKeyParameters edPrivateKeyParams) { @@ -37,10 +37,6 @@ public class PrivateKeyAccount extends PublicKeyAccount { return this.privateKey; } - public static byte[] toPublicKey(byte[] seed) { - return new Ed25519PrivateKeyParameters(seed, 0).generatePublicKey().getEncoded(); - } - public byte[] sign(byte[] message) { return Crypto.sign(this.edPrivateKeyParams, message); } diff --git a/src/main/java/org/qortal/api/resource/ArbitraryResource.java b/src/main/java/org/qortal/api/resource/ArbitraryResource.java index 73860047..451d9b8a 100644 --- a/src/main/java/org/qortal/api/resource/ArbitraryResource.java +++ b/src/main/java/org/qortal/api/resource/ArbitraryResource.java @@ -57,6 +57,7 @@ import org.qortal.transform.TransformationException; import org.qortal.transform.transaction.ArbitraryTransactionTransformer; import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.Base58; +import org.qortal.utils.NTP; import org.qortal.utils.ZipUtils; @Path("/arbitrary") @@ -1099,7 +1100,8 @@ public class ArbitraryResource { throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, error); } - if (!Controller.getInstance().isUpToDate()) { + final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L); + if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) { throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC); } diff --git a/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java b/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java index 35a678f2..66800eb7 100644 --- a/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java +++ b/src/main/java/org/qortal/api/resource/CrossChainTradeBotResource.java @@ -42,6 +42,7 @@ import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; import org.qortal.utils.Base58; +import org.qortal.utils.NTP; @Path("/crosschain/tradebot") @Tag(name = "Cross-Chain (Trade-Bot)") @@ -137,7 +138,8 @@ public class 
CrossChainTradeBotResource { if (tradeBotCreateRequest.qortAmount <= 0 || tradeBotCreateRequest.fundingQortAmount <= 0) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.ORDER_SIZE_TOO_SMALL); - if (!Controller.getInstance().isUpToDate()) + final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L); + if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC); try (final Repository repository = RepositoryManager.getRepository()) { @@ -198,7 +200,8 @@ public class CrossChainTradeBotResource { if (tradeBotRespondRequest.receivingAddress == null || !Crypto.isValidAddress(tradeBotRespondRequest.receivingAddress)) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_ADDRESS); - if (!Controller.getInstance().isUpToDate()) + final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L); + if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC); // Extract data from cross-chain trading AT diff --git a/src/main/java/org/qortal/api/resource/TransactionsResource.java b/src/main/java/org/qortal/api/resource/TransactionsResource.java index 4c440304..75724310 100644 --- a/src/main/java/org/qortal/api/resource/TransactionsResource.java +++ b/src/main/java/org/qortal/api/resource/TransactionsResource.java @@ -723,9 +723,9 @@ public class TransactionsResource { ApiError.BLOCKCHAIN_NEEDS_SYNC, ApiError.INVALID_SIGNATURE, ApiError.INVALID_DATA, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE }) public String processTransaction(String rawBytes58) { - // Only allow a transaction to be processed if our latest block is less than 30 minutes old + // Only allow a transaction to be processed if our latest block is less than 60 minutes old // If older than this, we should first wait until the blockchain is synced - final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L); + final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L); if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.BLOCKCHAIN_NEEDS_SYNC); diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 7800f2a1..ddfe247a 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -3,10 +3,14 @@ package org.qortal.block; import static java.util.Arrays.stream; import static java.util.stream.Collectors.toMap; +import java.io.ByteArrayOutputStream; +import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; +import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; +import java.text.MessageFormat; import java.text.NumberFormat; import java.util.*; import java.util.stream.Collectors; @@ -24,6 +28,7 @@ import org.qortal.block.BlockChain.BlockTimingByHeight; import org.qortal.block.BlockChain.AccountLevelShareBin; import org.qortal.controller.OnlineAccountsManager; import org.qortal.crypto.Crypto; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.data.account.AccountBalanceData; import org.qortal.data.account.AccountData; import org.qortal.data.account.EligibleQoraHolderData; @@ -118,6 +123,8 @@ public class Block { /** Remote/imported/loaded AT states */ protected List atStates; + /** Remote hash of AT 
states - in lieu of full AT state data in {@code atStates} */ + protected byte[] atStatesHash; /** Locally-generated AT states */ protected List ourAtStates; /** Locally-generated AT fees */ @@ -216,11 +223,10 @@ public class Block { return accountAmount; } } + /** Always use getExpandedAccounts() to access this, as it's lazy-instantiated. */ private List cachedExpandedAccounts = null; - /** Opportunistic cache of this block's valid online accounts. Only created by call to isValid(). */ - private List cachedValidOnlineAccounts = null; /** Opportunistic cache of this block's valid online reward-shares. Only created by call to isValid(). */ private List cachedOnlineRewardShares = null; @@ -255,7 +261,7 @@ public class Block { * Constructs new Block using passed transaction and AT states. *
<p>
* This constructor typically used when receiving a serialized block over the network. - * + * * @param repository * @param blockData * @param transactions @@ -281,6 +287,35 @@ public class Block { this.blockData.setTotalFees(totalFees); } + /** + * Constructs new Block using passed transaction and minimal AT state info. + *
<p>
+ * This constructor typically used when receiving a serialized block over the network. + * + * @param repository + * @param blockData + * @param transactions + * @param atStatesHash + */ + public Block(Repository repository, BlockData blockData, List transactions, byte[] atStatesHash) { + this(repository, blockData); + + this.transactions = new ArrayList<>(); + + long totalFees = 0; + + // We have to sum fees too + for (TransactionData transactionData : transactions) { + this.transactions.add(Transaction.fromData(repository, transactionData)); + totalFees += transactionData.getFee(); + } + + this.atStatesHash = atStatesHash; + totalFees += this.blockData.getATFees(); + + this.blockData.setTotalFees(totalFees); + } + /** * Constructs new Block with empty transaction list, using passed minter account. * @@ -313,18 +348,21 @@ public class Block { int version = parentBlock.getNextBlockVersion(); byte[] reference = parentBlockData.getSignature(); - // Fetch our list of online accounts - List onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(); - if (onlineAccounts.isEmpty()) { - LOGGER.error("No online accounts - not even our own?"); + // Qortal: minter is always a reward-share, so find actual minter and get their effective minting level + int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey()); + if (minterLevel == 0) { + LOGGER.error("Minter effective level returned zero?"); return null; } - // Find newest online accounts timestamp - long onlineAccountsTimestamp = 0; - for (OnlineAccountData onlineAccountData : onlineAccounts) { - if (onlineAccountData.getTimestamp() > onlineAccountsTimestamp) - onlineAccountsTimestamp = onlineAccountData.getTimestamp(); + long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel); + long onlineAccountsTimestamp = OnlineAccountsManager.getCurrentOnlineAccountTimestamp(); + + // Fetch our list of online accounts + List onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(onlineAccountsTimestamp); + if (onlineAccounts.isEmpty()) { + LOGGER.error("No online accounts - not even our own?"); + return null; } // Load sorted list of reward share public keys into memory, so that the indexes can be obtained. 
@@ -335,10 +373,6 @@ public class Block { // Map using index into sorted list of reward-shares as key Map indexedOnlineAccounts = new HashMap<>(); for (OnlineAccountData onlineAccountData : onlineAccounts) { - // Disregard online accounts with different timestamps - if (onlineAccountData.getTimestamp() != onlineAccountsTimestamp) - continue; - Integer accountIndex = getRewardShareIndex(onlineAccountData.getPublicKey(), allRewardSharePublicKeys); if (accountIndex == null) // Online account (reward-share) with current timestamp but reward-share cancelled @@ -355,26 +389,29 @@ public class Block { byte[] encodedOnlineAccounts = BlockTransformer.encodeOnlineAccounts(onlineAccountsSet); int onlineAccountsCount = onlineAccountsSet.size(); - // Concatenate online account timestamp signatures (in correct order) - byte[] onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH]; - for (int i = 0; i < onlineAccountsCount; ++i) { - Integer accountIndex = accountIndexes.get(i); - OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex); - System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH); + byte[] onlineAccountsSignatures; + if (timestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // Collate all signatures + Collection signaturesToAggregate = indexedOnlineAccounts.values() + .stream() + .map(OnlineAccountData::getSignature) + .collect(Collectors.toList()); + + // Aggregated, single signature + onlineAccountsSignatures = Qortal25519Extras.aggregateSignatures(signaturesToAggregate); + } else { + // Concatenate online account timestamp signatures (in correct order) + onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH]; + for (int i = 0; i < onlineAccountsCount; ++i) { + Integer accountIndex = accountIndexes.get(i); + OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex); + System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH); + } } byte[] minterSignature = minter.sign(BlockTransformer.getBytesForMinterSignature(parentBlockData, minter.getPublicKey(), encodedOnlineAccounts)); - // Qortal: minter is always a reward-share, so find actual minter and get their effective minting level - int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey()); - if (minterLevel == 0) { - LOGGER.error("Minter effective level returned zero?"); - return null; - } - - long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel); - int transactionCount = 0; byte[] transactionsSignature = null; int height = parentBlockData.getHeight() + 1; @@ -979,49 +1016,59 @@ public class Block { if (this.blockData.getOnlineAccountsSignatures() == null || this.blockData.getOnlineAccountsSignatures().length == 0) return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MISSING; - if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH) - return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // We expect just the one, aggregated signature + if (this.blockData.getOnlineAccountsSignatures().length != Transformer.SIGNATURE_LENGTH) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + } else { + if 
(this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + } // Check signatures long onlineTimestamp = this.blockData.getOnlineAccountsTimestamp(); byte[] onlineTimestampBytes = Longs.toByteArray(onlineTimestamp); - // If this block is much older than current online timestamp, then there's no point checking current online accounts - List currentOnlineAccounts = onlineTimestamp < NTP.getTime() - OnlineAccountsManager.getOnlineTimestampModulus() - ? null - : OnlineAccountsManager.getInstance().getOnlineAccounts(); - List latestBlocksOnlineAccounts = OnlineAccountsManager.getInstance().getLatestBlocksOnlineAccounts(); - - // Extract online accounts' timestamp signatures from block data + // Extract online accounts' timestamp signatures from block data. Only one signature if aggregated. List onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures()); - // We'll build up a list of online accounts to hand over to Controller if block is added to chain - // and this will become latestBlocksOnlineAccounts (above) to reduce CPU load when we process next block... - List ourOnlineAccounts = new ArrayList<>(); + if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // Aggregate all public keys + Collection publicKeys = onlineRewardShares.stream() + .map(RewardShareData::getRewardSharePublicKey) + .collect(Collectors.toList()); - for (int i = 0; i < onlineAccountsSignatures.size(); ++i) { - byte[] signature = onlineAccountsSignatures.get(i); - byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey(); + byte[] aggregatePublicKey = Qortal25519Extras.aggregatePublicKeys(publicKeys); - OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey); - ourOnlineAccounts.add(onlineAccountData); + byte[] aggregateSignature = onlineAccountsSignatures.get(0); - // If signature is still current then no need to perform Ed25519 verify - if (currentOnlineAccounts != null && currentOnlineAccounts.remove(onlineAccountData)) - // remove() returned true, so online account still current - // and one less entry in currentOnlineAccounts to check next time - continue; - - // If signature was okay in latest block then no need to perform Ed25519 verify - if (latestBlocksOnlineAccounts != null && latestBlocksOnlineAccounts.contains(onlineAccountData)) - continue; - - if (!Crypto.verify(publicKey, signature, onlineTimestampBytes)) + // One-step verification of aggregate signature using aggregate public key + if (!Qortal25519Extras.verifyAggregated(aggregatePublicKey, aggregateSignature, onlineTimestampBytes)) return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + } else { + // Build block's view of online accounts + Set onlineAccounts = new HashSet<>(); + for (int i = 0; i < onlineAccountsSignatures.size(); ++i) { + byte[] signature = onlineAccountsSignatures.get(i); + byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey(); + + OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey); + onlineAccounts.add(onlineAccountData); + } + + // Remove those already validated & cached by online accounts manager - no need to re-validate them + OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp); + + // Validate the rest + for (OnlineAccountData onlineAccount : onlineAccounts) + 
if (!Crypto.verify(onlineAccount.getPublicKey(), onlineAccount.getSignature(), onlineTimestampBytes)) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + + // We've validated these, so allow online accounts manager to cache + OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp); } // All online accounts valid, so save our list of online accounts for potential later use - this.cachedValidOnlineAccounts = ourOnlineAccounts; this.cachedOnlineRewardShares = onlineRewardShares; return ValidationResult.OK; @@ -1194,7 +1241,7 @@ public class Block { */ private ValidationResult areAtsValid() throws DataException { // Locally generated AT states should be valid so no need to re-execute them - if (this.ourAtStates == this.getATStates()) // Note object reference compare + if (this.ourAtStates != null && this.ourAtStates == this.atStates) // Note object reference compare return ValidationResult.OK; // Generate local AT states for comparison @@ -1208,8 +1255,33 @@ public class Block { if (this.ourAtFees != this.blockData.getATFees()) return ValidationResult.AT_STATES_MISMATCH; - // Note: this.atStates fully loaded thanks to this.getATStates() call above - for (int s = 0; s < this.atStates.size(); ++s) { + // If we have a single AT states hash then compare that in preference + if (this.atStatesHash != null) { + int atBytesLength = blockData.getATCount() * BlockTransformer.AT_ENTRY_LENGTH; + ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength); + + try { + for (ATStateData atStateData : this.ourAtStates) { + atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8)); + atHashBytes.write(atStateData.getStateHash()); + atHashBytes.write(Longs.toByteArray(atStateData.getFees())); + } + } catch (IOException e) { + throw new DataException("Couldn't validate AT states hash due to serialization issue?", e); + } + + byte[] ourAtStatesHash = Crypto.digest(atHashBytes.toByteArray()); + if (!Arrays.equals(ourAtStatesHash, this.atStatesHash)) + return ValidationResult.AT_STATES_MISMATCH; + + // Use our AT state data from now on + this.atStates = this.ourAtStates; + return ValidationResult.OK; + } + + // Note: this.atStates fully loaded thanks to this.getATStates() call: + this.getATStates(); + for (int s = 0; s < this.ourAtStates.size(); ++s) { ATStateData ourAtState = this.ourAtStates.get(s); ATStateData theirAtState = this.atStates.get(s); @@ -1367,9 +1439,6 @@ public class Block { postBlockTidy(); - // Give Controller our cached, valid online accounts data (if any) to help reduce CPU load for next block - OnlineAccountsManager.getInstance().pushLatestBlocksOnlineAccounts(this.cachedValidOnlineAccounts); - // Log some debugging info relating to the block weight calculation this.logDebugInfo(); } @@ -1585,9 +1654,6 @@ public class Block { this.blockData.setHeight(null); postBlockTidy(); - - // Remove any cached, valid online accounts data from Controller - OnlineAccountsManager.getInstance().popLatestBlocksOnlineAccounts(); } protected void orphanTransactionsFromBlock() throws DataException { diff --git a/src/main/java/org/qortal/block/BlockChain.java b/src/main/java/org/qortal/block/BlockChain.java index b54c9613..8bbefb11 100644 --- a/src/main/java/org/qortal/block/BlockChain.java +++ b/src/main/java/org/qortal/block/BlockChain.java @@ -70,7 +70,9 @@ public class BlockChain { shareBinFix, calcChainWeightTimestamp, transactionV5Timestamp, - transactionV6Timestamp; + transactionV6Timestamp, + disableReferenceTimestamp, + 
aggregateSignatureTimestamp; } // Custom transaction fees @@ -419,6 +421,14 @@ public class BlockChain { return this.featureTriggers.get(FeatureTrigger.transactionV6Timestamp.name()).longValue(); } + public long getDisableReferenceTimestamp() { + return this.featureTriggers.get(FeatureTrigger.disableReferenceTimestamp.name()).longValue(); + } + + public long getAggregateSignatureTimestamp() { + return this.featureTriggers.get(FeatureTrigger.aggregateSignatureTimestamp.name()).longValue(); + } + // More complex getters for aspects that change by height or timestamp public long getRewardAtHeight(int ourHeight) { diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java index 9966d6a9..2d736e76 100644 --- a/src/main/java/org/qortal/controller/BlockMinter.java +++ b/src/main/java/org/qortal/controller/BlockMinter.java @@ -65,9 +65,8 @@ public class BlockMinter extends Thread { // Lite nodes do not mint return; } - - try (final Repository repository = RepositoryManager.getRepository()) { - if (Settings.getInstance().getWipeUnconfirmedOnStart()) { + if (Settings.getInstance().getWipeUnconfirmedOnStart()) { + try (final Repository repository = RepositoryManager.getRepository()) { // Wipe existing unconfirmed transactions List unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions(); @@ -77,30 +76,31 @@ public class BlockMinter extends Thread { } repository.saveChanges(); + } catch (DataException e) { + LOGGER.warn("Repository issue trying to wipe unconfirmed transactions on start-up: {}", e.getMessage()); + // Fall-through to normal behaviour in case we can recover } + } - // Going to need this a lot... - BlockRepository blockRepository = repository.getBlockRepository(); - BlockData previousBlockData = null; + BlockData previousBlockData = null; - // Vars to keep track of blocks that were skipped due to chain weight - byte[] parentSignatureForLastLowWeightBlock = null; - Long timeOfLastLowWeightBlock = null; + // Vars to keep track of blocks that were skipped due to chain weight + byte[] parentSignatureForLastLowWeightBlock = null; + Long timeOfLastLowWeightBlock = null; - List newBlocks = new ArrayList<>(); + List newBlocks = new ArrayList<>(); - // Flags for tracking change in whether minting is possible, - // so we can notify Controller, and further update SysTray, etc. - boolean isMintingPossible = false; - boolean wasMintingPossible = isMintingPossible; - while (running) { - repository.discardChanges(); // Free repository locks, if any + // Flags for tracking change in whether minting is possible, + // so we can notify Controller, and further update SysTray, etc. + boolean isMintingPossible = false; + boolean wasMintingPossible = isMintingPossible; + while (running) { + if (isMintingPossible != wasMintingPossible) + Controller.getInstance().onMintingPossibleChange(isMintingPossible); - if (isMintingPossible != wasMintingPossible) - Controller.getInstance().onMintingPossibleChange(isMintingPossible); - - wasMintingPossible = isMintingPossible; + wasMintingPossible = isMintingPossible; + try { // Sleep for a while Thread.sleep(1000); @@ -114,319 +114,338 @@ public class BlockMinter extends Thread { if (minLatestBlockTimestamp == null) continue; - // No online accounts? (e.g. during startup) - if (OnlineAccountsManager.getInstance().getOnlineAccounts().isEmpty()) + // No online accounts for current timestamp? (e.g. 
during startup) + if (!OnlineAccountsManager.getInstance().hasOnlineAccounts()) continue; - List mintingAccountsData = repository.getAccountRepository().getMintingAccounts(); - // No minting accounts? - if (mintingAccountsData.isEmpty()) - continue; + try (final Repository repository = RepositoryManager.getRepository()) { + // Going to need this a lot... + BlockRepository blockRepository = repository.getBlockRepository(); - // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level - // Note that minting accounts are actually reward-shares in Qortal - Iterator madi = mintingAccountsData.iterator(); - while (madi.hasNext()) { - MintingAccountData mintingAccountData = madi.next(); - - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey()); - if (rewardShareData == null) { - // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts - madi.remove(); - continue; - } - - Account mintingAccount = new Account(repository, rewardShareData.getMinter()); - if (!mintingAccount.canMint()) { - // Minting-account component of reward-share can no longer mint - disregard - madi.remove(); - continue; - } - - // Optional (non-validated) prevention of block submissions below a defined level. - // This is an unvalidated version of Blockchain.minAccountLevelToMint - // and exists only to reduce block candidates by default. - int level = mintingAccount.getEffectiveMintingLevel(); - if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) { - madi.remove(); - continue; - } - } - - // Needs a mutable copy of the unmodifiableList - List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers()); - BlockData lastBlockData = blockRepository.getLastBlock(); - - // Disregard peers that have "misbehaved" recently - peers.removeIf(Controller.hasMisbehaved); - - // Disregard peers that don't have a recent block, but only if we're not in recovery mode. - // In that mode, we want to allow minting on top of older blocks, to recover stalled networks. - if (Synchronizer.getInstance().getRecoveryMode() == false) - peers.removeIf(Controller.hasNoRecentBlock); - - // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from? - if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) - continue; - - // If we are stuck on an invalid block, we should allow an alternative to be minted - boolean recoverInvalidBlock = false; - if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) { - // We've had at least one invalid block - long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived; - long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived; - if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) { - if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) { - // Last valid block was more than 10 mins ago, but we've had an invalid block since then - // Assume that the chain has stalled because there is no alternative valid candidate - // Enter recovery mode to allow alternative, valid candidates to be minted - recoverInvalidBlock = true; - } - } - } - - // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode. 
- if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp) - if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false) + List mintingAccountsData = repository.getAccountRepository().getMintingAccounts(); + // No minting accounts? + if (mintingAccountsData.isEmpty()) continue; - // There are enough peers with a recent block and our latest block is recent - // so go ahead and mint a block if possible. - isMintingPossible = true; + // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level + // Note that minting accounts are actually reward-shares in Qortal + Iterator madi = mintingAccountsData.iterator(); + while (madi.hasNext()) { + MintingAccountData mintingAccountData = madi.next(); - // Check blockchain hasn't changed - if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) { - previousBlockData = lastBlockData; - newBlocks.clear(); - - // Reduce log timeout - logTimeout = 10 * 1000L; - - // Last low weight block is no longer valid - parentSignatureForLastLowWeightBlock = null; - } - - // Discard accounts we have already built blocks with - mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey()))); - - // Do we need to build any potential new blocks? - List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList()); - - // We might need to sit the next block out, if one of our minting accounts signed the previous one - final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey(); - final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter)); - if (mintedLastBlock) { - LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one")); - continue; - } - - if (parentSignatureForLastLowWeightBlock != null) { - // The last iteration found a higher weight block in the network, so sleep for a while - // to allow is to sync the higher weight chain. We are sleeping here rather than when - // detected as we don't want to hold the blockchain lock open. - LOGGER.debug("Sleeping for 10 seconds..."); - Thread.sleep(10 * 1000L); - } - - for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) { - // First block does the AT heavy-lifting - if (newBlocks.isEmpty()) { - Block newBlock = Block.mint(repository, previousBlockData, mintingAccount); - if (newBlock == null) { - // For some reason we can't mint right now - moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block")); + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey()); + if (rewardShareData == null) { + // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts + madi.remove(); continue; } - newBlocks.add(newBlock); - } else { - // The blocks for other minters require less effort... 
- Block newBlock = newBlocks.get(0).remint(mintingAccount); - if (newBlock == null) { - // For some reason we can't mint right now - moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block")); + Account mintingAccount = new Account(repository, rewardShareData.getMinter()); + if (!mintingAccount.canMint()) { + // Minting-account component of reward-share can no longer mint - disregard + madi.remove(); continue; } - newBlocks.add(newBlock); + // Optional (non-validated) prevention of block submissions below a defined level. + // This is an unvalidated version of Blockchain.minAccountLevelToMint + // and exists only to reduce block candidates by default. + int level = mintingAccount.getEffectiveMintingLevel(); + if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) { + madi.remove(); + continue; + } } - } - // No potential block candidates? - if (newBlocks.isEmpty()) - continue; + // Needs a mutable copy of the unmodifiableList + List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers()); + BlockData lastBlockData = blockRepository.getLastBlock(); - // Make sure we're the only thread modifying the blockchain - ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); - if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) { - LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds"); - continue; - } + // Disregard peers that have "misbehaved" recently + peers.removeIf(Controller.hasMisbehaved); - boolean newBlockMinted = false; - Block newBlock = null; + // Disregard peers that don't have a recent block, but only if we're not in recovery mode. + // In that mode, we want to allow minting on top of older blocks, to recover stalled networks. + if (Synchronizer.getInstance().getRecoveryMode() == false) + peers.removeIf(Controller.hasNoRecentBlock); - try { - // Clear repository session state so we have latest view of data - repository.discardChanges(); - - // Now that we have blockchain lock, do final check that chain hasn't changed - BlockData latestBlockData = blockRepository.getLastBlock(); - if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature())) + // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from? + if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) continue; - List goodBlocks = new ArrayList<>(); - for (Block testBlock : newBlocks) { - // Is new block's timestamp valid yet? - // We do a separate check as some timestamp checks are skipped for testchains - if (testBlock.isTimestampValid() != ValidationResult.OK) - continue; - - testBlock.preProcess(); - - // Is new block valid yet? 
(Before adding unconfirmed transactions) - ValidationResult result = testBlock.isValid(); - if (result != ValidationResult.OK) { - moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name()))); - - continue; - } - - goodBlocks.add(testBlock); - } - - if (goodBlocks.isEmpty()) - continue; - - // Pick best block - final int parentHeight = previousBlockData.getHeight(); - final byte[] parentBlockSignature = previousBlockData.getSignature(); - - BigInteger bestWeight = null; - - for (int bi = 0; bi < goodBlocks.size(); ++bi) { - BlockData blockData = goodBlocks.get(bi).getBlockData(); - - BlockSummaryData blockSummaryData = new BlockSummaryData(blockData); - int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey()); - blockSummaryData.setMinterLevel(minterLevel); - - BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData); - - if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) { - newBlock = goodBlocks.get(bi); - bestWeight = blockWeight; - } - } - - try { - if (this.higherWeightChainExists(repository, bestWeight)) { - - // Check if the base block has updated since the last time we were here - if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null || - !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) { - // We've switched to a different chain, so reset the timer - timeOfLastLowWeightBlock = NTP.getTime(); + // If we are stuck on an invalid block, we should allow an alternative to be minted + boolean recoverInvalidBlock = false; + if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) { + // We've had at least one invalid block + long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived; + long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived; + if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) { + if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) { + // Last valid block was more than 10 mins ago, but we've had an invalid block since then + // Assume that the chain has stalled because there is no alternative valid candidate + // Enter recovery mode to allow alternative, valid candidates to be minted + recoverInvalidBlock = true; } - parentSignatureForLastLowWeightBlock = previousBlockData.getSignature(); + } + } - // If less than 30 seconds has passed since first detection the higher weight chain, - // we should skip our block submission to give us the opportunity to sync to the better chain - if (NTP.getTime() - timeOfLastLowWeightBlock < 30*1000L) { - LOGGER.debug("Higher weight chain found in peers, so not signing a block this round"); - LOGGER.debug("Time since detected: {}ms", NTP.getTime() - timeOfLastLowWeightBlock); + // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode. + if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp) + if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false) + continue; + + // There are enough peers with a recent block and our latest block is recent + // so go ahead and mint a block if possible. 
+ isMintingPossible = true; + + // Reattach newBlocks to new repository handle + for (Block newBlock : newBlocks) + newBlock.setRepository(repository); + + // Check blockchain hasn't changed + if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) { + previousBlockData = lastBlockData; + newBlocks.clear(); + + // Reduce log timeout + logTimeout = 10 * 1000L; + + // Last low weight block is no longer valid + parentSignatureForLastLowWeightBlock = null; + } + + // Discard accounts we have already built blocks with + mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey()))); + + // Do we need to build any potential new blocks? + List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList()); + + // We might need to sit the next block out, if one of our minting accounts signed the previous one + byte[] previousBlockMinter = previousBlockData.getMinterPublicKey(); + boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter)); + if (mintedLastBlock) { + LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one")); + continue; + } + + if (parentSignatureForLastLowWeightBlock != null) { + // The last iteration found a higher weight block in the network, so sleep for a while + // to allow is to sync the higher weight chain. We are sleeping here rather than when + // detected as we don't want to hold the blockchain lock open. + LOGGER.info("Sleeping for 10 seconds..."); + Thread.sleep(10 * 1000L); + } + + for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) { + // First block does the AT heavy-lifting + if (newBlocks.isEmpty()) { + Block newBlock = Block.mint(repository, previousBlockData, mintingAccount); + if (newBlock == null) { + // For some reason we can't mint right now + moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block")); continue; } - else { - // More than 30 seconds have passed, so we should submit our block candidate anyway. - LOGGER.debug("More than 30 seconds passed, so proceeding to submit block candidate..."); + + newBlocks.add(newBlock); + } else { + // The blocks for other minters require less effort... + Block newBlock = newBlocks.get(0).remint(mintingAccount); + if (newBlock == null) { + // For some reason we can't mint right now + moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block")); + continue; } + + newBlocks.add(newBlock); } - else { - LOGGER.debug("No higher weight chain found in peers"); - } - } catch (DataException e) { - LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway..."); } - // Discard any uncommitted changes as a result of the higher weight chain detection - repository.discardChanges(); + // No potential block candidates? + if (newBlocks.isEmpty()) + continue; - // Clear variables that track low weight blocks - parentSignatureForLastLowWeightBlock = null; - timeOfLastLowWeightBlock = null; - - - // Add unconfirmed transactions - addUnconfirmedTransactions(repository, newBlock); - - // Sign to create block's signature - newBlock.sign(); - - // Is newBlock still valid? 
- ValidationResult validationResult = newBlock.isValid(); - if (validationResult != ValidationResult.OK) { - // No longer valid? Report and discard - LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name())); - - // Rebuild block candidates, just to be sure - newBlocks.clear(); + // Make sure we're the only thread modifying the blockchain + ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); + if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) { + LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds"); continue; } - // Add to blockchain - something else will notice and broadcast new block to network + boolean newBlockMinted = false; + Block newBlock = null; + try { - newBlock.process(); + // Clear repository session state so we have latest view of data + repository.discardChanges(); - repository.saveChanges(); + // Now that we have blockchain lock, do final check that chain hasn't changed + BlockData latestBlockData = blockRepository.getLastBlock(); + if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature())) + continue; - LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight())); + List goodBlocks = new ArrayList<>(); + boolean wasInvalidBlockDiscarded = false; + Iterator newBlocksIterator = newBlocks.iterator(); - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey()); + while (newBlocksIterator.hasNext()) { + Block testBlock = newBlocksIterator.next(); - if (rewardShareData != null) { - LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s", - newBlock.getBlockData().getHeight(), - Base58.encode(newBlock.getBlockData().getSignature()), - Base58.encode(newBlock.getParent().getSignature()), - rewardShareData.getMinter(), - rewardShareData.getRecipient())); - } else { - LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s", - newBlock.getBlockData().getHeight(), - Base58.encode(newBlock.getBlockData().getSignature()), - Base58.encode(newBlock.getParent().getSignature()), - newBlock.getMinter().getAddress())); + // Is new block's timestamp valid yet? + // We do a separate check as some timestamp checks are skipped for testchains + if (testBlock.isTimestampValid() != ValidationResult.OK) + continue; + + testBlock.preProcess(); + + // Is new block valid yet? (Before adding unconfirmed transactions) + ValidationResult result = testBlock.isValid(); + if (result != ValidationResult.OK) { + moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name()))); + + newBlocksIterator.remove(); + wasInvalidBlockDiscarded = true; + /* + * Bail out fast so that we loop around from the top again. + * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks, + * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again. + * Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class). 
+ */ + break; + } + + goodBlocks.add(testBlock); } - // Notify network after we're released blockchain lock - newBlockMinted = true; + if (wasInvalidBlockDiscarded || goodBlocks.isEmpty()) + continue; - // Notify Controller - repository.discardChanges(); // clear transaction status to prevent deadlocks - Controller.getInstance().onNewBlock(newBlock.getBlockData()); - } catch (DataException e) { - // Unable to process block - report and discard - LOGGER.error("Unable to process newly minted block?", e); - newBlocks.clear(); + // Pick best block + final int parentHeight = previousBlockData.getHeight(); + final byte[] parentBlockSignature = previousBlockData.getSignature(); + + BigInteger bestWeight = null; + + for (int bi = 0; bi < goodBlocks.size(); ++bi) { + BlockData blockData = goodBlocks.get(bi).getBlockData(); + + BlockSummaryData blockSummaryData = new BlockSummaryData(blockData); + int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey()); + blockSummaryData.setMinterLevel(minterLevel); + + BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData); + + if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) { + newBlock = goodBlocks.get(bi); + bestWeight = blockWeight; + } + } + + try { + if (this.higherWeightChainExists(repository, bestWeight)) { + + // Check if the base block has updated since the last time we were here + if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null || + !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) { + // We've switched to a different chain, so reset the timer + timeOfLastLowWeightBlock = NTP.getTime(); + } + parentSignatureForLastLowWeightBlock = previousBlockData.getSignature(); + + // If less than 30 seconds has passed since first detection the higher weight chain, + // we should skip our block submission to give us the opportunity to sync to the better chain + if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) { + LOGGER.info("Higher weight chain found in peers, so not signing a block this round"); + LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock); + continue; + } else { + // More than 30 seconds have passed, so we should submit our block candidate anyway. + LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate..."); + } + } else { + LOGGER.debug("No higher weight chain found in peers"); + } + } catch (DataException e) { + LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway..."); + } + + // Discard any uncommitted changes as a result of the higher weight chain detection + repository.discardChanges(); + + // Clear variables that track low weight blocks + parentSignatureForLastLowWeightBlock = null; + timeOfLastLowWeightBlock = null; + + // Add unconfirmed transactions + addUnconfirmedTransactions(repository, newBlock); + + // Sign to create block's signature + newBlock.sign(); + + // Is newBlock still valid? + ValidationResult validationResult = newBlock.isValid(); + if (validationResult != ValidationResult.OK) { + // No longer valid? 
Report and discard + LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name())); + + // Rebuild block candidates, just to be sure + newBlocks.clear(); + continue; + } + + // Add to blockchain - something else will notice and broadcast new block to network + try { + newBlock.process(); + + repository.saveChanges(); + + LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight())); + + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey()); + + if (rewardShareData != null) { + LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s", + newBlock.getBlockData().getHeight(), + Base58.encode(newBlock.getBlockData().getSignature()), + Base58.encode(newBlock.getParent().getSignature()), + rewardShareData.getMinter(), + rewardShareData.getRecipient())); + } else { + LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s", + newBlock.getBlockData().getHeight(), + Base58.encode(newBlock.getBlockData().getSignature()), + Base58.encode(newBlock.getParent().getSignature()), + newBlock.getMinter().getAddress())); + } + + // Notify network after we're released blockchain lock + newBlockMinted = true; + + // Notify Controller + repository.discardChanges(); // clear transaction status to prevent deadlocks + Controller.getInstance().onNewBlock(newBlock.getBlockData()); + } catch (DataException e) { + // Unable to process block - report and discard + LOGGER.error("Unable to process newly minted block?", e); + newBlocks.clear(); + } + } finally { + blockchainLock.unlock(); } - } finally { - blockchainLock.unlock(); - } - if (newBlockMinted) { - // Broadcast our new chain to network - BlockData newBlockData = newBlock.getBlockData(); + if (newBlockMinted) { + // Broadcast our new chain to network + BlockData newBlockData = newBlock.getBlockData(); - Network network = Network.getInstance(); - network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData)); + Network network = Network.getInstance(); + network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData)); + } + } catch (DataException e) { + LOGGER.warn("Repository issue while running block minter", e); } + } catch (InterruptedException e) { + // We've been interrupted - time to exit + return; } - } catch (DataException e) { - LOGGER.warn("Repository issue while running block minter", e); - } catch (InterruptedException e) { - // We've been interrupted - time to exit - return; } } @@ -557,18 +576,23 @@ public class BlockMinter extends Thread { // This peer has common block data CommonBlockData commonBlockData = peer.getCommonBlockData(); BlockSummaryData commonBlockSummaryData = commonBlockData.getCommonBlockSummary(); - if (commonBlockData.getChainWeight() != null) { + if (commonBlockData.getChainWeight() != null && peer.getCommonBlockData().getBlockSummariesAfterCommonBlock() != null) { // The synchronizer has calculated this peer's chain weight - BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock()); - BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight); - BigInteger peerChainWeight = commonBlockData.getChainWeight(); - if (peerChainWeight.compareTo(ourChainWeight) >= 0) { - // This peer has a higher weight chain than ours - 
LOGGER.debug("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight)); - return true; + if (!Synchronizer.getInstance().containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) { + // .. and it doesn't hold any invalid blocks + BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock()); + BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight); + BigInteger peerChainWeight = commonBlockData.getChainWeight(); + if (peerChainWeight.compareTo(ourChainWeight) >= 0) { + // This peer has a higher weight chain than ours + LOGGER.info("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight)); + return true; + } else { + LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight)); + } } else { - LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight)); + LOGGER.debug("Peer {} has an invalid block", peer); } } else { LOGGER.debug("Peer {} has no chain weight", peer); diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index a5ada0c2..cde965c1 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -113,6 +113,7 @@ public class Controller extends Thread { private long repositoryBackupTimestamp = startTime; // ms private long repositoryMaintenanceTimestamp = startTime; // ms private long repositoryCheckpointTimestamp = startTime; // ms + private long prunePeersTimestamp = startTime; // ms private long ntpCheckTimestamp = startTime; // ms private long deleteExpiredTimestamp = startTime + DELETE_EXPIRED_INTERVAL; // ms @@ -552,6 +553,7 @@ public class Controller extends Thread { final long repositoryBackupInterval = Settings.getInstance().getRepositoryBackupInterval(); final long repositoryCheckpointInterval = Settings.getInstance().getRepositoryCheckpointInterval(); long repositoryMaintenanceInterval = getRandomRepositoryMaintenanceInterval(); + final long prunePeersInterval = 5 * 60 * 1000L; // Every 5 minutes // Start executor service for trimming or pruning PruneManager.getInstance().start(); @@ -649,10 +651,15 @@ public class Controller extends Thread { } // Prune stuck/slow/old peers - try { - Network.getInstance().prunePeers(); - } catch (DataException e) { - LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage())); + if (now >= prunePeersTimestamp + prunePeersInterval) { + prunePeersTimestamp = now + prunePeersInterval; + + try { + LOGGER.debug("Pruning peers..."); + Network.getInstance().prunePeers(); + } catch (DataException e) { + LOGGER.warn(String.format("Repository issue when trying to prune peers: %s", e.getMessage())); + } } // Delete expired transactions @@ -787,23 +794,24 @@ public class Controller extends Thread { String actionText; // Use a more tolerant latest block timestamp in the isUpToDate() calls below to reduce misleading statuses. - // Any block in the last 30 minutes is considered "up to date" for the purposes of displaying statuses. 
- final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L); + // Any block in the last 2 hours is considered "up to date" for the purposes of displaying statuses. + // This also aligns with the time interval required for continued online account submission. + final Long minLatestBlockTimestamp = NTP.getTime() - (2 * 60 * 60 * 1000L); + + // Only show sync percent if it's less than 100, to avoid confusion + final Integer syncPercent = Synchronizer.getInstance().getSyncPercent(); + final boolean isSyncing = (syncPercent != null && syncPercent < 100); synchronized (Synchronizer.getInstance().syncLock) { if (Settings.getInstance().isLite()) { actionText = Translator.INSTANCE.translate("SysTray", "LITE_NODE"); SysTray.getInstance().setTrayIcon(4); } - else if (this.isMintingPossible) { - actionText = Translator.INSTANCE.translate("SysTray", "MINTING_ENABLED"); - SysTray.getInstance().setTrayIcon(2); - } else if (numberOfPeers < Settings.getInstance().getMinBlockchainPeers()) { actionText = Translator.INSTANCE.translate("SysTray", "CONNECTING"); SysTray.getInstance().setTrayIcon(3); } - else if (!this.isUpToDate(minLatestBlockTimestamp) && Synchronizer.getInstance().isSynchronizing()) { + else if (!this.isUpToDate(minLatestBlockTimestamp) && isSyncing) { actionText = String.format("%s - %d%%", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN"), Synchronizer.getInstance().getSyncPercent()); SysTray.getInstance().setTrayIcon(3); } @@ -811,6 +819,10 @@ public class Controller extends Thread { actionText = String.format("%s", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN")); SysTray.getInstance().setTrayIcon(3); } + else if (OnlineAccountsManager.getInstance().hasOnlineAccounts()) { + actionText = Translator.INSTANCE.translate("SysTray", "MINTING_ENABLED"); + SysTray.getInstance().setTrayIcon(2); + } else { actionText = Translator.INSTANCE.translate("SysTray", "MINTING_DISABLED"); SysTray.getInstance().setTrayIcon(4); @@ -1229,6 +1241,10 @@ public class Controller extends Thread { OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV2Message(peer, message); break; + case GET_ONLINE_ACCOUNTS_V3: + OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV3Message(peer, message); + break; + case GET_ARBITRARY_DATA: // Not currently supported break; @@ -1362,6 +1378,18 @@ public class Controller extends Thread { Block block = new Block(repository, blockData); + // V2 support + if (peer.getPeersVersion() >= BlockV2Message.MIN_PEER_VERSION) { + Message blockMessage = new BlockV2Message(block); + blockMessage.setId(message.getId()); + if (!peer.sendMessage(blockMessage)) { + peer.disconnect("failed to send block"); + // Don't fall-through to caching because failure to send might be from failure to build message + return; + } + return; + } + CachedBlockMessage blockMessage = new CachedBlockMessage(block); blockMessage.setId(message.getId()); diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index 092cae05..52d3b5fa 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -1,12 +1,15 @@ package org.qortal.controller; +import com.google.common.hash.HashCode; import com.google.common.primitives.Longs; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; 
-import org.qortal.account.PublicKeyAccount; +import org.qortal.block.Block; import org.qortal.block.BlockChain; +import org.qortal.crypto.Crypto; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.data.account.MintingAccountData; import org.qortal.data.account.RewardShareData; import org.qortal.data.network.OnlineAccountData; @@ -18,103 +21,63 @@ import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; import org.qortal.utils.Base58; import org.qortal.utils.NTP; +import org.qortal.utils.NamedThreadFactory; import java.util.*; +import java.util.concurrent.*; import java.util.stream.Collectors; -public class OnlineAccountsManager extends Thread { - - private class OurOnlineAccountsThread extends Thread { - - public void run() { - try { - while (!isStopping) { - Thread.sleep(10000L); - - // Refresh our online accounts signatures? - sendOurOnlineAccountsInfo(); - - } - } catch (InterruptedException e) { - // Fall through to exit thread - } - } - } - +public class OnlineAccountsManager { private static final Logger LOGGER = LogManager.getLogger(OnlineAccountsManager.class); - private static OnlineAccountsManager instance; + // 'Current' as in 'now' + + /** + * How long online accounts signatures last before they expire. + */ + private static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L; + private static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L; + + /** + * How many 'current' timestamp-sets of online accounts we cache. + */ + private static final int MAX_CACHED_TIMESTAMP_SETS = 2; + + /** + * How many timestamp-sets of online accounts we cache for 'latest blocks'. + */ + private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 3; + + private static final long ONLINE_ACCOUNTS_QUEUE_INTERVAL = 100L; //ms + private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms + private static final long ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL = 60 * 1000L; // ms + private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 15 * 1000L; // ms + + private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; // v3.2.0 + private static final long ONLINE_ACCOUNTS_V3_PEER_VERSION = 0x03000300cbL; // v3.3.203 + + private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts")); private volatile boolean isStopping = false; - // To do with online accounts list - private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms - private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 1 * 60 * 1000L; // ms - public static final long ONLINE_TIMESTAMP_MODULUS_V1 = 5 * 60 * 1000L; - public static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L; - /** How many (latest) blocks' worth of online accounts we cache */ - private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 2; - private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; + private final Set onlineAccountsImportQueue = ConcurrentHashMap.newKeySet(); - private long onlineAccountsTasksTimestamp = Controller.startTime + ONLINE_ACCOUNTS_TASKS_INTERVAL; // ms + /** + * Cache of 'current' online accounts, keyed by timestamp + */ + private final Map> currentOnlineAccounts = new ConcurrentHashMap<>(); + /** + * Cache of hash-summary of 'current' online accounts, keyed by timestamp, then leading byte of public key. 
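The generic type parameters on the new fields in this class appear to have been stripped from this rendering of the diff. Judging from how the caches are read and written later in the file (per-timestamp sets of OnlineAccountData, and per-timestamp maps from leading public-key byte to an XOR summary), the declarations are presumably shaped like the sketch below; the wrapper class and the stub OnlineAccountData exist only so the snippet compiles on its own.

    import java.util.*;
    import java.util.concurrent.*;

    class OnlineAccountCachesSketch {
        // Stand-in for org.qortal.data.network.OnlineAccountData
        static final class OnlineAccountData { /* timestamp, signature, publicKey */ }

        // Import queue of as-yet-unverified accounts received from peers
        private final Set<OnlineAccountData> onlineAccountsImportQueue = ConcurrentHashMap.newKeySet();

        // 'Current' online accounts, keyed by online-accounts timestamp
        private final Map<Long, Set<OnlineAccountData>> currentOnlineAccounts = new ConcurrentHashMap<>();

        // XOR hash summaries, keyed by timestamp, then by leading byte of public key
        private final Map<Long, Map<Byte, byte[]>> currentOnlineAccountsHashes = new ConcurrentHashMap<>();

        // Online accounts for latest blocks - not necessarily 'current'
        private final SortedMap<Long, Set<OnlineAccountData>> latestBlocksOnlineAccounts = new ConcurrentSkipListMap<>();
    }
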
+ */ + private final Map> currentOnlineAccountsHashes = new ConcurrentHashMap<>(); - private final List onlineAccountsImportQueue = Collections.synchronizedList(new ArrayList<>()); + /** + * Cache of online accounts for latest blocks - not necessarily 'current' / now. + * Probably only accessed / modified by a single Synchronizer thread. + */ + private final SortedMap> latestBlocksOnlineAccounts = new ConcurrentSkipListMap<>(); - - /** Cache of current 'online accounts' */ - List onlineAccounts = new ArrayList<>(); - /** Cache of latest blocks' online accounts */ - Deque> latestBlocksOnlineAccounts = new ArrayDeque<>(MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS); - - public OnlineAccountsManager() { - - } - - public static synchronized OnlineAccountsManager getInstance() { - if (instance == null) { - instance = new OnlineAccountsManager(); - } - - return instance; - } - - public void run() { - - // Start separate thread to prepare our online accounts - // This could be converted to a thread pool later if more concurrency is needed - OurOnlineAccountsThread ourOnlineAccountsThread = new OurOnlineAccountsThread(); - ourOnlineAccountsThread.start(); - - try { - while (!Controller.isStopping()) { - Thread.sleep(100L); - - final Long now = NTP.getTime(); - if (now == null) { - continue; - } - - // Perform tasks to do with managing online accounts list - if (now >= onlineAccountsTasksTimestamp) { - onlineAccountsTasksTimestamp = now + ONLINE_ACCOUNTS_TASKS_INTERVAL; - performOnlineAccountsTasks(); - } - - // Process queued online account verifications - this.processOnlineAccountsImportQueue(); - - } - } catch (InterruptedException e) { - // Fall through to exit thread - } - - ourOnlineAccountsThread.interrupt(); - } - - public void shutdown() { - isStopping = true; - this.interrupt(); - } + private boolean hasOurOnlineAccounts = false; public static long getOnlineTimestampModulus() { Long now = NTP.getTime(); @@ -123,183 +86,316 @@ public class OnlineAccountsManager extends Thread { } return ONLINE_TIMESTAMP_MODULUS_V1; } - - - // Online accounts import queue - - private void processOnlineAccountsImportQueue() { - if (this.onlineAccountsImportQueue.isEmpty()) { - // Nothing to do - return; - } - - LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size()); - - try (final Repository repository = RepositoryManager.getRepository()) { - - List onlineAccountDataCopy = new ArrayList<>(this.onlineAccountsImportQueue); - for (OnlineAccountData onlineAccountData : onlineAccountDataCopy) { - if (isStopping) { - return; - } - - this.verifyAndAddAccount(repository, onlineAccountData); - - // Remove from queue - onlineAccountsImportQueue.remove(onlineAccountData); - } - - LOGGER.debug("Finished processing online accounts import queue"); - - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while verifying online accounts"), e); - } - } - - - // Utilities - - private void verifyAndAddAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { - final Long now = NTP.getTime(); + public static Long getCurrentOnlineAccountTimestamp() { + Long now = NTP.getTime(); if (now == null) - return; + return null; - PublicKeyAccount otherAccount = new PublicKeyAccount(repository, onlineAccountData.getPublicKey()); - - // Check timestamp is 'recent' here - if (Math.abs(onlineAccountData.getTimestamp() - now) > getOnlineTimestampModulus() * 2) { - LOGGER.trace(() -> String.format("Rejecting online account %s with out of range 
timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp())); - return; - } - - // Verify - byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); - if (!otherAccount.verify(onlineAccountData.getSignature(), data)) { - LOGGER.trace(() -> String.format("Rejecting invalid online account %s", otherAccount.getAddress())); - return; - } - - // Qortal: check online account is actually reward-share - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(onlineAccountData.getPublicKey()); - if (rewardShareData == null) { - // Reward-share doesn't even exist - probably not a good sign - LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(onlineAccountData.getPublicKey()))); - return; - } - - Account mintingAccount = new Account(repository, rewardShareData.getMinter()); - if (!mintingAccount.canMint()) { - // Minting-account component of reward-share can no longer mint - disregard - LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress())); - return; - } - - synchronized (this.onlineAccounts) { - OnlineAccountData existingAccountData = this.onlineAccounts.stream().filter(account -> Arrays.equals(account.getPublicKey(), onlineAccountData.getPublicKey())).findFirst().orElse(null); - - if (existingAccountData != null) { - if (existingAccountData.getTimestamp() < onlineAccountData.getTimestamp()) { - this.onlineAccounts.remove(existingAccountData); - - LOGGER.trace(() -> String.format("Updated online account %s with timestamp %d (was %d)", otherAccount.getAddress(), onlineAccountData.getTimestamp(), existingAccountData.getTimestamp())); - } else { - LOGGER.trace(() -> String.format("Not updating existing online account %s", otherAccount.getAddress())); - - return; - } - } else { - LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp())); - } - - this.onlineAccounts.add(onlineAccountData); - } + long onlineTimestampModulus = getOnlineTimestampModulus(); + return (now / onlineTimestampModulus) * onlineTimestampModulus; } + private OnlineAccountsManager() { + } + + private static class SingletonContainer { + private static final OnlineAccountsManager INSTANCE = new OnlineAccountsManager(); + } + + public static OnlineAccountsManager getInstance() { + return SingletonContainer.INSTANCE; + } + + public void start() { + // Expire old online accounts signatures + executor.scheduleAtFixedRate(this::expireOldOnlineAccounts, ONLINE_ACCOUNTS_TASKS_INTERVAL, ONLINE_ACCOUNTS_TASKS_INTERVAL, TimeUnit.MILLISECONDS); + + // Send our online accounts + executor.scheduleAtFixedRate(this::sendOurOnlineAccountsInfo, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); + + // Request online accounts from peers (legacy) + executor.scheduleAtFixedRate(this::requestLegacyRemoteOnlineAccounts, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); + // Request online accounts from peers (V3+) + executor.scheduleAtFixedRate(this::requestRemoteOnlineAccounts, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); + + // Process import queue + executor.scheduleWithFixedDelay(this::processOnlineAccountsImportQueue, ONLINE_ACCOUNTS_QUEUE_INTERVAL, ONLINE_ACCOUNTS_QUEUE_INTERVAL, TimeUnit.MILLISECONDS); + } + + public void shutdown() { + 
isStopping = true; + executor.shutdownNow(); + } + + // Testing support public void ensureTestingAccountsOnline(PrivateKeyAccount... onlineAccounts) { if (!BlockChain.getInstance().isTestChain()) { LOGGER.warn("Ignoring attempt to ensure test account is online for non-test chain!"); return; } - final Long now = NTP.getTime(); - if (now == null) + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) return; - final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); + final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp(); - synchronized (this.onlineAccounts) { - this.onlineAccounts.clear(); + Set replacementAccounts = new HashSet<>(); + for (PrivateKeyAccount onlineAccount : onlineAccounts) { + // Check mintingAccount is actually reward-share? - for (PrivateKeyAccount onlineAccount : onlineAccounts) { - // Check mintingAccount is actually reward-share? + byte[] signature = useAggregateCompatibleSignature + ? Qortal25519Extras.signForAggregation(onlineAccount.getPrivateKey(), timestampBytes) + : onlineAccount.sign(timestampBytes); + byte[] publicKey = onlineAccount.getPublicKey(); - byte[] signature = onlineAccount.sign(timestampBytes); - byte[] publicKey = onlineAccount.getPublicKey(); + OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); + replacementAccounts.add(ourOnlineAccountData); + } - OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); - this.onlineAccounts.add(ourOnlineAccountData); + this.currentOnlineAccounts.clear(); + addAccounts(replacementAccounts); + } + + // Online accounts import queue + + private void processOnlineAccountsImportQueue() { + if (this.onlineAccountsImportQueue.isEmpty()) + // Nothing to do + return; + + LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size()); + + Set onlineAccountsToAdd = new HashSet<>(); + try (final Repository repository = RepositoryManager.getRepository()) { + for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) { + if (isStopping) + return; + + boolean isValid = this.isValidCurrentAccount(repository, onlineAccountData); + if (isValid) + onlineAccountsToAdd.add(onlineAccountData); + + // Remove from queue + onlineAccountsImportQueue.remove(onlineAccountData); } + } catch (DataException e) { + LOGGER.error("Repository issue while verifying online accounts", e); + } + + if (!onlineAccountsToAdd.isEmpty()) { + LOGGER.debug("Merging {} validated online accounts from import queue", onlineAccountsToAdd.size()); + addAccounts(onlineAccountsToAdd); } } - private void performOnlineAccountsTasks() { + // Utilities + + public static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) { + if (inplaceArray == null) + return Arrays.copyOf(otherArray, otherArray.length); + + // Start from index 1 to enforce static leading byte + for (int i = 1; i < otherArray.length; i++) + inplaceArray[i] ^= otherArray[i]; + + return inplaceArray; + } + + private static boolean isValidCurrentAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { + final Long now = NTP.getTime(); + if (now == null) + return false; + + byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); + long onlineAccountTimestamp = 
onlineAccountData.getTimestamp(); + + // Check timestamp is 'recent' here + if (Math.abs(onlineAccountTimestamp - now) > getOnlineTimestampModulus() * 2) { + LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + return false; + } + + // Verify signature + byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); + boolean isSignatureValid = onlineAccountTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp() + ? Qortal25519Extras.verifyAggregated(rewardSharePublicKey, onlineAccountData.getSignature(), data) + : Crypto.verify(rewardSharePublicKey, onlineAccountData.getSignature(), data); + if (!isSignatureValid) { + LOGGER.trace(() -> String.format("Rejecting invalid online account %s", Base58.encode(rewardSharePublicKey))); + return false; + } + + // Qortal: check online account is actually reward-share + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey); + if (rewardShareData == null) { + // Reward-share doesn't even exist - probably not a good sign + LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(rewardSharePublicKey))); + return false; + } + + Account mintingAccount = new Account(repository, rewardShareData.getMinter()); + if (!mintingAccount.canMint()) { + // Minting-account component of reward-share can no longer mint - disregard + LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress())); + return false; + } + + return true; + } + + /** Adds accounts, maybe rebuilds hashes, returns whether any new accounts were added / hashes rebuilt. */ + private boolean addAccounts(Collection onlineAccountsToAdd) { + // For keeping track of which hashes to rebuild + Map> hashesToRebuild = new HashMap<>(); + + for (OnlineAccountData onlineAccountData : onlineAccountsToAdd) { + boolean isNewEntry = this.addAccount(onlineAccountData); + + if (isNewEntry) + hashesToRebuild.computeIfAbsent(onlineAccountData.getTimestamp(), k -> new HashSet<>()).add(onlineAccountData.getPublicKey()[0]); + } + + if (hashesToRebuild.isEmpty()) + return false; + + for (var entry : hashesToRebuild.entrySet()) { + Long timestamp = entry.getKey(); + + LOGGER.debug(() -> String.format("Rehashing for timestamp %d and leading bytes %s", + timestamp, + entry.getValue().stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", ")) + ) + ); + + for (Byte leadingByte : entry.getValue()) { + byte[] pubkeyHash = currentOnlineAccounts.get(timestamp).stream() + .map(OnlineAccountData::getPublicKey) + .filter(publicKey -> leadingByte == publicKey[0]) + .reduce(null, OnlineAccountsManager::xorByteArrayInPlace); + + currentOnlineAccountsHashes.computeIfAbsent(timestamp, k -> new ConcurrentHashMap<>()).put(leadingByte, pubkeyHash); + + LOGGER.trace(() -> String.format("Rebuilt hash %s for timestamp %d and leading byte %02x using %d public keys", + HashCode.fromBytes(pubkeyHash), + timestamp, + leadingByte, + currentOnlineAccounts.get(timestamp).stream() + .map(OnlineAccountData::getPublicKey) + .filter(publicKey -> leadingByte == publicKey[0]) + .count() + )); + } + } + + LOGGER.debug(String.format("we have online accounts for timestamps: %s", String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", "))))); + + 
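For each (timestamp, leading public-key byte) bucket that gained a new entry, the rebuild above folds every public key in that bucket together with xorByteArrayInPlace(), leaving byte 0 untouched so the bucket's leading byte stays recognisable. A self-contained sketch of that folding step (the class and helper names here are illustrative, not from the patch):

    import java.util.*;

    public class XorBucketHashSketch {
        /** XOR otherArray into inplaceArray, skipping index 0, mirroring xorByteArrayInPlace() above. */
        static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) {
            if (inplaceArray == null)
                return Arrays.copyOf(otherArray, otherArray.length);

            for (int i = 1; i < otherArray.length; i++)
                inplaceArray[i] ^= otherArray[i];

            return inplaceArray;
        }

        /** Fold all public keys sharing the same leading byte into one order-independent summary. */
        static byte[] buildBucketHash(List<byte[]> publicKeysWithSameLeadingByte) {
            byte[] hash = null;
            for (byte[] publicKey : publicKeysWithSameLeadingByte)
                hash = xorByteArrayInPlace(hash, publicKey);
            return hash;
        }

        public static void main(String[] args) {
            byte[] a = new byte[32]; a[0] = 0x42; a[1] = 0x01;
            byte[] b = new byte[32]; b[0] = 0x42; b[1] = 0x03;

            // XOR is commutative and associative, so the summary is independent of insertion order:
            // two peers holding the same key set for a timestamp compute identical hashes.
            System.out.println(Arrays.equals(buildBucketHash(List.of(a, b)), buildBucketHash(List.of(b, a)))); // true
        }
    }

That order-independence is what allows the GET_ONLINE_ACCOUNTS_V3 handler further down to compare a single 32-byte hash per bucket instead of exchanging full account lists.
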
return true; + } + + private boolean addAccount(OnlineAccountData onlineAccountData) { + byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); + long onlineAccountTimestamp = onlineAccountData.getTimestamp(); + + Set onlineAccounts = this.currentOnlineAccounts.computeIfAbsent(onlineAccountTimestamp, k -> ConcurrentHashMap.newKeySet()); + boolean isNewEntry = onlineAccounts.add(onlineAccountData); + + if (isNewEntry) + LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + else + LOGGER.trace(() -> String.format("Not updating existing online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + + return isNewEntry; + } + + /** + * Expire old entries. + */ + private void expireOldOnlineAccounts() { final Long now = NTP.getTime(); if (now == null) return; - // Expire old entries - final long lastSeenExpiryPeriod = (getOnlineTimestampModulus() * 2) + (1 * 60 * 1000L); - final long cutoffThreshold = now - lastSeenExpiryPeriod; - synchronized (this.onlineAccounts) { - Iterator iterator = this.onlineAccounts.iterator(); - while (iterator.hasNext()) { - OnlineAccountData onlineAccountData = iterator.next(); - - if (onlineAccountData.getTimestamp() < cutoffThreshold) { - iterator.remove(); - - LOGGER.trace(() -> { - PublicKeyAccount otherAccount = new PublicKeyAccount(null, onlineAccountData.getPublicKey()); - return String.format("Removed expired online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp()); - }); - } - } - } - - // Request data from other peers? - if ((this.onlineAccountsTasksTimestamp % ONLINE_ACCOUNTS_BROADCAST_INTERVAL) < ONLINE_ACCOUNTS_TASKS_INTERVAL) { - List safeOnlineAccounts; - synchronized (this.onlineAccounts) { - safeOnlineAccounts = new ArrayList<>(this.onlineAccounts); - } - - Message messageV1 = new GetOnlineAccountsMessage(safeOnlineAccounts); - Message messageV2 = new GetOnlineAccountsV2Message(safeOnlineAccounts); - - Network.getInstance().broadcast(peer -> - peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1 - ); - } + final long cutoffThreshold = now - MAX_CACHED_TIMESTAMP_SETS * getOnlineTimestampModulus(); + this.currentOnlineAccounts.keySet().removeIf(timestamp -> timestamp < cutoffThreshold); + this.currentOnlineAccountsHashes.keySet().removeIf(timestamp -> timestamp < cutoffThreshold); } - private void sendOurOnlineAccountsInfo() { + /** + * Request data from other peers. (Pre-V3) + */ + private void requestLegacyRemoteOnlineAccounts() { final Long now = NTP.getTime(); + if (now == null) + return; + + // Don't bother if we're not up to date + if (!Controller.getInstance().isUpToDate()) + return; + + List mergedOnlineAccounts = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + + Message messageV2 = new GetOnlineAccountsV2Message(mergedOnlineAccounts); + + Network.getInstance().broadcast(peer -> + peer.getPeersVersion() < ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV2 + : null + ); + } + + /** + * Request data from other peers. 
V3+ + */ + private void requestRemoteOnlineAccounts() { + final Long now = NTP.getTime(); + if (now == null) + return; + + // Don't bother if we're not up to date + if (!Controller.getInstance().isUpToDate()) + return; + + Message messageV3 = new GetOnlineAccountsV3Message(currentOnlineAccountsHashes); + + Network.getInstance().broadcast(peer -> + peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV3 + : null + ); + } + + /** + * Send online accounts that are minting on this node. + */ + private void sendOurOnlineAccountsInfo() { + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) + return; + + Long now = NTP.getTime(); if (now == null) { return; } + // Don't submit if we're more than 2 hours out of sync (unless we're in recovery mode) + final Long minLatestBlockTimestamp = now - (2 * 60 * 60 * 1000L); + if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp) && !Synchronizer.getInstance().getRecoveryMode()) { + return; + } + List mintingAccounts; try (final Repository repository = RepositoryManager.getRepository()) { mintingAccounts = repository.getAccountRepository().getMintingAccounts(); - // We have no accounts, but don't reset timestamp + // We have no accounts to send if (mintingAccounts.isEmpty()) return; - // Only reward-share accounts allowed + // Only active reward-shares allowed Iterator iterator = mintingAccounts.iterator(); - int i = 0; while (iterator.hasNext()) { MintingAccountData mintingAccountData = iterator.next(); @@ -316,107 +412,138 @@ public class OnlineAccountsManager extends Thread { iterator.remove(); continue; } - - if (++i > 1+1) { - iterator.remove(); - continue; - } } } catch (DataException e) { LOGGER.warn(String.format("Repository issue trying to fetch minting accounts: %s", e.getMessage())); return; } - // 'current' timestamp - final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); - boolean hasInfoChanged = false; + final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp(); byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); List ourOnlineAccounts = new ArrayList<>(); - MINTING_ACCOUNTS: for (MintingAccountData mintingAccountData : mintingAccounts) { - PrivateKeyAccount mintingAccount = new PrivateKeyAccount(null, mintingAccountData.getPrivateKey()); + byte[] privateKey = mintingAccountData.getPrivateKey(); + byte[] publicKey = Crypto.toPublicKey(privateKey); - byte[] signature = mintingAccount.sign(timestampBytes); - byte[] publicKey = mintingAccount.getPublicKey(); + byte[] signature = useAggregateCompatibleSignature + ? 
Qortal25519Extras.signForAggregation(privateKey, timestampBytes) + : Crypto.sign(privateKey, timestampBytes); // Our account is online OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); - synchronized (this.onlineAccounts) { - Iterator iterator = this.onlineAccounts.iterator(); - while (iterator.hasNext()) { - OnlineAccountData existingOnlineAccountData = iterator.next(); - - if (Arrays.equals(existingOnlineAccountData.getPublicKey(), ourOnlineAccountData.getPublicKey())) { - // If our online account is already present, with same timestamp, then move on to next mintingAccount - if (existingOnlineAccountData.getTimestamp() == onlineAccountsTimestamp) - continue MINTING_ACCOUNTS; - - // If our online account is already present, but with older timestamp, then remove it - iterator.remove(); - break; - } - } - - this.onlineAccounts.add(ourOnlineAccountData); - } - - LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", mintingAccount.getAddress(), onlineAccountsTimestamp)); ourOnlineAccounts.add(ourOnlineAccountData); - hasInfoChanged = true; } + this.hasOurOnlineAccounts = !ourOnlineAccounts.isEmpty(); + + boolean hasInfoChanged = addAccounts(ourOnlineAccounts); + if (!hasInfoChanged) return; Message messageV1 = new OnlineAccountsMessage(ourOnlineAccounts); Message messageV2 = new OnlineAccountsV2Message(ourOnlineAccounts); + Message messageV3 = new OnlineAccountsV2Message(ourOnlineAccounts); // TODO: V3 message Network.getInstance().broadcast(peer -> - peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1 + peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV3 + : peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION + ? messageV2 + : messageV1 ); - LOGGER.trace(() -> String.format("Broadcasted %d online account%s with timestamp %d", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp)); + LOGGER.debug("Broadcasted {} online account{} with timestamp {}", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp); } - public static long toOnlineAccountTimestamp(long timestamp) { - return (timestamp / getOnlineTimestampModulus()) * getOnlineTimestampModulus(); + /** + * Returns whether online accounts manager has any online accounts with timestamp recent enough to be considered currently online. + */ + // BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not + public boolean hasOnlineAccounts() { + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) + return false; + + return this.currentOnlineAccounts.containsKey(onlineAccountsTimestamp); } - /** Returns list of online accounts with timestamp recent enough to be considered currently online. */ + public boolean hasOurOnlineAccounts() { + return this.hasOurOnlineAccounts; + } + + /** + * Returns list of online accounts matching given timestamp. 
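Everything in this class is keyed off getCurrentOnlineAccountTimestamp(), which rounds NTP time down to a multiple of the online-timestamp modulus (5 minutes before the switchover, 30 minutes after), so every node derives the same 'current' key and any signature produced inside a window de-duplicates naturally. A small worked sketch, assuming the 30-minute (V2) modulus:

    public class OnlineTimestampSketch {
        private static final long ONLINE_TIMESTAMP_MODULUS_V2 = 30 * 60 * 1000L; // 30 minutes

        /** Round a millisecond timestamp down to the start of its 30-minute window. */
        static long toOnlineTimestamp(long nowMillis) {
            return (nowMillis / ONLINE_TIMESTAMP_MODULUS_V2) * ONLINE_TIMESTAMP_MODULUS_V2;
        }

        public static void main(String[] args) {
            long now = 1_650_000_123_456L; // an arbitrary NTP time
            System.out.println(toOnlineTimestamp(now)); // prints 1649998800000 (the window began 1323456 ms earlier)
            // Any two signatures produced within the same window sign the same 8-byte timestamp,
            // so they are interchangeable for that window.
        }
    }
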
+ */ + // Block::mint() - only wants online accounts with (online) timestamp that matches block's (online) timestamp so they can be added to new block + public List getOnlineAccounts(long onlineTimestamp) { + LOGGER.info(String.format("caller's timestamp: %d, our timestamps: %s", onlineTimestamp, String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", "))))); + + return new ArrayList<>(Set.copyOf(this.currentOnlineAccounts.getOrDefault(onlineTimestamp, Collections.emptySet()))); + } + + /** + * Returns list of online accounts with timestamp recent enough to be considered currently online. + */ + // API: calls this to return list of online accounts - probably expects ALL timestamps - but going to get 'current' from now on public List getOnlineAccounts() { - final long onlineTimestamp = toOnlineAccountTimestamp(NTP.getTime()); + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) + return Collections.emptyList(); - synchronized (this.onlineAccounts) { - return this.onlineAccounts.stream().filter(account -> account.getTimestamp() == onlineTimestamp).collect(Collectors.toList()); - } + return getOnlineAccounts(onlineAccountsTimestamp); } + // Block processing - /** Returns cached, unmodifiable list of latest block's online accounts. */ - public List getLatestBlocksOnlineAccounts() { - synchronized (this.latestBlocksOnlineAccounts) { - return this.latestBlocksOnlineAccounts.peekFirst(); - } + /** + * Removes previously validated entries from block's online accounts. + *
+ * Checks both 'current' and block caches. + *
+ * Typically called by {@link Block#areOnlineAccountsValid()} + */ + public void removeKnown(Set blocksOnlineAccounts, Long timestamp) { + Set onlineAccounts = this.currentOnlineAccounts.get(timestamp); + + // If not 'current' timestamp - try block cache instead + if (onlineAccounts == null) + onlineAccounts = this.latestBlocksOnlineAccounts.get(timestamp); + + if (onlineAccounts != null) + blocksOnlineAccounts.removeAll(onlineAccounts); } - /** Caches list of latest block's online accounts. Typically called by Block.process() */ - public void pushLatestBlocksOnlineAccounts(List latestBlocksOnlineAccounts) { - synchronized (this.latestBlocksOnlineAccounts) { - if (this.latestBlocksOnlineAccounts.size() == MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) - this.latestBlocksOnlineAccounts.pollLast(); - - this.latestBlocksOnlineAccounts.addFirst(latestBlocksOnlineAccounts == null - ? Collections.emptyList() - : Collections.unmodifiableList(latestBlocksOnlineAccounts)); + /** + * Adds block's online accounts to one of OnlineAccountManager's caches. + *
+ * It is assumed that the online accounts have been verified. + *
+ * Typically called by {@link Block#areOnlineAccountsValid()} + */ + public void addBlocksOnlineAccounts(Set blocksOnlineAccounts, Long timestamp) { + // We want to add to 'current' in preference if possible + if (this.currentOnlineAccounts.containsKey(timestamp)) { + addAccounts(blocksOnlineAccounts); + return; } - } - /** Reverts list of latest block's online accounts. Typically called by Block.orphan() */ - public void popLatestBlocksOnlineAccounts() { - synchronized (this.latestBlocksOnlineAccounts) { - this.latestBlocksOnlineAccounts.pollFirst(); + // Add to block cache instead + this.latestBlocksOnlineAccounts.computeIfAbsent(timestamp, k -> ConcurrentHashMap.newKeySet()) + .addAll(blocksOnlineAccounts); + + // If block cache has grown too large then we need to trim. + if (this.latestBlocksOnlineAccounts.size() > MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) { + // However, be careful to trim the opposite end to the entry we just added! + Long firstKey = this.latestBlocksOnlineAccounts.firstKey(); + if (!firstKey.equals(timestamp)) + this.latestBlocksOnlineAccounts.remove(firstKey); + else + this.latestBlocksOnlineAccounts.remove(this.latestBlocksOnlineAccounts.lastKey()); } } @@ -429,45 +556,48 @@ public class OnlineAccountsManager extends Thread { List excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts(); // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts - List accountsToSend; - synchronized (this.onlineAccounts) { - accountsToSend = new ArrayList<>(this.onlineAccounts); - } + List accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + int prefilterSize = accountsToSend.size(); Iterator iterator = accountsToSend.iterator(); - - SEND_ITERATOR: while (iterator.hasNext()) { OnlineAccountData onlineAccountData = iterator.next(); - for (int i = 0; i < excludeAccounts.size(); ++i) { - OnlineAccountData excludeAccountData = excludeAccounts.get(i); - + for (OnlineAccountData excludeAccountData : excludeAccounts) { if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) { iterator.remove(); - continue SEND_ITERATOR; + break; } } } + if (accountsToSend.isEmpty()) + return; + Message onlineAccountsMessage = new OnlineAccountsMessage(accountsToSend); peer.sendMessage(onlineAccountsMessage); - LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer)); + LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer); } public void onNetworkOnlineAccountsMessage(Peer peer, Message message) { OnlineAccountsMessage onlineAccountsMessage = (OnlineAccountsMessage) message; List peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts(); - LOGGER.trace(() -> String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer)); + LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer); - try (final Repository repository = RepositoryManager.getRepository()) { - for (OnlineAccountData onlineAccountData : peersOnlineAccounts) - this.verifyAndAddAccount(repository, onlineAccountData); - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while verifying online accounts from peer %s", peer), e); + int importCount = 0; + + // Add any online accounts to the queue that aren't already present + 
for (OnlineAccountData onlineAccountData : peersOnlineAccounts) { + boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData); + + if (isNewEntry) + importCount++; } + + if (importCount > 0) + LOGGER.debug("Added {} online accounts to queue", importCount); } public void onNetworkGetOnlineAccountsV2Message(Peer peer, Message message) { @@ -476,58 +606,106 @@ public class OnlineAccountsManager extends Thread { List excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts(); // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts - List accountsToSend; - synchronized (this.onlineAccounts) { - accountsToSend = new ArrayList<>(this.onlineAccounts); - } + List accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + int prefilterSize = accountsToSend.size(); Iterator iterator = accountsToSend.iterator(); - - SEND_ITERATOR: while (iterator.hasNext()) { OnlineAccountData onlineAccountData = iterator.next(); - for (int i = 0; i < excludeAccounts.size(); ++i) { - OnlineAccountData excludeAccountData = excludeAccounts.get(i); - + for (OnlineAccountData excludeAccountData : excludeAccounts) { if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) { iterator.remove(); - continue SEND_ITERATOR; + break; } } } + if (accountsToSend.isEmpty()) + return; + Message onlineAccountsMessage = new OnlineAccountsV2Message(accountsToSend); peer.sendMessage(onlineAccountsMessage); - LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer)); + LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer); } public void onNetworkOnlineAccountsV2Message(Peer peer, Message message) { OnlineAccountsV2Message onlineAccountsMessage = (OnlineAccountsV2Message) message; List peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts(); - LOGGER.debug(String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer)); + LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer); int importCount = 0; // Add any online accounts to the queue that aren't already present for (OnlineAccountData onlineAccountData : peersOnlineAccounts) { + boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData); - // Do we already know about this online account data? - if (onlineAccounts.contains(onlineAccountData)) { - continue; - } - - // Is it already in the import queue? - if (onlineAccountsImportQueue.contains(onlineAccountData)) { - continue; - } - - onlineAccountsImportQueue.add(onlineAccountData); - importCount++; + if (isNewEntry) + importCount++; } - LOGGER.debug(String.format("Added %d online accounts to queue", importCount)); + if (importCount > 0) + LOGGER.debug("Added {} online accounts to queue", importCount); + } + + public void onNetworkGetOnlineAccountsV3Message(Peer peer, Message message) { + GetOnlineAccountsV3Message getOnlineAccountsMessage = (GetOnlineAccountsV3Message) message; + + Map> peersHashes = getOnlineAccountsMessage.getHashesByTimestampThenByte(); + List outgoingOnlineAccounts = new ArrayList<>(); + + // Warning: no double-checking/fetching - we must be ConcurrentMap compatible! + // So no contains()-then-get() or multiple get()s on the same key/map. 
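This warning (the comment continues below) is about plain thread-safety: the expiry task can remove a timestamp entry from these ConcurrentHashMaps at any moment, so a contains()-then-get() pair may see the key on the first read and null on the second. A minimal illustration of the racy pattern being avoided versus the single-read form the handler uses (names here are purely illustrative):

    import java.util.*;
    import java.util.concurrent.*;

    public class ConcurrentMapLookupSketch {
        private final Map<Long, Set<String>> accountsByTimestamp = new ConcurrentHashMap<>();

        // Racy: the entry may be removed between containsKey() and get(), so get() can return null.
        int racySize(long timestamp) {
            if (accountsByTimestamp.containsKey(timestamp))
                return accountsByTimestamp.get(timestamp).size(); // potential NullPointerException
            return 0;
        }

        // Safe: one atomic read per key, with a default when the entry is missing.
        int safeSize(long timestamp) {
            return accountsByTimestamp.getOrDefault(timestamp, Collections.emptySet()).size();
        }
    }
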
+ // We also use getOrDefault() with emptySet() on currentOnlineAccounts in case corresponding timestamp entry isn't there. + for (var ourOuterMapEntry : currentOnlineAccountsHashes.entrySet()) { + Long timestamp = ourOuterMapEntry.getKey(); + + var ourInnerMap = ourOuterMapEntry.getValue(); + var peersInnerMap = peersHashes.get(timestamp); + + if (peersInnerMap == null) { + // Peer doesn't have this timestamp, so if it's valid (i.e. not too old) then we'd have to send all of ours + Set timestampsOnlineAccounts = this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()); + outgoingOnlineAccounts.addAll(timestampsOnlineAccounts); + + LOGGER.debug(() -> String.format("Going to send all %d online accounts for timestamp %d", timestampsOnlineAccounts.size(), timestamp)); + } else { + // Quick cache of which leading bytes to send so we only have to filter once + Set outgoingLeadingBytes = new HashSet<>(); + + // We have entries for this timestamp so compare against peer's entries + for (var ourInnerMapEntry : ourInnerMap.entrySet()) { + Byte leadingByte = ourInnerMapEntry.getKey(); + byte[] peersHash = peersInnerMap.get(leadingByte); + + if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) { + // For this leading byte: hashes don't match or peer doesn't have entry + // Send all online accounts for this timestamp and leading byte + outgoingLeadingBytes.add(leadingByte); + } + } + + int beforeAddSize = outgoingOnlineAccounts.size(); + + this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()).stream() + .filter(account -> outgoingLeadingBytes.contains(account.getPublicKey()[0])) + .forEach(outgoingOnlineAccounts::add); + + if (outgoingOnlineAccounts.size() > beforeAddSize) + LOGGER.debug(String.format("Going to send %d online accounts for timestamp %d and leading bytes %s", + outgoingOnlineAccounts.size() - beforeAddSize, + timestamp, + outgoingLeadingBytes.stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", ")) + ) + ); + } + } + + Message onlineAccountsMessage = new OnlineAccountsV2Message(outgoingOnlineAccounts); // TODO: V3 message + peer.sendMessage(onlineAccountsMessage); + + LOGGER.debug("Sent {} online accounts to {}", outgoingOnlineAccounts.size(), peer); } } diff --git a/src/main/java/org/qortal/controller/Synchronizer.java b/src/main/java/org/qortal/controller/Synchronizer.java index 8f3a34bb..74a4a785 100644 --- a/src/main/java/org/qortal/controller/Synchronizer.java +++ b/src/main/java/org/qortal/controller/Synchronizer.java @@ -26,14 +26,7 @@ import org.qortal.event.Event; import org.qortal.event.EventBus; import org.qortal.network.Network; import org.qortal.network.Peer; -import org.qortal.network.message.BlockMessage; -import org.qortal.network.message.BlockSummariesMessage; -import org.qortal.network.message.GetBlockMessage; -import org.qortal.network.message.GetBlockSummariesMessage; -import org.qortal.network.message.GetSignaturesV2Message; -import org.qortal.network.message.Message; -import org.qortal.network.message.SignaturesMessage; -import org.qortal.network.message.MessageType; +import org.qortal.network.message.*; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; @@ -88,7 +81,7 @@ public class Synchronizer extends Thread { private boolean syncRequestPending = false; // Keep track of invalid blocks so that we don't keep trying to sync them - private Map invalidBlockSignatures = 
Collections.synchronizedMap(new HashMap<>()); + private Map invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>()); public Long timeValidBlockLastReceived = null; public Long timeInvalidBlockLastReceived = null; @@ -178,8 +171,8 @@ public class Synchronizer extends Thread { public Integer getSyncPercent() { synchronized (this.syncLock) { - // Report as 100% synced if the latest block is within the last 30 mins - final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L); + // Report as 100% synced if the latest block is within the last 60 mins + final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L); if (Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) { return 100; } @@ -624,7 +617,7 @@ public class Synchronizer extends Thread { // We have already determined that the correct chain diverged from a lower height. We are safe to skip these peers. for (Peer peer : peersSharingCommonBlock) { LOGGER.debug(String.format("Peer %s has common block at height %d but the superior chain is at height %d. Removing it from this round.", peer, commonBlockSummary.getHeight(), dropPeersAfterCommonBlockHeight)); - this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature()); + //this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature()); } continue; } @@ -635,7 +628,9 @@ public class Synchronizer extends Thread { int minChainLength = this.calculateMinChainLengthOfPeers(peersSharingCommonBlock, commonBlockSummary); // Fetch block summaries from each peer - for (Peer peer : peersSharingCommonBlock) { + Iterator peersSharingCommonBlockIterator = peersSharingCommonBlock.iterator(); + while (peersSharingCommonBlockIterator.hasNext()) { + Peer peer = (Peer) peersSharingCommonBlockIterator.next(); // If we're shutting down, just return the latest peer list if (Controller.isStopping()) @@ -692,6 +687,8 @@ public class Synchronizer extends Thread { if (this.containsInvalidBlockSummary(peer.getCommonBlockData().getBlockSummariesAfterCommonBlock())) { LOGGER.debug("Ignoring peer %s because it holds an invalid block", peer); peers.remove(peer); + peersSharingCommonBlockIterator.remove(); + continue; } // Reduce minChainLength if needed. 
If we don't have any blocks, this peer will be excluded from chain weight comparisons later in the process, so we shouldn't update minChainLength @@ -847,6 +844,10 @@ public class Synchronizer extends Thread { /* Invalid block signature tracking */ + public Map getInvalidBlockSignatures() { + return this.invalidBlockSignatures; + } + private void addInvalidBlockSignature(byte[] signature) { Long now = NTP.getTime(); if (now == null) { @@ -854,8 +855,7 @@ public class Synchronizer extends Thread { } // Add or update existing entry - String sig58 = Base58.encode(signature); - invalidBlockSignatures.put(sig58, now); + invalidBlockSignatures.put(ByteArray.wrap(signature), now); } private void deleteOlderInvalidSignatures(Long now) { if (now == null) { @@ -874,17 +874,16 @@ public class Synchronizer extends Thread { } } } - private boolean containsInvalidBlockSummary(List blockSummaries) { + public boolean containsInvalidBlockSummary(List blockSummaries) { if (blockSummaries == null || invalidBlockSignatures == null) { return false; } // Loop through our known invalid blocks and check each one against supplied block summaries - for (String invalidSignature58 : invalidBlockSignatures.keySet()) { - byte[] invalidSignature = Base58.decode(invalidSignature58); + for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) { for (BlockSummaryData blockSummary : blockSummaries) { byte[] signature = blockSummary.getSignature(); - if (Arrays.equals(signature, invalidSignature)) { + if (Arrays.equals(signature, invalidSignature.value)) { return true; } } @@ -897,10 +896,9 @@ public class Synchronizer extends Thread { } // Loop through our known invalid blocks and check each one against supplied block signatures - for (String invalidSignature58 : invalidBlockSignatures.keySet()) { - byte[] invalidSignature = Base58.decode(invalidSignature58); + for (ByteArray invalidSignature : invalidBlockSignatures.keySet()) { for (byte[] signature : blockSignatures) { - if (Arrays.equals(signature, invalidSignature)) { + if (Arrays.equals(signature, invalidSignature.value)) { return true; } } @@ -1579,12 +1577,23 @@ public class Synchronizer extends Thread { Message getBlockMessage = new GetBlockMessage(signature); Message message = peer.getResponse(getBlockMessage); - if (message == null || message.getType() != MessageType.BLOCK) + if (message == null) return null; - BlockMessage blockMessage = (BlockMessage) message; + switch (message.getType()) { + case BLOCK: { + BlockMessage blockMessage = (BlockMessage) message; + return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates()); + } - return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates()); + case BLOCK_V2: { + BlockV2Message blockMessage = (BlockV2Message) message; + return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStatesHash()); + } + + default: + return null; + } } public void populateBlockSummariesMinterLevels(Repository repository, List blockSummaries) throws DataException { diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileListManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileListManager.java index a0b4886b..60b3707b 100644 --- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileListManager.java +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileListManager.java @@ -67,6 +67,9 @@ public class 
ArbitraryDataFileListManager { /** Maximum number of hops that a file list relay request is allowed to make */ public static int RELAY_REQUEST_MAX_HOPS = 4; + /** Minimum peer version to use relay */ + public static String RELAY_MIN_PEER_VERSION = "3.4.0"; + private ArbitraryDataFileListManager() { } @@ -524,6 +527,7 @@ public class ArbitraryDataFileListManager { forwardArbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, arbitraryDataFileListMessage.getPeerAddress(), arbitraryDataFileListMessage.isRelayPossible()); } + forwardArbitraryDataFileListMessage.setId(message.getId()); // Forward to requesting peer LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer); @@ -690,12 +694,14 @@ public class ArbitraryDataFileListManager { // Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast Message relayGetArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, requestTime, requestHops, requestingPeer); + relayGetArbitraryDataFileListMessage.setId(message.getId()); LOGGER.debug("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops); Network.getInstance().broadcast( - broadcastPeer -> broadcastPeer == peer || - Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) - ? null : relayGetArbitraryDataFileListMessage); + broadcastPeer -> + !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null : + broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryDataFileListMessage + ); } else { diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryMetadataManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryMetadataManager.java index 0903de60..eec0d935 100644 --- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryMetadataManager.java +++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryMetadataManager.java @@ -22,8 +22,7 @@ import org.qortal.utils.Triple; import java.io.IOException; import java.util.*; -import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.RELAY_REQUEST_MAX_DURATION; -import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.RELAY_REQUEST_MAX_HOPS; +import static org.qortal.controller.arbitrary.ArbitraryDataFileListManager.*; public class ArbitraryMetadataManager { @@ -339,6 +338,7 @@ public class ArbitraryMetadataManager { if (requestingPeer != null) { ArbitraryMetadataMessage forwardArbitraryMetadataMessage = new ArbitraryMetadataMessage(signature, arbitraryMetadataMessage.getArbitraryMetadataFile()); + forwardArbitraryMetadataMessage.setId(arbitraryMetadataMessage.getId()); // Forward to requesting peer LOGGER.debug("Forwarding metadata to requesting peer: {}", requestingPeer); @@ -434,12 +434,13 @@ public class ArbitraryMetadataManager { // Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast Message relayGetArbitraryMetadataMessage = new GetArbitraryMetadataMessage(signature, requestTime, requestHops); + relayGetArbitraryMetadataMessage.setId(message.getId()); LOGGER.debug("Rebroadcasting metadata request from peer {} for signature {} to our other peers... 
totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops); Network.getInstance().broadcast( - broadcastPeer -> broadcastPeer == peer || - Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) - ? null : relayGetArbitraryMetadataMessage); + broadcastPeer -> + !broadcastPeer.isAtLeastVersion(RELAY_MIN_PEER_VERSION) ? null : + broadcastPeer == peer || Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost()) ? null : relayGetArbitraryMetadataMessage); } else { diff --git a/src/main/java/org/qortal/controller/tradebot/TradeBot.java b/src/main/java/org/qortal/controller/tradebot/TradeBot.java index 938141e0..2c448607 100644 --- a/src/main/java/org/qortal/controller/tradebot/TradeBot.java +++ b/src/main/java/org/qortal/controller/tradebot/TradeBot.java @@ -242,8 +242,8 @@ public class TradeBot implements Listener { if (!(event instanceof Synchronizer.NewChainTipEvent)) return; - // Don't process trade bots or broadcast presence timestamps if our chain is more than 30 minutes old - final Long minLatestBlockTimestamp = NTP.getTime() - (30 * 60 * 1000L); + // Don't process trade bots or broadcast presence timestamps if our chain is more than 60 minutes old + final Long minLatestBlockTimestamp = NTP.getTime() - (60 * 60 * 1000L); if (!Controller.getInstance().isUpToDate(minLatestBlockTimestamp)) return; @@ -292,7 +292,7 @@ public class TradeBot implements Listener { } public static byte[] deriveTradeNativePublicKey(byte[] privateKey) { - return PrivateKeyAccount.toPublicKey(privateKey); + return Crypto.toPublicKey(privateKey); } public static byte[] deriveTradeForeignPublicKey(byte[] privateKey) { diff --git a/src/main/java/org/qortal/crosschain/Bitcoiny.java b/src/main/java/org/qortal/crosschain/Bitcoiny.java index f66ea939..56c5b409 100644 --- a/src/main/java/org/qortal/crosschain/Bitcoiny.java +++ b/src/main/java/org/qortal/crosschain/Bitcoiny.java @@ -375,7 +375,7 @@ public abstract class Bitcoiny implements ForeignBlockchain { public Long getWalletBalanceFromTransactions(String key58) throws ForeignBlockchainException { long balance = 0; - Comparator oldestTimestampFirstComparator = Comparator.comparingInt(SimpleTransaction::getTimestamp); + Comparator oldestTimestampFirstComparator = Comparator.comparingLong(SimpleTransaction::getTimestamp); List transactions = getWalletTransactions(key58).stream().sorted(oldestTimestampFirstComparator).collect(Collectors.toList()); for (SimpleTransaction transaction : transactions) { balance += transaction.getTotalAmount(); @@ -455,7 +455,7 @@ public abstract class Bitcoiny implements ForeignBlockchain { // Process new keys } while (true); - Comparator newestTimestampFirstComparator = Comparator.comparingInt(SimpleTransaction::getTimestamp).reversed(); + Comparator newestTimestampFirstComparator = Comparator.comparingLong(SimpleTransaction::getTimestamp).reversed(); // Update cache and return transactionsCacheTimestamp = NTP.getTime(); @@ -537,7 +537,8 @@ public abstract class Bitcoiny implements ForeignBlockchain { // All inputs and outputs relate to this wallet, so the balance should be unaffected amount = 0; } - return new SimpleTransaction(t.txHash, t.timestamp, amount, fee, inputs, outputs); + long timestampMillis = t.timestamp * 1000L; + return new SimpleTransaction(t.txHash, timestampMillis, amount, fee, inputs, outputs); } /** diff --git a/src/main/java/org/qortal/crosschain/SimpleTransaction.java 
b/src/main/java/org/qortal/crosschain/SimpleTransaction.java index 27c9f9e3..53039020 100644 --- a/src/main/java/org/qortal/crosschain/SimpleTransaction.java +++ b/src/main/java/org/qortal/crosschain/SimpleTransaction.java @@ -7,7 +7,7 @@ import java.util.List; @XmlAccessorType(XmlAccessType.FIELD) public class SimpleTransaction { private String txHash; - private Integer timestamp; + private Long timestamp; private long totalAmount; private long feeAmount; private List inputs; @@ -74,7 +74,7 @@ public class SimpleTransaction { public SimpleTransaction() { } - public SimpleTransaction(String txHash, Integer timestamp, long totalAmount, long feeAmount, List inputs, List outputs) { + public SimpleTransaction(String txHash, Long timestamp, long totalAmount, long feeAmount, List inputs, List outputs) { this.txHash = txHash; this.timestamp = timestamp; this.totalAmount = totalAmount; @@ -87,7 +87,7 @@ public class SimpleTransaction { return txHash; } - public Integer getTimestamp() { + public Long getTimestamp() { return timestamp; } diff --git a/src/main/java/org/qortal/crypto/BouncyCastle25519.java b/src/main/java/org/qortal/crypto/BouncyCastle25519.java deleted file mode 100644 index 1a2e0de9..00000000 --- a/src/main/java/org/qortal/crypto/BouncyCastle25519.java +++ /dev/null @@ -1,99 +0,0 @@ -package org.qortal.crypto; - -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.Arrays; - -import org.bouncycastle.crypto.Digest; -import org.bouncycastle.math.ec.rfc7748.X25519; -import org.bouncycastle.math.ec.rfc7748.X25519Field; -import org.bouncycastle.math.ec.rfc8032.Ed25519; - -/** Additions to BouncyCastle providing Ed25519 to X25519 key conversion. 
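As a brief gloss on what is being deleted here: the helper implemented the standard birational map between the two forms of Curve25519. For a public key it decoded the Edwards y-coordinate and computed the Montgomery u-coordinate as u = (1 + y) / (1 - y), which is exactly the onePlusY multiplied by inv(oneMinusY) field arithmetic visible in toX25519PublicKey() below. For a private key it hashed the 32-byte seed with SHA-512 and clamped the first 32 bytes of the digest (clear the low three bits, clear bit 255, set bit 254: the 0xF8, 0x7F and 0x40 masks in toX25519PrivateKey()).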
*/ -public class BouncyCastle25519 { - - private static final Class pointAffineClass; - private static final Constructor pointAffineCtor; - private static final Method decodePointVarMethod; - private static final Field yField; - - static { - try { - Class ed25519Class = Ed25519.class; - pointAffineClass = Arrays.stream(ed25519Class.getDeclaredClasses()).filter(clazz -> clazz.getSimpleName().equals("PointAffine")).findFirst().get(); - if (pointAffineClass == null) - throw new ClassNotFoundException("Can't locate PointExt inner class inside Ed25519"); - - decodePointVarMethod = ed25519Class.getDeclaredMethod("decodePointVar", byte[].class, int.class, boolean.class, pointAffineClass); - decodePointVarMethod.setAccessible(true); - - pointAffineCtor = pointAffineClass.getDeclaredConstructors()[0]; - pointAffineCtor.setAccessible(true); - - yField = pointAffineClass.getDeclaredField("y"); - yField.setAccessible(true); - } catch (NoSuchMethodException | SecurityException | IllegalArgumentException | NoSuchFieldException | ClassNotFoundException e) { - throw new RuntimeException("Can't initialize BouncyCastle25519 shim", e); - } - } - - private static int[] obtainYFromPublicKey(byte[] ed25519PublicKey) { - try { - Object pA = pointAffineCtor.newInstance(); - - Boolean result = (Boolean) decodePointVarMethod.invoke(null, ed25519PublicKey, 0, true, pA); - if (result == null || !result) - return null; - - return (int[]) yField.get(pA); - } catch (SecurityException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - throw new RuntimeException("Can't reflect into BouncyCastle", e); - } - } - - public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) { - int[] one = new int[X25519Field.SIZE]; - X25519Field.one(one); - - int[] y = obtainYFromPublicKey(ed25519PublicKey); - - int[] oneMinusY = new int[X25519Field.SIZE]; - X25519Field.sub(one, y, oneMinusY); - - int[] onePlusY = new int[X25519Field.SIZE]; - X25519Field.add(one, y, onePlusY); - - int[] oneMinusYInverted = new int[X25519Field.SIZE]; - X25519Field.inv(oneMinusY, oneMinusYInverted); - - int[] u = new int[X25519Field.SIZE]; - X25519Field.mul(onePlusY, oneMinusYInverted, u); - - X25519Field.normalize(u); - - byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE]; - X25519Field.encode(u, x25519PublicKey, 0); - - return x25519PublicKey; - } - - public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) { - Digest d = Ed25519.createPrehash(); - byte[] h = new byte[d.getDigestSize()]; - - d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length); - d.doFinal(h, 0); - - byte[] s = new byte[X25519.SCALAR_SIZE]; - - System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE); - s[0] &= 0xF8; - s[X25519.SCALAR_SIZE - 1] &= 0x7F; - s[X25519.SCALAR_SIZE - 1] |= 0x40; - - return s; - } - -} diff --git a/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java b/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java new file mode 100644 index 00000000..ebcf0f97 --- /dev/null +++ b/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java @@ -0,0 +1,1427 @@ +package org.qortal.crypto; + +import java.security.SecureRandom; + +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA512Digest; +import org.bouncycastle.math.ec.rfc7748.X25519; +import org.bouncycastle.math.ec.rfc7748.X25519Field; +import org.bouncycastle.math.raw.Interleave; +import org.bouncycastle.math.raw.Nat; +import org.bouncycastle.math.raw.Nat256; +import org.bouncycastle.util.Arrays; + +/** + * 
Duplicate of {@link org.bouncycastle.math.ec.rfc8032.Ed25519}, + * but with {@code private} modifiers replaced with {@code protected}, + * to allow for extension by {@link org.qortal.crypto.Qortal25519Extras}. + */ +public abstract class BouncyCastleEd25519 +{ + // -x^2 + y^2 == 1 + 0x52036CEE2B6FFE738CC740797779E89800700A4D4141D8AB75EB4DCA135978A3 * x^2 * y^2 + + public static final class Algorithm + { + public static final int Ed25519 = 0; + public static final int Ed25519ctx = 1; + public static final int Ed25519ph = 2; + } + + protected static class F extends X25519Field {}; + + protected static final long M08L = 0x000000FFL; + protected static final long M28L = 0x0FFFFFFFL; + protected static final long M32L = 0xFFFFFFFFL; + + protected static final int POINT_BYTES = 32; + protected static final int SCALAR_INTS = 8; + protected static final int SCALAR_BYTES = SCALAR_INTS * 4; + + public static final int PREHASH_SIZE = 64; + public static final int PUBLIC_KEY_SIZE = POINT_BYTES; + public static final int SECRET_KEY_SIZE = 32; + public static final int SIGNATURE_SIZE = POINT_BYTES + SCALAR_BYTES; + + // "SigEd25519 no Ed25519 collisions" + protected static final byte[] DOM2_PREFIX = new byte[]{ 0x53, 0x69, 0x67, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x20, 0x6e, 0x6f, 0x20, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, 0x20, 0x63, 0x6f, 0x6c, 0x6c, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x73 }; + + protected static final int[] P = new int[]{ 0xFFFFFFED, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0x7FFFFFFF }; + protected static final int[] L = new int[]{ 0x5CF5D3ED, 0x5812631A, 0xA2F79CD6, 0x14DEF9DE, 0x00000000, 0x00000000, + 0x00000000, 0x10000000 }; + + protected static final int L0 = 0xFCF5D3ED; // L0:26/-- + protected static final int L1 = 0x012631A6; // L1:24/22 + protected static final int L2 = 0x079CD658; // L2:27/-- + protected static final int L3 = 0xFF9DEA2F; // L3:23/-- + protected static final int L4 = 0x000014DF; // L4:12/11 + + protected static final int[] B_x = new int[]{ 0x0325D51A, 0x018B5823, 0x007B2C95, 0x0304A92D, 0x00D2598E, 0x01D6DC5C, + 0x01388C7F, 0x013FEC0A, 0x029E6B72, 0x0042D26D }; + protected static final int[] B_y = new int[]{ 0x02666658, 0x01999999, 0x00666666, 0x03333333, 0x00CCCCCC, 0x02666666, + 0x01999999, 0x00666666, 0x03333333, 0x00CCCCCC, }; + protected static final int[] C_d = new int[]{ 0x035978A3, 0x02D37284, 0x018AB75E, 0x026A0A0E, 0x0000E014, 0x0379E898, + 0x01D01E5D, 0x01E738CC, 0x03715B7F, 0x00A406D9 }; + protected static final int[] C_d2 = new int[]{ 0x02B2F159, 0x01A6E509, 0x01156EBD, 0x00D4141D, 0x0001C029, 0x02F3D130, + 0x03A03CBB, 0x01CE7198, 0x02E2B6FF, 0x00480DB3 }; + protected static final int[] C_d4 = new int[]{ 0x0165E2B2, 0x034DCA13, 0x002ADD7A, 0x01A8283B, 0x00038052, 0x01E7A260, + 0x03407977, 0x019CE331, 0x01C56DFF, 0x00901B67 }; + + protected static final int WNAF_WIDTH_BASE = 7; + + protected static final int PRECOMP_BLOCKS = 8; + protected static final int PRECOMP_TEETH = 4; + protected static final int PRECOMP_SPACING = 8; + protected static final int PRECOMP_POINTS = 1 << (PRECOMP_TEETH - 1); + protected static final int PRECOMP_MASK = PRECOMP_POINTS - 1; + + protected static final Object precompLock = new Object(); + // TODO[ed25519] Convert to PointPrecomp + protected static PointExt[] precompBaseTable = null; + protected static int[] precompBase = null; + + protected static class PointAccum + { + int[] x = F.create(); + int[] y = F.create(); + int[] z = F.create(); + int[] u = F.create(); + int[] 
v = F.create(); + } + + protected static class PointAffine + { + int[] x = F.create(); + int[] y = F.create(); + } + + protected static class PointExt + { + int[] x = F.create(); + int[] y = F.create(); + int[] z = F.create(); + int[] t = F.create(); + } + + protected static class PointPrecomp + { + int[] ypx_h = F.create(); + int[] ymx_h = F.create(); + int[] xyd = F.create(); + } + + protected static byte[] calculateS(byte[] r, byte[] k, byte[] s) + { + int[] t = new int[SCALAR_INTS * 2]; decodeScalar(r, 0, t); + int[] u = new int[SCALAR_INTS]; decodeScalar(k, 0, u); + int[] v = new int[SCALAR_INTS]; decodeScalar(s, 0, v); + + Nat256.mulAddTo(u, v, t); + + byte[] result = new byte[SCALAR_BYTES * 2]; + for (int i = 0; i < t.length; ++i) + { + encode32(t[i], result, i * 4); + } + return reduceScalar(result); + } + + protected static boolean checkContextVar(byte[] ctx , byte phflag) + { + return ctx == null && phflag == 0x00 + || ctx != null && ctx.length < 256; + } + + protected static int checkPoint(int[] x, int[] y) + { + int[] t = F.create(); + int[] u = F.create(); + int[] v = F.create(); + + F.sqr(x, u); + F.sqr(y, v); + F.mul(u, v, t); + F.sub(v, u, v); + F.mul(t, C_d, t); + F.addOne(t); + F.sub(t, v, t); + F.normalize(t); + + return F.isZero(t); + } + + protected static int checkPoint(int[] x, int[] y, int[] z) + { + int[] t = F.create(); + int[] u = F.create(); + int[] v = F.create(); + int[] w = F.create(); + + F.sqr(x, u); + F.sqr(y, v); + F.sqr(z, w); + F.mul(u, v, t); + F.sub(v, u, v); + F.mul(v, w, v); + F.sqr(w, w); + F.mul(t, C_d, t); + F.add(t, w, t); + F.sub(t, v, t); + F.normalize(t); + + return F.isZero(t); + } + + protected static boolean checkPointVar(byte[] p) + { + int[] t = new int[8]; + decode32(p, 0, t, 0, 8); + t[7] &= 0x7FFFFFFF; + return !Nat256.gte(t, P); + } + + protected static boolean checkScalarVar(byte[] s) + { + int[] n = new int[SCALAR_INTS]; + decodeScalar(s, 0, n); + return !Nat256.gte(n, L); + } + + protected static Digest createDigest() + { + return new SHA512Digest(); + } + + public static Digest createPrehash() + { + return createDigest(); + } + + protected static int decode24(byte[] bs, int off) + { + int n = bs[ off] & 0xFF; + n |= (bs[++off] & 0xFF) << 8; + n |= (bs[++off] & 0xFF) << 16; + return n; + } + + protected static int decode32(byte[] bs, int off) + { + int n = bs[off] & 0xFF; + n |= (bs[++off] & 0xFF) << 8; + n |= (bs[++off] & 0xFF) << 16; + n |= bs[++off] << 24; + return n; + } + + protected static void decode32(byte[] bs, int bsOff, int[] n, int nOff, int nLen) + { + for (int i = 0; i < nLen; ++i) + { + n[nOff + i] = decode32(bs, bsOff + i * 4); + } + } + + protected static boolean decodePointVar(byte[] p, int pOff, boolean negate, PointAffine r) + { + byte[] py = Arrays.copyOfRange(p, pOff, pOff + POINT_BYTES); + if (!checkPointVar(py)) + { + return false; + } + + int x_0 = (py[POINT_BYTES - 1] & 0x80) >>> 7; + py[POINT_BYTES - 1] &= 0x7F; + + F.decode(py, 0, r.y); + + int[] u = F.create(); + int[] v = F.create(); + + F.sqr(r.y, u); + F.mul(C_d, u, v); + F.subOne(u); + F.addOne(v); + + if (!F.sqrtRatioVar(u, v, r.x)) + { + return false; + } + + F.normalize(r.x); + if (x_0 == 1 && F.isZeroVar(r.x)) + { + return false; + } + + if (negate ^ (x_0 != (r.x[0] & 1))) + { + F.negate(r.x, r.x); + } + + return true; + } + + protected static void decodeScalar(byte[] k, int kOff, int[] n) + { + decode32(k, kOff, n, 0, SCALAR_INTS); + } + + protected static void dom2(Digest d, byte phflag, byte[] ctx) + { + if (ctx != null) + { + int n = 
DOM2_PREFIX.length; + byte[] t = new byte[n + 2 + ctx.length]; + System.arraycopy(DOM2_PREFIX, 0, t, 0, n); + t[n] = phflag; + t[n + 1] = (byte)ctx.length; + System.arraycopy(ctx, 0, t, n + 2, ctx.length); + + d.update(t, 0, t.length); + } + } + + protected static void encode24(int n, byte[] bs, int off) + { + bs[ off] = (byte)(n ); + bs[++off] = (byte)(n >>> 8); + bs[++off] = (byte)(n >>> 16); + } + + protected static void encode32(int n, byte[] bs, int off) + { + bs[ off] = (byte)(n ); + bs[++off] = (byte)(n >>> 8); + bs[++off] = (byte)(n >>> 16); + bs[++off] = (byte)(n >>> 24); + } + + protected static void encode56(long n, byte[] bs, int off) + { + encode32((int)n, bs, off); + encode24((int)(n >>> 32), bs, off + 4); + } + + protected static int encodePoint(PointAccum p, byte[] r, int rOff) + { + int[] x = F.create(); + int[] y = F.create(); + + F.inv(p.z, y); + F.mul(p.x, y, x); + F.mul(p.y, y, y); + F.normalize(x); + F.normalize(y); + + int result = checkPoint(x, y); + + F.encode(y, r, rOff); + r[rOff + POINT_BYTES - 1] |= ((x[0] & 1) << 7); + + return result; + } + + public static void generatePrivateKey(SecureRandom random, byte[] k) + { + random.nextBytes(k); + } + + public static void generatePublicKey(byte[] sk, int skOff, byte[] pk, int pkOff) + { + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + scalarMultBaseEncoded(s, pk, pkOff); + } + + protected static int getWindow4(int[] x, int n) + { + int w = n >>> 3, b = (n & 7) << 2; + return (x[w] >>> b) & 15; + } + + protected static byte[] getWnafVar(int[] n, int width) + { +// assert n[SCALAR_INTS - 1] >>> 28 == 0; + + int[] t = new int[SCALAR_INTS * 2]; + { + int tPos = t.length, c = 0; + int i = SCALAR_INTS; + while (--i >= 0) + { + int next = n[i]; + t[--tPos] = (next >>> 16) | (c << 16); + t[--tPos] = c = next; + } + } + + byte[] ws = new byte[253]; + + final int pow2 = 1 << width; + final int mask = pow2 - 1; + final int sign = pow2 >>> 1; + + int j = 0, carry = 0; + for (int i = 0; i < t.length; ++i, j -= 16) + { + int word = t[i]; + while (j < 16) + { + int word16 = word >>> j; + int bit = word16 & 1; + + if (bit == carry) + { + ++j; + continue; + } + + int digit = (word16 & mask) + carry; + carry = digit & sign; + digit -= (carry << 1); + carry >>>= (width - 1); + + ws[(i << 4) + j] = (byte)digit; + + j += width; + } + } + +// assert carry == 0; + + return ws; + } + + protected static void implSign(Digest d, byte[] h, byte[] s, byte[] pk, int pkOff, byte[] ctx, byte phflag, byte[] m, + int mOff, int mLen, byte[] sig, int sigOff) + { + dom2(d, phflag, ctx); + d.update(h, SCALAR_BYTES, SCALAR_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] r = reduceScalar(h); + byte[] R = new byte[POINT_BYTES]; + scalarMultBaseEncoded(r, R, 0); + + dom2(d, phflag, ctx); + d.update(R, 0, POINT_BYTES); + d.update(pk, pkOff, POINT_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + byte[] S = calculateS(r, k, s); + + System.arraycopy(R, 0, sig, sigOff, POINT_BYTES); + System.arraycopy(S, 0, sig, sigOff + POINT_BYTES, SCALAR_BYTES); + } + + protected static void implSign(byte[] sk, int skOff, byte[] ctx, byte phflag, byte[] m, int mOff, int mLen, + byte[] sig, int sigOff) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + 
d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + byte[] pk = new byte[POINT_BYTES]; + scalarMultBaseEncoded(s, pk, 0); + + implSign(d, h, s, pk, 0, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + protected static void implSign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte phflag, + byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + implSign(d, h, s, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + protected static boolean implVerify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte phflag, byte[] m, + int mOff, int mLen) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + byte[] R = Arrays.copyOfRange(sig, sigOff, sigOff + POINT_BYTES); + byte[] S = Arrays.copyOfRange(sig, sigOff + POINT_BYTES, sigOff + SIGNATURE_SIZE); + + if (!checkPointVar(R)) + { + return false; + } + if (!checkScalarVar(S)) + { + return false; + } + + PointAffine pA = new PointAffine(); + if (!decodePointVar(pk, pkOff, true, pA)) + { + return false; + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + dom2(d, phflag, ctx); + d.update(R, 0, POINT_BYTES); + d.update(pk, pkOff, POINT_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + + int[] nS = new int[SCALAR_INTS]; + decodeScalar(S, 0, nS); + + int[] nA = new int[SCALAR_INTS]; + decodeScalar(k, 0, nA); + + PointAccum pR = new PointAccum(); + scalarMultStrausVar(nS, nA, pA, pR); + + byte[] check = new byte[POINT_BYTES]; + return 0 != encodePoint(pR, check, 0) && Arrays.areEqual(check, R); + } + + protected static void pointAdd(PointExt p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.apm(r.y, r.x, b, a); + F.apm(p.y, p.x, d, c); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(r.u, r.v, c); + F.mul(c, p.t, c); + F.mul(c, C_d2, c); + F.mul(r.z, p.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointAdd(PointExt p, PointExt r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = F.create(); + int[] f = F.create(); + int[] g = F.create(); + int[] h = F.create(); + + F.apm(p.y, p.x, b, a); + F.apm(r.y, r.x, d, c); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(p.t, r.t, c); + F.mul(c, C_d2, c); + F.mul(p.z, r.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + F.mul(e, h, r.t); + } + + protected static void pointAddVar(boolean negate, PointExt p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + int[] nc, nd, nf, ng; + if (negate) + { + nc = d; nd = c; nf = g; ng = f; + } + else + { + nc = c; nd = d; nf = f; ng = g; + } + + F.apm(r.y, r.x, b, a); + F.apm(p.y, p.x, nd, nc); + F.mul(a, c, a); + 
F.mul(b, d, b); + F.mul(r.u, r.v, c); + F.mul(c, p.t, c); + F.mul(c, C_d2, c); + F.mul(r.z, p.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, ng, nf); + F.carry(ng); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointAddVar(boolean negate, PointExt p, PointExt q, PointExt r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = F.create(); + int[] f = F.create(); + int[] g = F.create(); + int[] h = F.create(); + + int[] nc, nd, nf, ng; + if (negate) + { + nc = d; nd = c; nf = g; ng = f; + } + else + { + nc = c; nd = d; nf = f; ng = g; + } + + F.apm(p.y, p.x, b, a); + F.apm(q.y, q.x, nd, nc); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(p.t, q.t, c); + F.mul(c, C_d2, c); + F.mul(p.z, q.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, ng, nf); + F.carry(ng); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + F.mul(e, h, r.t); + } + + protected static void pointAddPrecomp(PointPrecomp p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.apm(r.y, r.x, b, a); + F.mul(a, p.ymx_h, a); + F.mul(b, p.ypx_h, b); + F.mul(r.u, r.v, c); + F.mul(c, p.xyd, c); + F.apm(b, a, h, e); + F.apm(r.z, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static PointExt pointCopy(PointAccum p) + { + PointExt r = new PointExt(); + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + F.copy(p.z, 0, r.z, 0); + F.mul(p.u, p.v, r.t); + return r; + } + + protected static PointExt pointCopy(PointAffine p) + { + PointExt r = new PointExt(); + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + pointExtendXY(r); + return r; + } + + protected static PointExt pointCopy(PointExt p) + { + PointExt r = new PointExt(); + pointCopy(p, r); + return r; + } + + protected static void pointCopy(PointAffine p, PointAccum r) + { + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + pointExtendXY(r); + } + + protected static void pointCopy(PointExt p, PointExt r) + { + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + F.copy(p.z, 0, r.z, 0); + F.copy(p.t, 0, r.t, 0); + } + + protected static void pointDouble(PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.sqr(r.x, a); + F.sqr(r.y, b); + F.sqr(r.z, c); + F.add(c, c, c); + F.apm(a, b, h, g); + F.add(r.x, r.y, e); + F.sqr(e, e); + F.sub(h, e, e); + F.add(c, g, f); + F.carry(f); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointExtendXY(PointAccum p) + { + F.one(p.z); + F.copy(p.x, 0, p.u, 0); + F.copy(p.y, 0, p.v, 0); + } + + protected static void pointExtendXY(PointExt p) + { + F.one(p.z); + F.mul(p.x, p.y, p.t); + } + + protected static void pointLookup(int block, int index, PointPrecomp p) + { +// assert 0 <= block && block < PRECOMP_BLOCKS; +// assert 0 <= index && index < PRECOMP_POINTS; + + int off = block * PRECOMP_POINTS * 3 * F.SIZE; + + for (int i = 0; i < PRECOMP_POINTS; ++i) + { + int cond = ((i ^ index) - 1) >> 31; + F.cmov(cond, precompBase, off, p.ypx_h, 0); off += F.SIZE; + F.cmov(cond, precompBase, off, p.ymx_h, 0); off += F.SIZE; + F.cmov(cond, precompBase, off, p.xyd, 0); off += F.SIZE; + } + } + + protected static void pointLookup(int[] x, int n, int[] table, PointExt r) + { + // TODO This 
method is currently hardcoded to 4-bit windows and 8 precomputed points + + int w = getWindow4(x, n); + + int sign = (w >>> (4 - 1)) ^ 1; + int abs = (w ^ -sign) & 7; + +// assert sign == 0 || sign == 1; +// assert 0 <= abs && abs < 8; + + for (int i = 0, off = 0; i < 8; ++i) + { + int cond = ((i ^ abs) - 1) >> 31; + F.cmov(cond, table, off, r.x, 0); off += F.SIZE; + F.cmov(cond, table, off, r.y, 0); off += F.SIZE; + F.cmov(cond, table, off, r.z, 0); off += F.SIZE; + F.cmov(cond, table, off, r.t, 0); off += F.SIZE; + } + + F.cnegate(sign, r.x); + F.cnegate(sign, r.t); + } + + protected static void pointLookup(int[] table, int index, PointExt r) + { + int off = F.SIZE * 4 * index; + + F.copy(table, off, r.x, 0); off += F.SIZE; + F.copy(table, off, r.y, 0); off += F.SIZE; + F.copy(table, off, r.z, 0); off += F.SIZE; + F.copy(table, off, r.t, 0); + } + + protected static int[] pointPrecompute(PointAffine p, int count) + { +// assert count > 0; + + PointExt q = pointCopy(p); + PointExt d = pointCopy(q); + pointAdd(q, d); + + int[] table = F.createTable(count * 4); + int off = 0; + + int i = 0; + for (;;) + { + F.copy(q.x, 0, table, off); off += F.SIZE; + F.copy(q.y, 0, table, off); off += F.SIZE; + F.copy(q.z, 0, table, off); off += F.SIZE; + F.copy(q.t, 0, table, off); off += F.SIZE; + + if (++i == count) + { + break; + } + + pointAdd(d, q); + } + + return table; + } + + protected static PointExt[] pointPrecomputeVar(PointExt p, int count) + { +// assert count > 0; + + PointExt d = new PointExt(); + pointAddVar(false, p, p, d); + + PointExt[] table = new PointExt[count]; + table[0] = pointCopy(p); + for (int i = 1; i < count; ++i) + { + pointAddVar(false, table[i - 1], d, table[i] = new PointExt()); + } + return table; + } + + protected static void pointSetNeutral(PointAccum p) + { + F.zero(p.x); + F.one(p.y); + F.one(p.z); + F.zero(p.u); + F.one(p.v); + } + + protected static void pointSetNeutral(PointExt p) + { + F.zero(p.x); + F.one(p.y); + F.one(p.z); + F.zero(p.t); + } + + public static void precompute() + { + synchronized (precompLock) + { + if (precompBase != null) + { + return; + } + + // Precomputed table for the base point in verification ladder + { + PointExt b = new PointExt(); + F.copy(B_x, 0, b.x, 0); + F.copy(B_y, 0, b.y, 0); + pointExtendXY(b); + + precompBaseTable = pointPrecomputeVar(b, 1 << (WNAF_WIDTH_BASE - 2)); + } + + PointAccum p = new PointAccum(); + F.copy(B_x, 0, p.x, 0); + F.copy(B_y, 0, p.y, 0); + pointExtendXY(p); + + precompBase = F.createTable(PRECOMP_BLOCKS * PRECOMP_POINTS * 3); + + int off = 0; + for (int b = 0; b < PRECOMP_BLOCKS; ++b) + { + PointExt[] ds = new PointExt[PRECOMP_TEETH]; + + PointExt sum = new PointExt(); + pointSetNeutral(sum); + + for (int t = 0; t < PRECOMP_TEETH; ++t) + { + PointExt q = pointCopy(p); + pointAddVar(true, sum, q, sum); + pointDouble(p); + + ds[t] = pointCopy(p); + + if (b + t != PRECOMP_BLOCKS + PRECOMP_TEETH - 2) + { + for (int s = 1; s < PRECOMP_SPACING; ++s) + { + pointDouble(p); + } + } + } + + PointExt[] points = new PointExt[PRECOMP_POINTS]; + int k = 0; + points[k++] = sum; + + for (int t = 0; t < (PRECOMP_TEETH - 1); ++t) + { + int size = 1 << t; + for (int j = 0; j < size; ++j, ++k) + { + pointAddVar(false, points[k - size], ds[t], points[k] = new PointExt()); + } + } + +// assert k == PRECOMP_POINTS; + + int[] cs = F.createTable(PRECOMP_POINTS); + + // TODO[ed25519] A single batch inversion across all blocks? 
+ { + int[] u = F.create(); + F.copy(points[0].z, 0, u, 0); + F.copy(u, 0, cs, 0); + + int i = 0; + while (++i < PRECOMP_POINTS) + { + F.mul(u, points[i].z, u); + F.copy(u, 0, cs, i * F.SIZE); + } + + F.add(u, u, u); + F.invVar(u, u); + --i; + + int[] t = F.create(); + + while (i > 0) + { + int j = i--; + F.copy(cs, i * F.SIZE, t, 0); + F.mul(t, u, t); + F.copy(t, 0, cs, j * F.SIZE); + F.mul(u, points[j].z, u); + } + + F.copy(u, 0, cs, 0); + } + + for (int i = 0; i < PRECOMP_POINTS; ++i) + { + PointExt q = points[i]; + + int[] x = F.create(); + int[] y = F.create(); + +// F.add(q.z, q.z, x); +// F.invVar(x, y); + F.copy(cs, i * F.SIZE, y, 0); + + F.mul(q.x, y, x); + F.mul(q.y, y, y); + + PointPrecomp r = new PointPrecomp(); + F.apm(y, x, r.ypx_h, r.ymx_h); + F.mul(x, y, r.xyd); + F.mul(r.xyd, C_d4, r.xyd); + + F.normalize(r.ypx_h); + F.normalize(r.ymx_h); +// F.normalize(r.xyd); + + F.copy(r.ypx_h, 0, precompBase, off); off += F.SIZE; + F.copy(r.ymx_h, 0, precompBase, off); off += F.SIZE; + F.copy(r.xyd, 0, precompBase, off); off += F.SIZE; + } + } + +// assert off == precompBase.length; + } + } + + protected static void pruneScalar(byte[] n, int nOff, byte[] r) + { + System.arraycopy(n, nOff, r, 0, SCALAR_BYTES); + + r[0] &= 0xF8; + r[SCALAR_BYTES - 1] &= 0x7F; + r[SCALAR_BYTES - 1] |= 0x40; + } + + protected static byte[] reduceScalar(byte[] n) + { + long x00 = decode32(n, 0) & M32L; // x00:32/-- + long x01 = (decode24(n, 4) << 4) & M32L; // x01:28/-- + long x02 = decode32(n, 7) & M32L; // x02:32/-- + long x03 = (decode24(n, 11) << 4) & M32L; // x03:28/-- + long x04 = decode32(n, 14) & M32L; // x04:32/-- + long x05 = (decode24(n, 18) << 4) & M32L; // x05:28/-- + long x06 = decode32(n, 21) & M32L; // x06:32/-- + long x07 = (decode24(n, 25) << 4) & M32L; // x07:28/-- + long x08 = decode32(n, 28) & M32L; // x08:32/-- + long x09 = (decode24(n, 32) << 4) & M32L; // x09:28/-- + long x10 = decode32(n, 35) & M32L; // x10:32/-- + long x11 = (decode24(n, 39) << 4) & M32L; // x11:28/-- + long x12 = decode32(n, 42) & M32L; // x12:32/-- + long x13 = (decode24(n, 46) << 4) & M32L; // x13:28/-- + long x14 = decode32(n, 49) & M32L; // x14:32/-- + long x15 = (decode24(n, 53) << 4) & M32L; // x15:28/-- + long x16 = decode32(n, 56) & M32L; // x16:32/-- + long x17 = (decode24(n, 60) << 4) & M32L; // x17:28/-- + long x18 = n[63] & M08L; // x18:08/-- + long t; + +// x18 += (x17 >> 28); x17 &= M28L; + x09 -= x18 * L0; // x09:34/28 + x10 -= x18 * L1; // x10:33/30 + x11 -= x18 * L2; // x11:35/28 + x12 -= x18 * L3; // x12:32/31 + x13 -= x18 * L4; // x13:28/21 + + x17 += (x16 >> 28); x16 &= M28L; // x17:28/--, x16:28/-- + x08 -= x17 * L0; // x08:54/32 + x09 -= x17 * L1; // x09:52/51 + x10 -= x17 * L2; // x10:55/34 + x11 -= x17 * L3; // x11:51/36 + x12 -= x17 * L4; // x12:41/-- + +// x16 += (x15 >> 28); x15 &= M28L; + x07 -= x16 * L0; // x07:54/28 + x08 -= x16 * L1; // x08:54/53 + x09 -= x16 * L2; // x09:55/53 + x10 -= x16 * L3; // x10:55/52 + x11 -= x16 * L4; // x11:51/41 + + x15 += (x14 >> 28); x14 &= M28L; // x15:28/--, x14:28/-- + x06 -= x15 * L0; // x06:54/32 + x07 -= x15 * L1; // x07:54/53 + x08 -= x15 * L2; // x08:56/-- + x09 -= x15 * L3; // x09:55/54 + x10 -= x15 * L4; // x10:55/53 + +// x14 += (x13 >> 28); x13 &= M28L; + x05 -= x14 * L0; // x05:54/28 + x06 -= x14 * L1; // x06:54/53 + x07 -= x14 * L2; // x07:56/-- + x08 -= x14 * L3; // x08:56/51 + x09 -= x14 * L4; // x09:56/-- + + x13 += (x12 >> 28); x12 &= M28L; // x13:28/22, x12:28/-- + x04 -= x13 * L0; // x04:54/49 + x05 -= x13 * L1; // x05:54/53 + x06 
-= x13 * L2; // x06:56/-- + x07 -= x13 * L3; // x07:56/52 + x08 -= x13 * L4; // x08:56/52 + + x12 += (x11 >> 28); x11 &= M28L; // x12:28/24, x11:28/-- + x03 -= x12 * L0; // x03:54/49 + x04 -= x12 * L1; // x04:54/51 + x05 -= x12 * L2; // x05:56/-- + x06 -= x12 * L3; // x06:56/52 + x07 -= x12 * L4; // x07:56/53 + + x11 += (x10 >> 28); x10 &= M28L; // x11:29/--, x10:28/-- + x02 -= x11 * L0; // x02:55/32 + x03 -= x11 * L1; // x03:55/-- + x04 -= x11 * L2; // x04:56/55 + x05 -= x11 * L3; // x05:56/52 + x06 -= x11 * L4; // x06:56/53 + + x10 += (x09 >> 28); x09 &= M28L; // x10:29/--, x09:28/-- + x01 -= x10 * L0; // x01:55/28 + x02 -= x10 * L1; // x02:55/54 + x03 -= x10 * L2; // x03:56/55 + x04 -= x10 * L3; // x04:57/-- + x05 -= x10 * L4; // x05:56/53 + + x08 += (x07 >> 28); x07 &= M28L; // x08:56/53, x07:28/-- + x09 += (x08 >> 28); x08 &= M28L; // x09:29/25, x08:28/-- + + t = x08 >>> 27; + x09 += t; // x09:29/26 + + x00 -= x09 * L0; // x00:55/53 + x01 -= x09 * L1; // x01:55/54 + x02 -= x09 * L2; // x02:57/-- + x03 -= x09 * L3; // x03:57/-- + x04 -= x09 * L4; // x04:57/42 + + x01 += (x00 >> 28); x00 &= M28L; + x02 += (x01 >> 28); x01 &= M28L; + x03 += (x02 >> 28); x02 &= M28L; + x04 += (x03 >> 28); x03 &= M28L; + x05 += (x04 >> 28); x04 &= M28L; + x06 += (x05 >> 28); x05 &= M28L; + x07 += (x06 >> 28); x06 &= M28L; + x08 += (x07 >> 28); x07 &= M28L; + x09 = (x08 >> 28); x08 &= M28L; + + x09 -= t; + +// assert x09 == 0L || x09 == -1L; + + x00 += x09 & L0; + x01 += x09 & L1; + x02 += x09 & L2; + x03 += x09 & L3; + x04 += x09 & L4; + + x01 += (x00 >> 28); x00 &= M28L; + x02 += (x01 >> 28); x01 &= M28L; + x03 += (x02 >> 28); x02 &= M28L; + x04 += (x03 >> 28); x03 &= M28L; + x05 += (x04 >> 28); x04 &= M28L; + x06 += (x05 >> 28); x05 &= M28L; + x07 += (x06 >> 28); x06 &= M28L; + x08 += (x07 >> 28); x07 &= M28L; + + byte[] r = new byte[SCALAR_BYTES]; + encode56(x00 | (x01 << 28), r, 0); + encode56(x02 | (x03 << 28), r, 7); + encode56(x04 | (x05 << 28), r, 14); + encode56(x06 | (x07 << 28), r, 21); + encode32((int)x08, r, 28); + return r; + } + + protected static void scalarMult(byte[] k, PointAffine p, PointAccum r) + { + int[] n = new int[SCALAR_INTS]; + decodeScalar(k, 0, n); + +// assert 0 == (n[0] & 7); +// assert 1 == n[SCALAR_INTS - 1] >>> 30; + + Nat.shiftDownBits(SCALAR_INTS, n, 3, 1); + + // Recode the scalar into signed-digit form + { + //int c1 = + Nat.cadd(SCALAR_INTS, ~n[0] & 1, n, L, n); //assert c1 == 0; + //int c2 = + Nat.shiftDownBit(SCALAR_INTS, n, 0); //assert c2 == (1 << 31); + } + +// assert 1 == n[SCALAR_INTS - 1] >>> 28; + + int[] table = pointPrecompute(p, 8); + PointExt q = new PointExt(); + + // Replace first 4 doublings (2^4 * P) with 1 addition (P + 15 * P) + pointCopy(p, r); + pointLookup(table, 7, q); + pointAdd(q, r); + + int w = 62; + for (;;) + { + pointLookup(n, w, table, q); + pointAdd(q, r); + + pointDouble(r); + pointDouble(r); + pointDouble(r); + + if (--w < 0) + { + break; + } + + pointDouble(r); + } + } + + protected static void scalarMultBase(byte[] k, PointAccum r) + { + precompute(); + + int[] n = new int[SCALAR_INTS]; + decodeScalar(k, 0, n); + + // Recode the scalar into signed-digit form, then group comb bits in each block + { + //int c1 = + Nat.cadd(SCALAR_INTS, ~n[0] & 1, n, L, n); //assert c1 == 0; + //int c2 = + Nat.shiftDownBit(SCALAR_INTS, n, 1); //assert c2 == (1 << 31); + + for (int i = 0; i < SCALAR_INTS; ++i) + { + n[i] = Interleave.shuffle2(n[i]); + } + } + + PointPrecomp p = new PointPrecomp(); + + pointSetNeutral(r); + + int cOff = (PRECOMP_SPACING 
- 1) * PRECOMP_TEETH; + for (;;) + { + for (int b = 0; b < PRECOMP_BLOCKS; ++b) + { + int w = n[b] >>> cOff; + int sign = (w >>> (PRECOMP_TEETH - 1)) & 1; + int abs = (w ^ -sign) & PRECOMP_MASK; + +// assert sign == 0 || sign == 1; +// assert 0 <= abs && abs < PRECOMP_POINTS; + + pointLookup(b, abs, p); + + F.cswap(sign, p.ypx_h, p.ymx_h); + F.cnegate(sign, p.xyd); + + pointAddPrecomp(p, r); + } + + if ((cOff -= PRECOMP_TEETH) < 0) + { + break; + } + + pointDouble(r); + } + } + + protected static void scalarMultBaseEncoded(byte[] k, byte[] r, int rOff) + { + PointAccum p = new PointAccum(); + scalarMultBase(k, p); + if (0 == encodePoint(p, r, rOff)) + { + throw new IllegalStateException(); + } + } + + /** + * NOTE: Only for use by X25519 + */ + public static void scalarMultBaseYZ(X25519.Friend friend, byte[] k, int kOff, int[] y, int[] z) + { + if (null == friend) + { + throw new NullPointerException("This method is only for use by X25519"); + } + + byte[] n = new byte[SCALAR_BYTES]; + pruneScalar(k, kOff, n); + + PointAccum p = new PointAccum(); + scalarMultBase(n, p); + if (0 == checkPoint(p.x, p.y, p.z)) + { + throw new IllegalStateException(); + } + F.copy(p.y, 0, y, 0); + F.copy(p.z, 0, z, 0); + } + + protected static void scalarMultStrausVar(int[] nb, int[] np, PointAffine p, PointAccum r) + { + precompute(); + + final int width = 5; + + byte[] ws_b = getWnafVar(nb, WNAF_WIDTH_BASE); + byte[] ws_p = getWnafVar(np, width); + + PointExt[] tp = pointPrecomputeVar(pointCopy(p), 1 << (width - 2)); + + pointSetNeutral(r); + + for (int bit = 252;;) + { + int wb = ws_b[bit]; + if (wb != 0) + { + int sign = wb >> 31; + int index = (wb ^ sign) >>> 1; + + pointAddVar((sign != 0), precompBaseTable[index], r); + } + + int wp = ws_p[bit]; + if (wp != 0) + { + int sign = wp >> 31; + int index = (wp ^ sign) >>> 1; + + pointAddVar((sign != 0), tp[index], r); + } + + if (--bit < 0) + { + break; + } + + pointDouble(r); + } + } + + public static void sign(byte[] sk, int skOff, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte[] ctx = null; + byte phflag = 0x00; + + implSign(sk, skOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte[] ctx = null; + byte phflag = 0x00; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] ctx, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte phflag = 0x00; + + implSign(sk, skOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte phflag = 0x00; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] ctx, byte[] ph, int phOff, byte[] sig, int sigOff) + { + byte phflag = 0x01; + + implSign(sk, skOff, ctx, phflag, ph, phOff, PREHASH_SIZE, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte[] ph, int phOff, byte[] sig, int sigOff) + { + byte phflag = 0x01; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, ph, phOff, PREHASH_SIZE, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] ctx, Digest ph, byte[] sig, int sigOff) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw 
new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + implSign(sk, skOff, ctx, phflag, m, 0, m.length, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, Digest ph, byte[] sig, int sigOff) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, 0, m.length, sig, sigOff); + } + + public static boolean verify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] m, int mOff, int mLen) + { + byte[] ctx = null; + byte phflag = 0x00; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, mOff, mLen); + } + + public static boolean verify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte[] m, int mOff, int mLen) + { + byte phflag = 0x00; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, mOff, mLen); + } + + public static boolean verifyPrehash(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte[] ph, int phOff) + { + byte phflag = 0x01; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, ph, phOff, PREHASH_SIZE); + } + + public static boolean verifyPrehash(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, Digest ph) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, 0, m.length); + } +} diff --git a/src/main/java/org/qortal/crypto/Crypto.java b/src/main/java/org/qortal/crypto/Crypto.java index 5d91781c..75e5028e 100644 --- a/src/main/java/org/qortal/crypto/Crypto.java +++ b/src/main/java/org/qortal/crypto/Crypto.java @@ -253,6 +253,10 @@ public abstract class Crypto { return false; } + public static byte[] toPublicKey(byte[] privateKey) { + return new Ed25519PrivateKeyParameters(privateKey, 0).generatePublicKey().getEncoded(); + } + public static boolean verify(byte[] publicKey, byte[] signature, byte[] message) { try { return Ed25519.verify(signature, 0, publicKey, 0, message, 0, message.length); @@ -264,16 +268,24 @@ public abstract class Crypto { public static byte[] sign(Ed25519PrivateKeyParameters edPrivateKeyParams, byte[] message) { byte[] signature = new byte[SIGNATURE_LENGTH]; - edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519, edPrivateKeyParams.generatePublicKey(), null, message, 0, message.length, signature, 0); + edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519,null, message, 0, message.length, signature, 0); + + return signature; + } + + public static byte[] sign(byte[] privateKey, byte[] message) { + byte[] signature = new byte[SIGNATURE_LENGTH]; + + new Ed25519PrivateKeyParameters(privateKey, 0).sign(Ed25519.Algorithm.Ed25519,null, message, 0, message.length, signature, 0); return signature; } public static byte[] getSharedSecret(byte[] privateKey, byte[] publicKey) { - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(privateKey); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(privateKey); X25519PrivateKeyParameters xPrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); - byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(publicKey); + byte[] x25519PublicKey = Qortal25519Extras.toX25519PublicKey(publicKey); X25519PublicKeyParameters xPublicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0); byte[] sharedSecret = new byte[SHARED_SECRET_LENGTH]; @@ -281,5 +293,4 @@ public 
abstract class Crypto { return sharedSecret; } - } diff --git a/src/main/java/org/qortal/crypto/Qortal25519Extras.java b/src/main/java/org/qortal/crypto/Qortal25519Extras.java new file mode 100644 index 00000000..42cca93e --- /dev/null +++ b/src/main/java/org/qortal/crypto/Qortal25519Extras.java @@ -0,0 +1,234 @@ +package org.qortal.crypto; + +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA512Digest; +import org.bouncycastle.math.ec.rfc7748.X25519; +import org.bouncycastle.math.ec.rfc7748.X25519Field; +import org.bouncycastle.math.ec.rfc8032.Ed25519; +import org.bouncycastle.math.raw.Nat256; + +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.Collection; + +/** + * Additions to BouncyCastle providing: + *

+ * <p></p>
+ * <ul>
+ * <li>Ed25519 to X25519 key conversion</li>
+ * <li>Aggregate public keys</li>
+ * <li>Aggregate signatures</li>
+ * </ul>
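The two aggregation helpers listed above rest on the linearity of the Schnorr-style signing equation. A rough sketch of the algebra, assuming every co-signer uses signForAggregation() on the same message m, so they all share the challenge k = SHA-512(m) reduced mod L (note this scheme hashes only the message, unlike plain Ed25519, and uses a random nonce):

    s_i \equiv r_i + k\,a_i \pmod{L}, \qquad R_i = [r_i]B, \qquad A_i = [a_i]B
    \sum_i s_i \equiv \sum_i r_i + k \sum_i a_i \pmod{L}
    \Bigl[\sum_i s_i\Bigr]B = \sum_i R_i + k \sum_i A_i

So the aggregate signature (sum of the R points, sum of the s scalars mod L) verifies against the aggregate public key (sum of the A points) exactly when each component signature verifies, which is what aggregateSignatures(), aggregatePublicKeys() and verifyAggregated() below implement.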
+ */ +public abstract class Qortal25519Extras extends BouncyCastleEd25519 { + + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) { + int[] one = new int[X25519Field.SIZE]; + X25519Field.one(one); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(ed25519PublicKey, 0, true, pA)) + return null; + + int[] y = pA.y; + + int[] oneMinusY = new int[X25519Field.SIZE]; + X25519Field.sub(one, y, oneMinusY); + + int[] onePlusY = new int[X25519Field.SIZE]; + X25519Field.add(one, y, onePlusY); + + int[] oneMinusYInverted = new int[X25519Field.SIZE]; + X25519Field.inv(oneMinusY, oneMinusYInverted); + + int[] u = new int[X25519Field.SIZE]; + X25519Field.mul(onePlusY, oneMinusYInverted, u); + + X25519Field.normalize(u); + + byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE]; + X25519Field.encode(u, x25519PublicKey, 0); + + return x25519PublicKey; + } + + public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) { + Digest d = Ed25519.createPrehash(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length); + d.doFinal(h, 0); + + byte[] s = new byte[X25519.SCALAR_SIZE]; + + System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE); + s[0] &= 0xF8; + s[X25519.SCALAR_SIZE - 1] &= 0x7F; + s[X25519.SCALAR_SIZE - 1] |= 0x40; + + return s; + } + + // Mostly for test support + public static PointAccum newPointAccum() { + return new PointAccum(); + } + + public static byte[] aggregatePublicKeys(Collection publicKeys) { + PointAccum rAccum = null; + + for (byte[] publicKey : publicKeys) { + PointAffine pA = new PointAffine(); + if (!decodePointVar(publicKey, 0, false, pA)) + // Failed to decode + return null; + + if (rAccum == null) { + rAccum = new PointAccum(); + pointCopy(pA, rAccum); + } else { + pointAdd(pointCopy(pA), rAccum); + } + } + + byte[] publicKey = new byte[SCALAR_BYTES]; + if (0 == encodePoint(rAccum, publicKey, 0)) + // Failed to encode + return null; + + return publicKey; + } + + public static byte[] aggregateSignatures(Collection signatures) { + // Signatures are (R, s) + // R is a point + // s is a scalar + PointAccum rAccum = null; + int[] sAccum = new int[SCALAR_INTS]; + + byte[] rEncoded = new byte[POINT_BYTES]; + int[] sPart = new int[SCALAR_INTS]; + for (byte[] signature : signatures) { + System.arraycopy(signature,0, rEncoded, 0, rEncoded.length); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(rEncoded, 0, false, pA)) + // Failed to decode + return null; + + if (rAccum == null) { + rAccum = new PointAccum(); + pointCopy(pA, rAccum); + + decode32(signature, rEncoded.length, sAccum, 0, SCALAR_INTS); + } else { + pointAdd(pointCopy(pA), rAccum); + + decode32(signature, rEncoded.length, sPart, 0, SCALAR_INTS); + Nat256.addTo(sPart, sAccum); + + // "mod L" on sAccum + if (Nat256.gte(sAccum, L)) + Nat256.subFrom(L, sAccum); + } + } + + byte[] signature = new byte[SIGNATURE_SIZE]; + if (0 == encodePoint(rAccum, signature, 0)) + // Failed to encode + return null; + + for (int i = 0; i < sAccum.length; ++i) { + encode32(sAccum[i], signature, POINT_BYTES + i * 4); + } + + return signature; + } + + public static byte[] signForAggregation(byte[] privateKey, byte[] message) { + // Very similar to BouncyCastle's implementation except we use secure random nonce and different hash + Digest d = new SHA512Digest(); + byte[] h = new byte[d.getDigestSize()]; + + d.reset(); + d.update(privateKey, 0, privateKey.length); + d.doFinal(h, 0); + + byte[] 
sH = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, sH); + + byte[] publicKey = new byte[SCALAR_BYTES]; + scalarMultBaseEncoded(sH, publicKey, 0); + + byte[] rSeed = new byte[d.getDigestSize()]; + SECURE_RANDOM.nextBytes(rSeed); + + byte[] r = new byte[SCALAR_BYTES]; + pruneScalar(rSeed, 0, r); + + byte[] R = new byte[POINT_BYTES]; + scalarMultBaseEncoded(r, R, 0); + + d.reset(); + d.update(message, 0, message.length); + d.doFinal(h, 0); + byte[] k = reduceScalar(h); + + byte[] s = calculateS(r, k, sH); + + byte[] signature = new byte[SIGNATURE_SIZE]; + System.arraycopy(R, 0, signature, 0, POINT_BYTES); + System.arraycopy(s, 0, signature, POINT_BYTES, SCALAR_BYTES); + + return signature; + } + + public static boolean verifyAggregated(byte[] publicKey, byte[] signature, byte[] message) { + byte[] R = Arrays.copyOfRange(signature, 0, POINT_BYTES); + + byte[] s = Arrays.copyOfRange(signature, POINT_BYTES, POINT_BYTES + SCALAR_BYTES); + + if (!checkPointVar(R)) + // R out of bounds + return false; + + if (!checkScalarVar(s)) + // s out of bounds + return false; + + byte[] S = new byte[POINT_BYTES]; + scalarMultBaseEncoded(s, S, 0); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(publicKey, 0, true, pA)) + // Failed to decode + return false; + + Digest d = new SHA512Digest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(message, 0, message.length); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + + int[] nS = new int[SCALAR_INTS]; + decodeScalar(s, 0, nS); + + int[] nA = new int[SCALAR_INTS]; + decodeScalar(k, 0, nA); + + /*PointAccum*/ + PointAccum pR = new PointAccum(); + scalarMultStrausVar(nS, nA, pA, pR); + + byte[] check = new byte[POINT_BYTES]; + if (0 == encodePoint(pR, check, 0)) + // Failed to encode + return false; + + return Arrays.equals(check, R); + } +} diff --git a/src/main/java/org/qortal/data/network/OnlineAccountData.java b/src/main/java/org/qortal/data/network/OnlineAccountData.java index 15792307..28c454b5 100644 --- a/src/main/java/org/qortal/data/network/OnlineAccountData.java +++ b/src/main/java/org/qortal/data/network/OnlineAccountData.java @@ -5,6 +5,7 @@ import java.util.Arrays; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlTransient; import org.qortal.account.PublicKeyAccount; @@ -16,6 +17,9 @@ public class OnlineAccountData { protected byte[] signature; protected byte[] publicKey; + @XmlTransient + private int hash; + // Constructors // necessary for JAXB serialization @@ -62,20 +66,23 @@ public class OnlineAccountData { if (otherOnlineAccountData.timestamp != this.timestamp) return false; - // Signature more likely to be unique than public key - if (!Arrays.equals(otherOnlineAccountData.signature, this.signature)) - return false; - if (!Arrays.equals(otherOnlineAccountData.publicKey, this.publicKey)) return false; + // We don't compare signature because it's not our remit to verify and newer aggregate signatures use random nonces + return true; } @Override public int hashCode() { - // Pretty lazy implementation - return (int) this.timestamp; + int h = this.hash; + if (h == 0) { + this.hash = h = Long.hashCode(this.timestamp) + ^ Arrays.hashCode(this.publicKey); + // We don't use signature because newer aggregate signatures use random nonces + } + return h; } } diff --git a/src/main/java/org/qortal/network/Network.java b/src/main/java/org/qortal/network/Network.java index 6bc58bb4..10f02d52 100644 --- 
a/src/main/java/org/qortal/network/Network.java +++ b/src/main/java/org/qortal/network/Network.java @@ -469,6 +469,8 @@ public class Network { class NetworkProcessor extends ExecuteProduceConsume { + private final Logger LOGGER = LogManager.getLogger(NetworkProcessor.class); + private final AtomicLong nextConnectTaskTimestamp = new AtomicLong(0L); // ms - try first connect once NTP syncs private final AtomicLong nextBroadcastTimestamp = new AtomicLong(0L); // ms - try first broadcast once NTP syncs @@ -1373,17 +1375,26 @@ public class Network { // We attempted to connect within the last day // but we last managed to connect over a week ago. Predicate isNotOldPeer = peerData -> { - if (peerData.getLastAttempted() == null - || peerData.getLastAttempted() < now - OLD_PEER_ATTEMPTED_PERIOD) { + + // First check if there was a connection attempt within the last day + if (peerData.getLastAttempted() != null + && peerData.getLastAttempted() > now - OLD_PEER_ATTEMPTED_PERIOD) { + + // There was, so now check if we had a successful connection in the last 7 days + if (peerData.getLastConnected() != null + && peerData.getLastConnected() > now - OLD_PEER_CONNECTION_PERIOD) { + + // We did, so this is NOT an 'old' peer + return true; + } + + // Last successful connection was more than 1 week ago - this is an 'old' peer + return false; + } + else { + // Best to wait until we have a connection attempt - assume not an 'old' peer until then return true; } - - if (peerData.getLastConnected() == null - || peerData.getLastConnected() > now - OLD_PEER_CONNECTION_PERIOD) { - return true; - } - - return false; }; // Disregard peers that are NOT 'old' diff --git a/src/main/java/org/qortal/network/message/BlockMessage.java b/src/main/java/org/qortal/network/message/BlockMessage.java index 2dd4db87..0a8a23de 100644 --- a/src/main/java/org/qortal/network/message/BlockMessage.java +++ b/src/main/java/org/qortal/network/message/BlockMessage.java @@ -9,6 +9,7 @@ import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockData; import org.qortal.data.transaction.TransactionData; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.transform.block.BlockTransformer; import org.qortal.utils.Triple; @@ -46,12 +47,12 @@ public class BlockMessage extends Message { try { int height = byteBuffer.getInt(); - Triple, List> blockInfo = BlockTransformer.fromByteBuffer(byteBuffer); + BlockTransformation blockTransformation = BlockTransformer.fromByteBuffer(byteBuffer); - BlockData blockData = blockInfo.getA(); + BlockData blockData = blockTransformation.getBlockData(); blockData.setHeight(height); - return new BlockMessage(id, blockData, blockInfo.getB(), blockInfo.getC()); + return new BlockMessage(id, blockData, blockTransformation.getTransactions(), blockTransformation.getAtStates()); } catch (TransformationException e) { LOGGER.info(String.format("Received garbled BLOCK message: %s", e.getMessage())); throw new MessageException(e.getMessage(), e); diff --git a/src/main/java/org/qortal/network/message/BlockV2Message.java b/src/main/java/org/qortal/network/message/BlockV2Message.java new file mode 100644 index 00000000..7aed65b4 --- /dev/null +++ b/src/main/java/org/qortal/network/message/BlockV2Message.java @@ -0,0 +1,87 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.block.Block; +import 
org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; +import org.qortal.transform.block.BlockTransformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; + +public class BlockV2Message extends Message { + + private static final Logger LOGGER = LogManager.getLogger(BlockV2Message.class); + public static final long MIN_PEER_VERSION = 0x300030003L; // 3.3.3 + + private BlockData blockData; + private List transactions; + private byte[] atStatesHash; + + public BlockV2Message(Block block) throws TransformationException { + super(MessageType.BLOCK_V2); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + try { + bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); + + bytes.write(BlockTransformer.toBytesV2(block)); + } catch (IOException e) { + throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); + } + + this.dataBytes = bytes.toByteArray(); + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + public BlockV2Message(byte[] cachedBytes) { + super(MessageType.BLOCK_V2); + + this.dataBytes = cachedBytes; + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + private BlockV2Message(int id, BlockData blockData, List transactions, byte[] atStatesHash) { + super(id, MessageType.BLOCK_V2); + + this.blockData = blockData; + this.transactions = transactions; + this.atStatesHash = atStatesHash; + } + + public BlockData getBlockData() { + return this.blockData; + } + + public List getTransactions() { + return this.transactions; + } + + public byte[] getAtStatesHash() { + return this.atStatesHash; + } + + public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) throws MessageException { + try { + int height = byteBuffer.getInt(); + + BlockTransformation blockTransformation = BlockTransformer.fromByteBufferV2(byteBuffer); + + BlockData blockData = blockTransformation.getBlockData(); + blockData.setHeight(height); + + return new BlockV2Message(id, blockData, blockTransformation.getTransactions(), blockTransformation.getAtStatesHash()); + } catch (TransformationException e) { + LOGGER.info(String.format("Received garbled BLOCK_V2 message: %s", e.getMessage())); + throw new MessageException(e.getMessage(), e); + } + } + +} diff --git a/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java new file mode 100644 index 00000000..66c7c47a --- /dev/null +++ b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java @@ -0,0 +1,112 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Longs; +import org.qortal.transform.Transformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.*; + +/** + * For requesting online accounts info from remote peer, given our list of online accounts. + *

+ * Different format to V1 and V2:
+ * <p></p>
+ * <ul>
+ * <li>V1 is: number of entries, then timestamp + pubkey for each entry</li>
+ * <li>V2 is: groups of: number of entries, timestamp, then pubkey for each entry</li>
+ * <li>V3 is: groups of: timestamp, number of entries (one per leading byte), then hash(pubkeys) for each entry</li>
+ * </ul>
+ * <p></p>
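As a small aside on the "number of entries" byte described in the list above (this round-trip is not part of the patch, just an illustration of the convention the serializer and parser below both follow):

    // Hypothetical round-trip of the per-group count byte used by GET_ONLINE_ACCOUNTS_V3.
    // Counts run 1..256; 256 is sent as 0x00, and the reader adds 256 back when the
    // signed byte comes out as zero or negative.
    int count = 256;
    byte wire = (byte) (count & 0xFF);   // 256 -> 0x00 on the wire
    int decoded = wire;                  // sign-extended read-back: 0
    if (decoded <= 0)
        decoded += 256;                  // 0 -> 256, -1 -> 255, ...
    assert decoded == count;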

+ * End + */ +public class GetOnlineAccountsV3Message extends Message { + + private static final Map> EMPTY_ONLINE_ACCOUNTS = Collections.emptyMap(); + private Map> hashesByTimestampThenByte; + + public GetOnlineAccountsV3Message(Map> hashesByTimestampThenByte) { + super(MessageType.GET_ONLINE_ACCOUNTS_V3); + + // If we don't have ANY online accounts then it's an easier construction... + if (hashesByTimestampThenByte.isEmpty()) { + this.dataBytes = EMPTY_DATA_BYTES; + return; + } + + // We should know exactly how many bytes to allocate now + int byteSize = hashesByTimestampThenByte.size() * (Transformer.TIMESTAMP_LENGTH + Transformer.BYTE_LENGTH); + + byteSize += hashesByTimestampThenByte.values() + .stream() + .mapToInt(map -> map.size() * Transformer.PUBLIC_KEY_LENGTH) + .sum(); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(byteSize); + + // Warning: no double-checking/fetching! We must be ConcurrentMap compatible. + // So no contains() then get() or multiple get()s on the same key/map. + try { + for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) { + bytes.write(Longs.toByteArray(outerMapEntry.getKey())); + + var innerMap = outerMapEntry.getValue(); + + // Number of entries: 1 - 256, where 256 is represented by 0 + bytes.write(innerMap.size() & 0xFF); + + for (byte[] hashBytes : innerMap.values()) { + bytes.write(hashBytes); + } + } + } catch (IOException e) { + throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); + } + + this.dataBytes = bytes.toByteArray(); + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + private GetOnlineAccountsV3Message(int id, Map> hashesByTimestampThenByte) { + super(id, MessageType.GET_ONLINE_ACCOUNTS_V3); + + this.hashesByTimestampThenByte = hashesByTimestampThenByte; + } + + public Map> getHashesByTimestampThenByte() { + return this.hashesByTimestampThenByte; + } + + public static Message fromByteBuffer(int id, ByteBuffer bytes) { + // 'empty' case + if (!bytes.hasRemaining()) { + return new GetOnlineAccountsV3Message(id, EMPTY_ONLINE_ACCOUNTS); + } + + Map> hashesByTimestampThenByte = new HashMap<>(); + + while (bytes.hasRemaining()) { + long timestamp = bytes.getLong(); + + int hashCount = bytes.get(); + if (hashCount <= 0) + // 256 is represented by 0. + // Also converts negative signed value (e.g. 
-1) to proper positive unsigned value (255) + hashCount += 256; + + Map hashesByByte = new HashMap<>(); + + for (int i = 0; i < hashCount; ++i) { + byte[] publicKeyHash = new byte[Transformer.PUBLIC_KEY_LENGTH]; + bytes.get(publicKeyHash); + + hashesByByte.put(publicKeyHash[0], publicKeyHash); + } + + hashesByTimestampThenByte.put(timestamp, hashesByByte); + } + + return new GetOnlineAccountsV3Message(id, hashesByTimestampThenByte); + } + +} diff --git a/src/main/java/org/qortal/network/message/Message.java b/src/main/java/org/qortal/network/message/Message.java index f752b5b9..d8467d90 100644 --- a/src/main/java/org/qortal/network/message/Message.java +++ b/src/main/java/org/qortal/network/message/Message.java @@ -46,6 +46,7 @@ public abstract class Message { private static final int MAX_DATA_SIZE = 10 * 1024 * 1024; // 10MB protected static final byte[] EMPTY_DATA_BYTES = new byte[0]; + private static final ByteBuffer EMPTY_READ_ONLY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_DATA_BYTES).asReadOnlyBuffer(); protected int id; protected final MessageType type; @@ -126,7 +127,7 @@ public abstract class Message { if (dataSize > 0 && dataSize + CHECKSUM_LENGTH > readOnlyBuffer.remaining()) return null; - ByteBuffer dataSlice = null; + ByteBuffer dataSlice = EMPTY_READ_ONLY_BYTE_BUFFER; if (dataSize > 0) { byte[] expectedChecksum = new byte[CHECKSUM_LENGTH]; readOnlyBuffer.get(expectedChecksum); diff --git a/src/main/java/org/qortal/network/message/MessageType.java b/src/main/java/org/qortal/network/message/MessageType.java index a2637dfd..de711dc3 100644 --- a/src/main/java/org/qortal/network/message/MessageType.java +++ b/src/main/java/org/qortal/network/message/MessageType.java @@ -34,6 +34,7 @@ public enum MessageType { BLOCK(50, BlockMessage::fromByteBuffer), GET_BLOCK(51, GetBlockMessage::fromByteBuffer), + BLOCK_V2(52, BlockV2Message::fromByteBuffer), SIGNATURES(60, SignaturesMessage::fromByteBuffer), GET_SIGNATURES_V2(61, GetSignaturesV2Message::fromByteBuffer), @@ -45,6 +46,8 @@ public enum MessageType { GET_ONLINE_ACCOUNTS(81, GetOnlineAccountsMessage::fromByteBuffer), ONLINE_ACCOUNTS_V2(82, OnlineAccountsV2Message::fromByteBuffer), GET_ONLINE_ACCOUNTS_V2(83, GetOnlineAccountsV2Message::fromByteBuffer), + // ONLINE_ACCOUNTS_V3(84, OnlineAccountsV3Message::fromByteBuffer), + GET_ONLINE_ACCOUNTS_V3(85, GetOnlineAccountsV3Message::fromByteBuffer), ARBITRARY_DATA(90, ArbitraryDataMessage::fromByteBuffer), GET_ARBITRARY_DATA(91, GetArbitraryDataMessage::fromByteBuffer), diff --git a/src/main/java/org/qortal/repository/BlockArchiveReader.java b/src/main/java/org/qortal/repository/BlockArchiveReader.java index 83508152..311d21c7 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveReader.java +++ b/src/main/java/org/qortal/repository/BlockArchiveReader.java @@ -9,6 +9,7 @@ import org.qortal.data.block.BlockData; import org.qortal.data.transaction.TransactionData; import org.qortal.settings.Settings; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.transform.block.BlockTransformer; import org.qortal.utils.Triple; @@ -66,7 +67,7 @@ public class BlockArchiveReader { this.fileListCache = Map.copyOf(map); } - public Triple, List> fetchBlockAtHeight(int height) { + public BlockTransformation fetchBlockAtHeight(int height) { if (this.fileListCache == null) { this.fetchFileList(); } @@ -77,13 +78,13 @@ public class BlockArchiveReader { } ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes); - Triple, List> blockInfo 
= null; + BlockTransformation blockInfo = null; try { blockInfo = BlockTransformer.fromByteBuffer(byteBuffer); - if (blockInfo != null && blockInfo.getA() != null) { + if (blockInfo != null && blockInfo.getBlockData() != null) { // Block height is stored outside of the main serialized bytes, so it // won't be set automatically. - blockInfo.getA().setHeight(height); + blockInfo.getBlockData().setHeight(height); } } catch (TransformationException e) { return null; @@ -91,8 +92,7 @@ public class BlockArchiveReader { return blockInfo; } - public Triple, List> fetchBlockWithSignature( - byte[] signature, Repository repository) { + public BlockTransformation fetchBlockWithSignature(byte[] signature, Repository repository) { if (this.fileListCache == null) { this.fetchFileList(); @@ -105,13 +105,12 @@ public class BlockArchiveReader { return null; } - public List, List>> fetchBlocksFromRange( - int startHeight, int endHeight) { + public List fetchBlocksFromRange(int startHeight, int endHeight) { - List, List>> blockInfoList = new ArrayList<>(); + List blockInfoList = new ArrayList<>(); for (int height = startHeight; height <= endHeight; height++) { - Triple, List> blockInfo = this.fetchBlockAtHeight(height); + BlockTransformation blockInfo = this.fetchBlockAtHeight(height); if (blockInfo == null) { return blockInfoList; } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java index d8738f0d..cc7e1611 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java @@ -1,16 +1,13 @@ package org.qortal.repository.hsqldb; -import org.qortal.api.ApiError; -import org.qortal.api.ApiExceptionFactory; import org.qortal.api.model.BlockSignerSummary; -import org.qortal.block.Block; import org.qortal.data.block.BlockArchiveData; import org.qortal.data.block.BlockData; import org.qortal.data.block.BlockSummaryData; import org.qortal.repository.BlockArchiveReader; import org.qortal.repository.BlockArchiveRepository; import org.qortal.repository.DataException; -import org.qortal.utils.Triple; +import org.qortal.transform.block.BlockTransformation; import java.sql.ResultSet; import java.sql.SQLException; @@ -29,11 +26,11 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository { @Override public BlockData fromSignature(byte[] signature) throws DataException { - Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository); - if (blockInfo != null) { - return (BlockData) blockInfo.getA(); - } - return null; + BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository); + if (blockInfo == null) + return null; + + return blockInfo.getBlockData(); } @Override @@ -47,11 +44,11 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository { @Override public BlockData fromHeight(int height) throws DataException { - Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height); - if (blockInfo != null) { - return (BlockData) blockInfo.getA(); - } - return null; + BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height); + if (blockInfo == null) + return null; + + return blockInfo.getBlockData(); } @Override @@ -79,9 +76,9 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository { int height = 
referenceBlock.getHeight(); if (height > 0) { // Request the block at height + 1 - Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1); + BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1); if (blockInfo != null) { - return (BlockData) blockInfo.getA(); + return blockInfo.getBlockData(); } } } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java index 787622e5..71e5897d 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBDatabaseUpdates.java @@ -964,6 +964,11 @@ public class HSQLDBDatabaseUpdates { stmt.execute("DROP TABLE ArbitraryPeers"); break; + case 42: + // We need more space for online accounts + stmt.execute("ALTER TABLE Blocks ALTER COLUMN online_accounts SET DATA TYPE VARBINARY(10240)"); + break; + default: // nothing to do return false; diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java index 61f4b76f..6ec30e20 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java @@ -23,7 +23,6 @@ import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.qortal.account.PrivateKeyAccount; import org.qortal.crypto.Crypto; import org.qortal.globalization.Translator; import org.qortal.gui.SysTray; @@ -1003,7 +1002,7 @@ public class HSQLDBRepository implements Repository { if (privateKey == null) return null; - return PrivateKeyAccount.toPublicKey(privateKey); + return Crypto.toPublicKey(privateKey); } public static String ed25519PublicKeyToAddress(byte[] publicKey) { diff --git a/src/main/java/org/qortal/settings/Settings.java b/src/main/java/org/qortal/settings/Settings.java index a40df9b4..e0ed7306 100644 --- a/src/main/java/org/qortal/settings/Settings.java +++ b/src/main/java/org/qortal/settings/Settings.java @@ -203,7 +203,7 @@ public class Settings { private int maxRetries = 2; /** Minimum peer version number required in order to sync with them */ - private String minPeerVersion = "3.1.0"; + private String minPeerVersion = "3.3.7"; /** Whether to allow connections with peers below minPeerVersion * If true, we won't sync with them but they can still sync with us, and will show in the peers list * If false, sync will be blocked both ways, and they will not appear in the peers list */ diff --git a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java index 8abebe5c..ca5ce517 100644 --- a/src/main/java/org/qortal/transaction/ArbitraryTransaction.java +++ b/src/main/java/org/qortal/transaction/ArbitraryTransaction.java @@ -6,6 +6,7 @@ import java.util.Objects; import java.util.stream.Collectors; import org.qortal.account.Account; +import org.qortal.block.BlockChain; import org.qortal.controller.arbitrary.ArbitraryDataManager; import org.qortal.controller.arbitrary.ArbitraryDataStorageManager; import org.qortal.crypto.Crypto; @@ -19,6 +20,7 @@ import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.arbitrary.ArbitraryDataFile; import org.qortal.transform.TransformationException; +import org.qortal.transform.Transformer; import 
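Crypto.toPublicKey itself is outside this diff; presumably it performs the standard BouncyCastle Ed25519 derivation. A minimal sketch under that assumption (not necessarily the project's exact implementation):

import org.bouncycastle.crypto.params.Ed25519PrivateKeyParameters;

public class Ed25519Sketch {
    /** Derive the 32-byte Ed25519 public key matching a 32-byte private key. */
    public static byte[] toPublicKey(byte[] privateKey) {
        return new Ed25519PrivateKeyParameters(privateKey, 0).generatePublicKey().getEncoded();
    }
}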
org.qortal.transform.transaction.ArbitraryTransactionTransformer; import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.ArbitraryTransactionUtils; @@ -86,6 +88,14 @@ public class ArbitraryTransaction extends Transaction { @Override public boolean hasValidReference() throws DataException { // We shouldn't really get this far, but just in case: + + // Disable reference checking after feature trigger timestamp + if (this.arbitraryTransactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) { + // Allow any value as long as it is the correct length + return this.arbitraryTransactionData.getReference() != null && + this.arbitraryTransactionData.getReference().length == Transformer.SIGNATURE_LENGTH; + } + if (this.arbitraryTransactionData.getReference() == null) { return false; } diff --git a/src/main/java/org/qortal/transaction/AtTransaction.java b/src/main/java/org/qortal/transaction/AtTransaction.java index c570bb65..b07f006b 100644 --- a/src/main/java/org/qortal/transaction/AtTransaction.java +++ b/src/main/java/org/qortal/transaction/AtTransaction.java @@ -5,6 +5,7 @@ import java.util.List; import org.qortal.account.Account; import org.qortal.asset.Asset; +import org.qortal.block.BlockChain; import org.qortal.crypto.Crypto; import org.qortal.data.asset.AssetData; import org.qortal.data.transaction.ATTransactionData; @@ -12,6 +13,7 @@ import org.qortal.data.transaction.TransactionData; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.transform.TransformationException; +import org.qortal.transform.Transformer; import org.qortal.transform.transaction.AtTransactionTransformer; import org.qortal.utils.Amounts; @@ -75,6 +77,13 @@ public class AtTransaction extends Transaction { @Override public boolean hasValidReference() throws DataException { + // Disable reference checking after feature trigger timestamp + if (this.atTransactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) { + // Allow any value as long as it is the correct length + return this.atTransactionData.getReference() != null && + this.atTransactionData.getReference().length == Transformer.SIGNATURE_LENGTH; + } + // Check reference is correct, using AT account, not transaction creator which is null account Account atAccount = getATAccount(); return Arrays.equals(atAccount.getLastReference(), atTransactionData.getReference()); diff --git a/src/main/java/org/qortal/transaction/MessageTransaction.java b/src/main/java/org/qortal/transaction/MessageTransaction.java index d02b6fdd..a9d3a01c 100644 --- a/src/main/java/org/qortal/transaction/MessageTransaction.java +++ b/src/main/java/org/qortal/transaction/MessageTransaction.java @@ -8,6 +8,7 @@ import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; import org.qortal.account.PublicKeyAccount; import org.qortal.asset.Asset; +import org.qortal.block.BlockChain; import org.qortal.crypto.Crypto; import org.qortal.crypto.MemoryPoW; import org.qortal.data.PaymentData; @@ -20,6 +21,7 @@ import org.qortal.repository.DataException; import org.qortal.repository.GroupRepository; import org.qortal.repository.Repository; import org.qortal.transform.TransformationException; +import org.qortal.transform.Transformer; import org.qortal.transform.transaction.ChatTransactionTransformer; import org.qortal.transform.transaction.MessageTransactionTransformer; import org.qortal.transform.transaction.TransactionTransformer; @@ -163,6 +165,14 
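The same feature-trigger gate recurs in each hasValidReference override in this patch; condensed into one hypothetical helper (the method name is illustrative), the post-trigger rule is simply presence plus correct length, with Transformer.SIGNATURE_LENGTH being 64 bytes:

import org.qortal.block.BlockChain;
import org.qortal.transform.Transformer;

// Hypothetical condensation of the gate added above.
static boolean passesRelaxedReferenceCheck(byte[] reference, long transactionTimestamp) {
    if (transactionTimestamp < BlockChain.getInstance().getDisableReferenceTimestamp())
        return false; // pre-trigger callers keep the strict last-reference comparison
    return reference != null && reference.length == Transformer.SIGNATURE_LENGTH;
}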
@@ public class MessageTransaction extends Transaction { @Override public boolean hasValidReference() throws DataException { // We shouldn't really get this far, but just in case: + + // Disable reference checking after feature trigger timestamp + if (this.messageTransactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) { + // Allow any value as long as it is the correct length + return this.messageTransactionData.getReference() != null && + this.messageTransactionData.getReference().length == Transformer.SIGNATURE_LENGTH; + } + if (this.messageTransactionData.getReference() == null) return false; diff --git a/src/main/java/org/qortal/transaction/Transaction.java b/src/main/java/org/qortal/transaction/Transaction.java index a1fd6baa..b56d48cf 100644 --- a/src/main/java/org/qortal/transaction/Transaction.java +++ b/src/main/java/org/qortal/transaction/Transaction.java @@ -31,6 +31,7 @@ import org.qortal.repository.GroupRepository; import org.qortal.repository.Repository; import org.qortal.settings.Settings; import org.qortal.transform.TransformationException; +import org.qortal.transform.Transformer; import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.NTP; @@ -905,6 +906,13 @@ public abstract class Transaction { * @throws DataException */ public boolean hasValidReference() throws DataException { + // Disable reference checking after feature trigger timestamp + if (this.transactionData.getTimestamp() >= BlockChain.getInstance().getDisableReferenceTimestamp()) { + // Allow any value as long as it is the correct length + return this.transactionData.getReference() != null && + this.transactionData.getReference().length == Transformer.SIGNATURE_LENGTH; + } + Account creator = getCreator(); return Arrays.equals(transactionData.getReference(), creator.getLastReference()); diff --git a/src/main/java/org/qortal/transform/block/BlockTransformation.java b/src/main/java/org/qortal/transform/block/BlockTransformation.java new file mode 100644 index 00000000..6aee8cf9 --- /dev/null +++ b/src/main/java/org/qortal/transform/block/BlockTransformation.java @@ -0,0 +1,44 @@ +package org.qortal.transform.block; + +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; + +import java.util.List; + +public class BlockTransformation { + private final BlockData blockData; + private final List transactions; + private final List atStates; + private final byte[] atStatesHash; + + /*package*/ BlockTransformation(BlockData blockData, List transactions, List atStates) { + this.blockData = blockData; + this.transactions = transactions; + this.atStates = atStates; + this.atStatesHash = null; + } + + /*package*/ BlockTransformation(BlockData blockData, List transactions, byte[] atStatesHash) { + this.blockData = blockData; + this.transactions = transactions; + this.atStates = null; + this.atStatesHash = atStatesHash; + } + + public BlockData getBlockData() { + return blockData; + } + + public List getTransactions() { + return transactions; + } + + public List getAtStates() { + return atStates; + } + + public byte[] getAtStatesHash() { + return atStatesHash; + } +} diff --git a/src/main/java/org/qortal/transform/block/BlockTransformer.java b/src/main/java/org/qortal/transform/block/BlockTransformer.java index cce3e7d7..b61d6900 100644 --- a/src/main/java/org/qortal/transform/block/BlockTransformer.java +++ b/src/main/java/org/qortal/transform/block/BlockTransformer.java @@ -3,12 
+3,14 @@ package org.qortal.transform.block; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.qortal.block.Block; import org.qortal.block.BlockChain; +import org.qortal.crypto.Crypto; import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockData; import org.qortal.data.transaction.TransactionData; @@ -20,7 +22,6 @@ import org.qortal.transform.Transformer; import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.Base58; import org.qortal.utils.Serialization; -import org.qortal.utils.Triple; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; @@ -45,14 +46,13 @@ public class BlockTransformer extends Transformer { protected static final int AT_BYTES_LENGTH = INT_LENGTH; protected static final int AT_FEES_LENGTH = AMOUNT_LENGTH; - protected static final int AT_LENGTH = AT_FEES_LENGTH + AT_BYTES_LENGTH; protected static final int ONLINE_ACCOUNTS_COUNT_LENGTH = INT_LENGTH; protected static final int ONLINE_ACCOUNTS_SIZE_LENGTH = INT_LENGTH; protected static final int ONLINE_ACCOUNTS_TIMESTAMP_LENGTH = TIMESTAMP_LENGTH; protected static final int ONLINE_ACCOUNTS_SIGNATURES_COUNT_LENGTH = INT_LENGTH; - protected static final int AT_ENTRY_LENGTH = ADDRESS_LENGTH + SHA256_LENGTH + AMOUNT_LENGTH; + public static final int AT_ENTRY_LENGTH = ADDRESS_LENGTH + SHA256_LENGTH + AMOUNT_LENGTH; /** * Extract block data and transaction data from serialized bytes. @@ -61,7 +61,7 @@ public class BlockTransformer extends Transformer { * @return BlockData and a List of transactions. * @throws TransformationException */ - public static Triple, List> fromBytes(byte[] bytes) throws TransformationException { + public static BlockTransformation fromBytes(byte[] bytes) throws TransformationException { if (bytes == null) return null; @@ -76,28 +76,40 @@ public class BlockTransformer extends Transformer { /** * Extract block data and transaction data from serialized bytes containing a single block. * - * @param bytes + * @param byteBuffer source of serialized block bytes * @return BlockData and a List of transactions. * @throws TransformationException */ - public static Triple, List> fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException { + public static BlockTransformation fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException { + return BlockTransformer.fromByteBuffer(byteBuffer, false); + } + + /** + * Extract block data and transaction data from serialized bytes containing a single block. + * + * @param byteBuffer source of serialized block bytes + * @return BlockData and a List of transactions. + * @throws TransformationException + */ + public static BlockTransformation fromByteBufferV2(ByteBuffer byteBuffer) throws TransformationException { return BlockTransformer.fromByteBuffer(byteBuffer, true); } /** - * Extract block data and transaction data from serialized bytes containing one or more blocks. - * - * @param bytes + * Extract block data and transaction data from serialized bytes containing a single block, in one of two forms. + * + * @param byteBuffer source of serialized block bytes + * @param isV2 set to true if AT state info is represented by a single hash, false if serialized as per-AT address+state hash+fees * @return the next block's BlockData and a List of transactions. 
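A short usage sketch of the split entry points above; the serialized byte arrays are placeholders:

import java.nio.ByteBuffer;

// fromByteBuffer() parses the original per-AT serialization; fromByteBufferV2() parses the hashed form.
BlockTransformation v1Block = BlockTransformer.fromByteBuffer(ByteBuffer.wrap(serializedV1Block));
BlockTransformation v2Block = BlockTransformer.fromByteBufferV2(ByteBuffer.wrap(serializedV2Block));
byte[] atStatesHash = v2Block.getAtStatesHash(); // populated only for the V2 form
// v1Block.getAtStates() is populated only for the V1 form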
* @throws TransformationException */ - public static Triple, List> fromByteBuffer(ByteBuffer byteBuffer, boolean finalBlockInBuffer) throws TransformationException { + private static BlockTransformation fromByteBuffer(ByteBuffer byteBuffer, boolean isV2) throws TransformationException { int version = byteBuffer.getInt(); - if (finalBlockInBuffer && byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH) + if (byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH) throw new TransformationException("Byte data too short for Block"); - if (finalBlockInBuffer && byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize()) + if (byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize()) throw new TransformationException("Byte data too long for Block"); long timestamp = byteBuffer.getLong(); @@ -117,42 +129,52 @@ public class BlockTransformer extends Transformer { int atCount = 0; long atFees = 0; - List atStates = new ArrayList<>(); + byte[] atStatesHash = null; + List atStates = null; - int atBytesLength = byteBuffer.getInt(); + if (isV2) { + // Simply: AT count, AT total fees, hash(all AT states) + atCount = byteBuffer.getInt(); + atFees = byteBuffer.getLong(); + atStatesHash = new byte[Transformer.SHA256_LENGTH]; + byteBuffer.get(atStatesHash); + } else { + // V1: AT info byte length, then per-AT entries of AT address + state hash + fees + int atBytesLength = byteBuffer.getInt(); + if (atBytesLength > BlockChain.getInstance().getMaxBlockSize()) + throw new TransformationException("Byte data too long for Block's AT info"); - if (atBytesLength > BlockChain.getInstance().getMaxBlockSize()) - throw new TransformationException("Byte data too long for Block's AT info"); + // Read AT-address, SHA256 hash and fees + if (atBytesLength % AT_ENTRY_LENGTH != 0) + throw new TransformationException("AT byte data not a multiple of AT entry length"); - ByteBuffer atByteBuffer = byteBuffer.slice(); - atByteBuffer.limit(atBytesLength); + ByteBuffer atByteBuffer = byteBuffer.slice(); + atByteBuffer.limit(atBytesLength); - // Read AT-address, SHA256 hash and fees - if (atBytesLength % AT_ENTRY_LENGTH != 0) - throw new TransformationException("AT byte data not a multiple of AT entry length"); + atStates = new ArrayList<>(); + while (atByteBuffer.hasRemaining()) { + byte[] atAddressBytes = new byte[ADDRESS_LENGTH]; + atByteBuffer.get(atAddressBytes); + String atAddress = Base58.encode(atAddressBytes); - while (atByteBuffer.hasRemaining()) { - byte[] atAddressBytes = new byte[ADDRESS_LENGTH]; - atByteBuffer.get(atAddressBytes); - String atAddress = Base58.encode(atAddressBytes); + byte[] stateHash = new byte[SHA256_LENGTH]; + atByteBuffer.get(stateHash); - byte[] stateHash = new byte[SHA256_LENGTH]; - atByteBuffer.get(stateHash); + long fees = atByteBuffer.getLong(); - long fees = atByteBuffer.getLong(); + // Add this AT's fees to our total + atFees += fees; - // Add this AT's fees to our total - atFees += fees; + atStates.add(new ATStateData(atAddress, stateHash, fees)); + } - atStates.add(new ATStateData(atAddress, stateHash, fees)); + // Bump byteBuffer over AT states just read in slice + byteBuffer.position(byteBuffer.position() + atBytesLength); + + // AT count to reflect the number of states we have + atCount = atStates.size(); } - // Bump byteBuffer over AT states just read in slice - byteBuffer.position(byteBuffer.position() + atBytesLength); - - // AT count to reflect the number of states we have - atCount = atStates.size(); - // Add AT fees to totalFees 
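Summarizing the two AT-section layouts parsed by the branch above (field order only, as read by the code):

// V1: [int atBytesLength], then one entry per executed AT:
//     [AT address (ADDRESS_LENGTH bytes)][state hash (32 bytes)][fees (8 bytes)]  -- AT_ENTRY_LENGTH each
// V2: [int atCount][long atFees][32-byte SHA-256 over all AT states]              -- fixed size regardless of AT count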
totalFees += atFees; @@ -221,16 +243,15 @@ public class BlockTransformer extends Transformer { byteBuffer.get(onlineAccountsSignatures); } - // We should only complain about excess byte data if we aren't expecting more blocks in this ByteBuffer - if (finalBlockInBuffer && byteBuffer.hasRemaining()) - throw new TransformationException("Excess byte data found after parsing Block"); - // We don't have a height! Integer height = null; BlockData blockData = new BlockData(version, reference, transactionCount, totalFees, transactionsSignature, height, timestamp, minterPublicKey, minterSignature, atCount, atFees, encodedOnlineAccounts, onlineAccountsCount, onlineAccountsTimestamp, onlineAccountsSignatures); - return new Triple<>(blockData, transactions, atStates); + if (isV2) + return new BlockTransformation(blockData, transactions, atStatesHash); + else + return new BlockTransformation(blockData, transactions, atStates); } public static int getDataLength(Block block) throws TransformationException { @@ -266,6 +287,14 @@ public class BlockTransformer extends Transformer { } public static byte[] toBytes(Block block) throws TransformationException { + return toBytes(block, false); + } + + public static byte[] toBytesV2(Block block) throws TransformationException { + return toBytes(block, true); + } + + private static byte[] toBytes(Block block, boolean isV2) throws TransformationException { BlockData blockData = block.getBlockData(); try { @@ -279,16 +308,37 @@ public class BlockTransformer extends Transformer { bytes.write(blockData.getMinterSignature()); int atBytesLength = blockData.getATCount() * AT_ENTRY_LENGTH; - bytes.write(Ints.toByteArray(atBytesLength)); + if (isV2) { + ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength); + long atFees = 0; - for (ATStateData atStateData : block.getATStates()) { - // Skip initial states generated by DEPLOY_AT transactions in the same block - if (atStateData.isInitial()) - continue; + for (ATStateData atStateData : block.getATStates()) { + // Skip initial states generated by DEPLOY_AT transactions in the same block + if (atStateData.isInitial()) + continue; - bytes.write(Base58.decode(atStateData.getATAddress())); - bytes.write(atStateData.getStateHash()); - bytes.write(Longs.toByteArray(atStateData.getFees())); + atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8)); + atHashBytes.write(atStateData.getStateHash()); + atHashBytes.write(Longs.toByteArray(atStateData.getFees())); + + atFees += atStateData.getFees(); + } + + bytes.write(Ints.toByteArray(blockData.getATCount())); + bytes.write(Longs.toByteArray(atFees)); + bytes.write(Crypto.digest(atHashBytes.toByteArray())); + } else { + bytes.write(Ints.toByteArray(atBytesLength)); + + for (ATStateData atStateData : block.getATStates()) { + // Skip initial states generated by DEPLOY_AT transactions in the same block + if (atStateData.isInitial()) + continue; + + bytes.write(Base58.decode(atStateData.getATAddress())); + bytes.write(atStateData.getStateHash()); + bytes.write(Longs.toByteArray(atStateData.getFees())); + } } // Transactions diff --git a/src/main/java/org/qortal/utils/BlockArchiveUtils.java b/src/main/java/org/qortal/utils/BlockArchiveUtils.java index 0beff026..84de1a31 100644 --- a/src/main/java/org/qortal/utils/BlockArchiveUtils.java +++ b/src/main/java/org/qortal/utils/BlockArchiveUtils.java @@ -6,6 +6,7 @@ import org.qortal.data.transaction.TransactionData; import org.qortal.repository.BlockArchiveReader; import 
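For clarity, the V2 hash written by toBytesV2 above can be reproduced in isolation; this mirrors the loop in the patch (the helper name is hypothetical):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import com.google.common.primitives.Longs;
import org.qortal.crypto.Crypto;
import org.qortal.data.at.ATStateData;

static byte[] computeAtStatesHash(List<ATStateData> atStates) throws IOException {
    ByteArrayOutputStream preimage = new ByteArrayOutputStream();
    for (ATStateData atStateData : atStates) {
        // Initial states generated by DEPLOY_AT transactions in the same block are skipped, as in V1
        if (atStateData.isInitial())
            continue;
        preimage.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8)); // note: Base58 string bytes, unlike V1's decoded address bytes
        preimage.write(atStateData.getStateHash());
        preimage.write(Longs.toByteArray(atStateData.getFees()));
    }
    return Crypto.digest(preimage.toByteArray());
}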
org.qortal.repository.DataException; import org.qortal.repository.Repository; +import org.qortal.transform.block.BlockTransformation; import java.util.List; @@ -33,8 +34,7 @@ public class BlockArchiveUtils { repository.discardChanges(); final int requestedRange = endHeight+1-startHeight; - List, List>> blockInfoList = - BlockArchiveReader.getInstance().fetchBlocksFromRange(startHeight, endHeight); + List blockInfoList = BlockArchiveReader.getInstance().fetchBlocksFromRange(startHeight, endHeight); // Ensure that we have received all of the requested blocks if (blockInfoList == null || blockInfoList.isEmpty()) { @@ -43,27 +43,26 @@ public class BlockArchiveUtils { if (blockInfoList.size() != requestedRange) { throw new IllegalStateException("Non matching block count when importing from archive"); } - Triple, List> firstBlock = blockInfoList.get(0); - if (firstBlock == null || firstBlock.getA().getHeight() != startHeight) { + BlockTransformation firstBlock = blockInfoList.get(0); + if (firstBlock == null || firstBlock.getBlockData().getHeight() != startHeight) { throw new IllegalStateException("Non matching first block when importing from archive"); } if (blockInfoList.size() > 0) { - Triple, List> lastBlock = - blockInfoList.get(blockInfoList.size() - 1); - if (lastBlock == null || lastBlock.getA().getHeight() != endHeight) { + BlockTransformation lastBlock = blockInfoList.get(blockInfoList.size() - 1); + if (lastBlock == null || lastBlock.getBlockData().getHeight() != endHeight) { throw new IllegalStateException("Non matching last block when importing from archive"); } } // Everything seems okay, so go ahead with the import - for (Triple, List> blockInfo : blockInfoList) { + for (BlockTransformation blockInfo : blockInfoList) { try { // Save block - repository.getBlockRepository().save(blockInfo.getA()); + repository.getBlockRepository().save(blockInfo.getBlockData()); // Save AT state data hashes - for (ATStateData atStateData : blockInfo.getC()) { - atStateData.setHeight(blockInfo.getA().getHeight()); + for (ATStateData atStateData : blockInfo.getAtStates()) { + atStateData.setHeight(blockInfo.getBlockData().getHeight()); repository.getATRepository().save(atStateData); } diff --git a/src/main/resources/blockchain.json b/src/main/resources/blockchain.json index b514653d..dc84ebd0 100644 --- a/src/main/resources/blockchain.json +++ b/src/main/resources/blockchain.json @@ -60,7 +60,9 @@ "shareBinFix": 399000, "calcChainWeightTimestamp": 1620579600000, "transactionV5Timestamp": 1642176000000, - "transactionV6Timestamp": 9999999999999 + "transactionV6Timestamp": 9999999999999, + "disableReferenceTimestamp": 1655222400000, + "aggregateSignatureTimestamp": 1656864000000 }, "genesisInfo": { "version": 4, diff --git a/src/main/resources/i18n/ApiError_de.properties b/src/main/resources/i18n/ApiError_de.properties index 7a81e07a..8f5bffeb 100644 --- a/src/main/resources/i18n/ApiError_de.properties +++ b/src/main/resources/i18n/ApiError_de.properties @@ -6,15 +6,15 @@ ### Common ### JSON = JSON Nachricht konnte nicht geparst werden -INSUFFICIENT_BALANCE = insufficient balance +INSUFFICIENT_BALANCE = Kein Ausgleich UNAUTHORIZED = API-Aufruf nicht autorisiert REPOSITORY_ISSUE = Repository-Fehler -NON_PRODUCTION = this API call is not permitted for production systems +NON_PRODUCTION = Dieser APi-Aufruf ist nicht gestattet für Produtkion -BLOCKCHAIN_NEEDS_SYNC = blockchain needs to synchronize first +BLOCKCHAIN_NEEDS_SYNC = Blockchain muss sich erst verbinden NO_TIME_SYNC = noch keine 
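The two feature triggers added to blockchain.json above are raw epoch-millisecond values; a quick sanity-check sketch for reading them as wall-clock times:

import java.time.Instant;

public class FeatureTriggerCheck {
    public static void main(String[] args) {
        System.out.println(Instant.ofEpochMilli(1655222400000L)); // disableReferenceTimestamp -> 2022-06-14T16:00:00Z
        System.out.println(Instant.ofEpochMilli(1656864000000L)); // aggregateSignatureTimestamp -> 2022-07-03T16:00:00Z
    }
}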
Uhrensynchronisation @@ -68,16 +68,16 @@ ORDER_UNKNOWN = unbekannte asset order ID GROUP_UNKNOWN = Gruppe unbekannt ### Foreign Blockchain ### -FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = foreign blokchain or ElectrumX network issue +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = fremde Blockchain oder ElectrumX Netzwerk Problem -FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = insufficient balance on foreign blockchain +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = unzureichend Bilanz auf fremde blockchain -FOREIGN_BLOCKCHAIN_TOO_SOON = too soon to broadcast foreign blockchain transaction (LockTime/median block time) +FOREIGN_BLOCKCHAIN_TOO_SOON = zu früh um fremde Blockchain-Transaktionen zu übertragen (Sperrzeit/mittlere Blockzeit) ### Trade Portal ### -ORDER_SIZE_TOO_SMALL = order amount too low +ORDER_SIZE_TOO_SMALL = Bestellmenge zu niedrig ### Data ### FILE_NOT_FOUND = Datei nicht gefunden -NO_REPLY = peer did not reply with data +NO_REPLY = Peer hat nicht mit Daten verbinden diff --git a/src/main/resources/i18n/ApiError_ko.properties b/src/main/resources/i18n/ApiError_ko.properties new file mode 100644 index 00000000..4ada1df8 --- /dev/null +++ b/src/main/resources/i18n/ApiError_ko.properties @@ -0,0 +1,83 @@ +#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) +# Keys are from api.ApiError enum + +# "localeLang": "ko", + +### Common ### +JSON = JSON 메시지를 구문 분석하지 못했습니다. + +INSUFFICIENT_BALANCE = 잔고 부족 + +UNAUTHORIZED = 승인되지 않은 API 호출 + +REPOSITORY_ISSUE = 리포지토리 오류 + +NON_PRODUCTION = 이 API 호출은 프로덕션 시스템에 허용되지 않습니다. + +BLOCKCHAIN_NEEDS_SYNC = 블록체인이 먼저 동기화되어야 함 + +NO_TIME_SYNC = 아직 동기화가 없습니다. + +### Validation ### +INVALID_SIGNATURE = 무효 서명 + +INVALID_ADDRESS = 잘못된 주소 + +INVALID_PUBLIC_KEY = 잘못된 공개 키 + +INVALID_DATA = 잘못된 데이터 + +INVALID_NETWORK_ADDRESS = 잘못된 네트워크 주소 + +ADDRESS_UNKNOWN = 계정 주소 알 수 없음 + +INVALID_CRITERIA = 잘못된 검색 기준 + +INVALID_REFERENCE = 무효 참조 + +TRANSFORMATION_ERROR = JSON을 트랜잭션으로 변환할 수 없습니다. + +INVALID_PRIVATE_KEY = 잘못된 개인 키 + +INVALID_HEIGHT = 잘못된 블록 높이 + +CANNOT_MINT = 계정을 만들 수 없습니다. + +### Blocks ### +BLOCK_UNKNOWN = 알 수 없는 블록 + +### Transactions ### +TRANSACTION_UNKNOWN = 알 수 없는 거래 + +PUBLIC_KEY_NOT_FOUND = 공개 키를 찾을 수 없음 + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = 유효하지 않은 거래: %s (%s) + +### Naming ### +NAME_UNKNOWN = 이름 미상 + +### Asset ### +INVALID_ASSET_ID = 잘못된 자산 ID + +INVALID_ORDER_ID = 자산 주문 ID가 잘못되었습니다. + +ORDER_UNKNOWN = 알 수 없는 자산 주문 ID + +### Groups ### +GROUP_UNKNOWN = 알 수 없는 그룹 + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = 외부 블록체인 또는 일렉트럼X 네트워크 문제 + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = 외부 블록체인 잔액 부족 + +FOREIGN_BLOCKCHAIN_TOO_SOON = 외부 블록체인 트랜잭션을 브로드캐스트하기에는 너무 빠릅니다(LockTime/중앙 블록 시간). 
+ +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = 주문량이 너무 적다 + +### Data ### +FILE_NOT_FOUND = 파일을 찾을 수 없음 + +NO_REPLY = 피어가 허용된 시간 내에 응답하지 않음 diff --git a/src/main/resources/i18n/ApiError_ro.properties b/src/main/resources/i18n/ApiError_ro.properties new file mode 100644 index 00000000..87efde28 --- /dev/null +++ b/src/main/resources/i18n/ApiError_ro.properties @@ -0,0 +1,83 @@ +#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) +# Keys are from api.ApiError enum + +# "localeLang": "ro", + +### Comun ### +JSON = nu s-a reusit analizarea mesajului JSON + +INSUFFICIENT_BALANCE = fonduri insuficiente + +UNAUTHORIZED = Solicitare API neautorizata + +REPOSITORY_ISSUE = eroare a depozitarului + +NON_PRODUCTION = aceasta solictare API nu este permisa pentru sistemele de productie + +BLOCKCHAIN_NEEDS_SYNC = blockchain-ul trebuie sa se sincronizeze mai intai + +NO_TIME_SYNC = nu exista inca o sincronizare a ceasului + +### Validation ### +INVALID_SIGNATURE = semnatura invalida + +INVALID_ADDRESS = adresa invalida + +INVALID_PUBLIC_KEY = cheie publica invalid + +INVALID_DATA = date invalida + +INVALID_NETWORK_ADDRESS = invalid network address + +ADDRESS_UNKNOWN = adresa contului necunoscuta + +INVALID_CRITERIA = criteriu de cautare invalid + +INVALID_REFERENCE = referinta invalida + +TRANSFORMATION_ERROR = nu s-a putut transforma JSON in tranzactie + +INVALID_PRIVATE_KEY = invalid private key + +INVALID_HEIGHT = dimensiunea blocului invalida + +CANNOT_MINT = contul nu poate produce moneda + +### Blocks ### +BLOCK_UNKNOWN = bloc necunoscut + +### Transactions ### +TRANSACTION_UNKNOWN = tranzactie necunoscuta + +PUBLIC_KEY_NOT_FOUND = nu s-a gasit cheia publica + +# this one is special in that caller expected to pass two additional strings, hence the two %s +TRANSACTION_INVALID = tranzactie invalida: %s (%s) + +### Naming ### +NAME_UNKNOWN = nume necunoscut + +### Asset ### +INVALID_ASSET_ID = ID active invalid + +INVALID_ORDER_ID = ID-ul de comanda al activului invalid + +ORDER_UNKNOWN = ID necunoscut al comenzii activului + +### Groups ### +GROUP_UNKNOWN = grup necunoscut + +### Foreign Blockchain ### +FOREIGN_BLOCKCHAIN_NETWORK_ISSUE = problema de blockchain strain sau de retea ElectrumX + +FOREIGN_BLOCKCHAIN_BALANCE_ISSUE = sold insuficient pe blockchain strain + +FOREIGN_BLOCKCHAIN_TOO_SOON = prea devreme pentru a difuza o tranzactie blockchain straina (LockTime/median block time) + +### Trade Portal ### +ORDER_SIZE_TOO_SMALL = valoarea tranzactiei este prea mica + +### Data ### +FILE_NOT_FOUND = nu s-a gasit fisierul + +NO_REPLY = omologul nu a raspuns in termenul stabilit diff --git a/src/main/resources/i18n/SysTray_ko.properties b/src/main/resources/i18n/SysTray_ko.properties new file mode 100644 index 00000000..9773a54f --- /dev/null +++ b/src/main/resources/i18n/SysTray_ko.properties @@ -0,0 +1,46 @@ +#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) +# SysTray pop-up menu + +APPLYING_UPDATE_AND_RESTARTING = 자동 업데이트를 적용하고 다시 시작하는 중... + +AUTO_UPDATE = 자동 업데이트 + +BLOCK_HEIGHT = 높이 + +BUILD_VERSION = 빌드 버전 + +CHECK_TIME_ACCURACY = 시간 정확도 점검 + +CONNECTING = 연결하는 + +CONNECTION = 연결 + +CONNECTIONS = 연결 + +CREATING_BACKUP_OF_DB_FILES = 데이터베이스 파일의 백업을 만드는 중... + +DB_BACKUP = Database Backup + +DB_CHECKPOINT = Database Checkpoint + +DB_MAINTENANCE = 데이터베이스 유지 관리 + +EXIT = 종료 + +LITE_NODE = 라이트 노드 + +MINTING_DISABLED = 민팅중이 아님 + +MINTING_ENABLED = \u2714 민팅 + +OPEN_UI = UI 열기 + +PERFORMING_DB_CHECKPOINT = 커밋되지 않은 데이터베이스 변경 내용을 저장하는 중... 
+ +PERFORMING_DB_MAINTENANCE = 예약된 유지 관리 수행 중... + +SYNCHRONIZE_CLOCK = 시간 동기화 + +SYNCHRONIZING_BLOCKCHAIN = 동기화중 + +SYNCHRONIZING_CLOCK = 시간 동기화 diff --git a/src/main/resources/i18n/SysTray_ro.properties b/src/main/resources/i18n/SysTray_ro.properties new file mode 100644 index 00000000..0e1aa6c6 --- /dev/null +++ b/src/main/resources/i18n/SysTray_ro.properties @@ -0,0 +1,46 @@ +#Generated by ResourceBundle Editor (http://essiembre.github.io/eclipse-rbe/) +# SysTray pop-up menu + +APPLYING_UPDATE_AND_RESTARTING = Aplicarea actualizarii automate si repornire... + +AUTO_UPDATE = Actualizare automata + +BLOCK_HEIGHT = dimensiune + +BUILD_VERSION = versiunea compilatiei + +CHECK_TIME_ACCURACY = verificare exactitate ora + +CONNECTING = Se conecteaza + +CONNECTION = conexiune + +CONNECTIONS = conexiuni + +CREATING_BACKUP_OF_DB_FILES = Se creaza copia bazei de date + +DB_BACKUP = Copie baza de date + +DB_CHECKPOINT = Punct de control al bazei de date + +DB_MAINTENANCE = Database Maintenance + +EXIT = iesire + +LITE_NODE = Nod Lite + +MINTING_DISABLED = nu produce moneda + +MINTING_ENABLED = \u2714 Minting + +OPEN_UI = Deschidere interfata utilizator IU + +PERFORMING_DB_CHECKPOINT = Salvarea modificarilor nerealizate ale bazei de date... + +PERFORMING_DB_MAINTENANCE = Efectuarea intretinerii programate + +SYNCHRONIZE_CLOCK = Sincronizare ceas + +SYNCHRONIZING_BLOCKCHAIN = Sincronizare + +SYNCHRONIZING_CLOCK = Se sincronizeaza ceasul diff --git a/src/main/resources/i18n/SysTray_sv.properties b/src/main/resources/i18n/SysTray_sv.properties index 9aec8e9b..0e74337b 100644 --- a/src/main/resources/i18n/SysTray_sv.properties +++ b/src/main/resources/i18n/SysTray_sv.properties @@ -25,7 +25,7 @@ DB_CHECKPOINT = Databaskontrollpunkt DB_MAINTENANCE = Databasunderhåll -EXIT = Utgång +EXIT = Avsluta MINTING_DISABLED = Präglar INTE diff --git a/src/main/resources/i18n/TransactionValidity_ko.properties b/src/main/resources/i18n/TransactionValidity_ko.properties new file mode 100644 index 00000000..a12b33f6 --- /dev/null +++ b/src/main/resources/i18n/TransactionValidity_ko.properties @@ -0,0 +1,195 @@ +# + +ACCOUNT_ALREADY_EXISTS = 계정이 이미 존재합니다. + +ACCOUNT_CANNOT_REWARD_SHARE = 계정이 보상을 공유할 수 없습니다. + +ADDRESS_ABOVE_RATE_LIMIT = 주소가 지정된 속도 제한에 도달했습니다. + +ADDRESS_BLOCKED = 이 주소는 차단되었습니다. + +ALREADY_GROUP_ADMIN = 이미 그룹 관리자 + +ALREADY_GROUP_MEMBER = 이미 그룹 맴버 + +ALREADY_VOTED_FOR_THAT_OPTION = 이미 그 옵션에 투표했다. + +ASSET_ALREADY_EXISTS = 자산이 이미 있습니다. + +ASSET_DOES_NOT_EXIST = 자산이 존재하지 않습니다. + +ASSET_DOES_NOT_MATCH_AT = 자산이 AT의 자산과 일치하지 않습니다. + +ASSET_NOT_SPENDABLE = 자산을 사용할 수 없습니다. + +AT_ALREADY_EXISTS = AT가 이미 있습니다. + +AT_IS_FINISHED = AT가 완료되었습니다. + +AT_UNKNOWN = 알 수 없는 AT + +BAN_EXISTS = 금지가 이미 있습니다. + +BAN_UNKNOWN = 금지 알 수 없음 + +BANNED_FROM_GROUP = 그룹에서 금지 + +BUYER_ALREADY_OWNER = 구매자는 이미 소유자입니다 + +CLOCK_NOT_SYNCED = 동기화되지 않은 시간 + +DUPLICATE_MESSAGE = 주소가 중복 메시지를 보냈습니다. + +DUPLICATE_OPTION = 중복 옵션 + +GROUP_ALREADY_EXISTS = 그룹이 이미 존재합니다 + +GROUP_APPROVAL_DECIDED = 그룹 승인이 이미 결정되었습니다. + +GROUP_APPROVAL_NOT_REQUIRED = 그룹 승인이 필요하지 않음 + +GROUP_DOES_NOT_EXIST = 그룹이 존재하지 않습니다 + +GROUP_ID_MISMATCH = 그룹 ID 불일치 + +GROUP_OWNER_CANNOT_LEAVE = 그룹 소유자는 그룹을 나갈 수 없습니다 + +HAVE_EQUALS_WANT = 소유 자산은 원하는 자산과 동일합니다. 
+ +INCORRECT_NONCE = 잘못된 PoW nonce + +INSUFFICIENT_FEE = 부족한 수수료 + +INVALID_ADDRESS = 잘못된 주소 + +INVALID_AMOUNT = 유효하지 않은 금액 + +INVALID_ASSET_OWNER = 잘못된 자산 소유자 + +INVALID_AT_TRANSACTION = 유효하지 않은 AT 거래 + +INVALID_AT_TYPE_LENGTH = 잘못된 AT '유형' 길이 + +INVALID_BUT_OK = 유효하지 않지만 OK + +INVALID_CREATION_BYTES = 잘못된 생성 바이트 + +INVALID_DATA_LENGTH = 잘못된 데이터 길이 + +INVALID_DESCRIPTION_LENGTH = 잘못된 설명 길이 + +INVALID_GROUP_APPROVAL_THRESHOLD = 잘못된 그룹 승인 임계값 + +INVALID_GROUP_BLOCK_DELAY = 잘못된 그룹 승인 차단 지연 + +INVALID_GROUP_ID = 잘못된 그룹 ID + +INVALID_GROUP_OWNER = 잘못된 그룹 소유자 + +INVALID_LIFETIME = 유효하지 않은 수명 + +INVALID_NAME_LENGTH = 잘못된 이름 길이 + +INVALID_NAME_OWNER = 잘못된 이름 소유자 + +INVALID_OPTION_LENGTH = 잘못된 옵션 길이 + +INVALID_OPTIONS_COUNT = 잘못된 옵션 수 + +INVALID_ORDER_CREATOR = 잘못된 주문 생성자 + +INVALID_PAYMENTS_COUNT = 유효하지 않은 지불 수 + +INVALID_PUBLIC_KEY = 잘못된 공개 키 + +INVALID_QUANTITY = 유효하지 않은 수량 + +INVALID_REFERENCE = 잘못된 참조 + +INVALID_RETURN = 무효 반환 + +INVALID_REWARD_SHARE_PERCENT = 잘못된 보상 공유 비율 + +INVALID_SELLER = 무효 판매자 + +INVALID_TAGS_LENGTH = invalid 'tags' length + +INVALID_TIMESTAMP_SIGNATURE = 유효하지 않은 타임스탬프 서명 + +INVALID_TX_GROUP_ID = 잘못된 트랜잭션 그룹 ID + +INVALID_VALUE_LENGTH = 잘못된 '값' 길이 + +INVITE_UNKNOWN = 알 수 없는 그룹 초대 + +JOIN_REQUEST_EXISTS = 그룹 가입 요청이 이미 있습니다. + +MAXIMUM_REWARD_SHARES = 이미 이 계정에 대한 최대 보상 공유 수에 도달했습니다.t + +MISSING_CREATOR = 실종된 창작자 + +MULTIPLE_NAMES_FORBIDDEN = 계정당 여러 등록 이름은 금지되어 있습니다. + +NAME_ALREADY_FOR_SALE = 이미 판매 중인 이름 + +NAME_ALREADY_REGISTERED = 이미 등록된 이름 + +NAME_BLOCKED = 이 이름은 차단되었습니다 + +NAME_DOES_NOT_EXIST = 이름이 존재하지 않습니다 + +NAME_NOT_FOR_SALE = 이름은 판매용이 아닙니다 + +NAME_NOT_NORMALIZED = 유니코드 '정규화된' 형식이 아닌 이름 + +NEGATIVE_AMOUNT = 유효하지 않은/음수 금액 + +NEGATIVE_FEE = 무효/음수 수수료 + +NEGATIVE_PRICE = 유효하지 않은/음수 가격 + +NO_BALANCE = 잔액 불충분 + +NO_BLOCKCHAIN_LOCK = 노드의 블록체인이 현재 사용 중입니다. + +NO_FLAG_PERMISSION = 계정에 해당 권한이 없습니다 + +NOT_GROUP_ADMIN = 계정은 그룹 관리자가 아닙니다. + +NOT_GROUP_MEMBER = 계정이 그룹 구성원이 아닙니다. + +NOT_MINTING_ACCOUNT = 계정은 발행할 수 없습니다 + +NOT_YET_RELEASED = 아직 출시되지 않은 기능 + +OK = OK + +ORDER_ALREADY_CLOSED = 아직 출시되지 않은 기능 + +ORDER_DOES_NOT_EXIST = 자산 거래 주문이 존재하지 않습니다 + +POLL_ALREADY_EXISTS = 설문조사가 이미 존재합니다 + +POLL_DOES_NOT_EXIST = 설문조사가 존재하지 않습니다 + +POLL_OPTION_DOES_NOT_EXIST = 투표 옵션이 존재하지 않습니다 + +PUBLIC_KEY_UNKNOWN = 공개 키 알 수 없음 + +REWARD_SHARE_UNKNOWN = 알 수 없는 보상 공유 + +SELF_SHARE_EXISTS = 자체 공유(보상 공유)가 이미 존재합니다. + +TIMESTAMP_TOO_NEW = 타임스탬프가 너무 새롭습니다. + +TIMESTAMP_TOO_OLD = 너무 오래된 타임스탬프 + +TOO_MANY_UNCONFIRMED = 계정에 보류 중인 확인되지 않은 거래가 너무 많습니다. 
+ +TRANSACTION_ALREADY_CONFIRMED = 거래가 이미 확인되었습니다 + +TRANSACTION_ALREADY_EXISTS = 거래가 이미 존재합니다 + +TRANSACTION_UNKNOWN = 알 수 없는 거래 + +TX_GROUP_ID_MISMATCH = 트랜잭션의 그룹 ID가 일치하지 않습니다 diff --git a/src/main/resources/i18n/TransactionValidity_ro.properties b/src/main/resources/i18n/TransactionValidity_ro.properties new file mode 100644 index 00000000..6c67f31b --- /dev/null +++ b/src/main/resources/i18n/TransactionValidity_ro.properties @@ -0,0 +1,195 @@ +# + +ACCOUNT_ALREADY_EXISTS = contul exista deja + +ACCOUNT_CANNOT_REWARD_SHARE = contul nu poate genera reward-share + +ADDRESS_ABOVE_RATE_LIMIT = adresa a atins limita specificata + +ADDRESS_BLOCKED = aceasta adresa este blocata + +ALREADY_GROUP_ADMIN = sunteti deja admin + +ALREADY_GROUP_MEMBER = sunteti deja membru + +ALREADY_VOTED_FOR_THAT_OPTION = deja ati votat pentru aceasta optiune + +ASSET_ALREADY_EXISTS = activul deja exista + +ASSET_DOES_NOT_EXIST = activul un exista + +ASSET_DOES_NOT_MATCH_AT = activul nu se potriveste cu activul TA + +ASSET_NOT_SPENDABLE = activul nu poate fi utilizat + +AT_ALREADY_EXISTS = TA exista deja + +AT_IS_FINISHED = TA s-a terminat + +AT_UNKNOWN = TA necunoscuta + +BAN_EXISTS = ban-ul este deja folosit + +BAN_UNKNOWN = ban necunoscut + +BANNED_FROM_GROUP = accesul la grup a fost blocat + +BUYER_ALREADY_OWNER = cumparatorul este deja detinator + +CLOCK_NOT_SYNCED = ceasul nu este sincronizat + +DUPLICATE_MESSAGE = adresa a trimis mesaje duplicate + +DUPLICATE_OPTION = optiune duplicata + +GROUP_ALREADY_EXISTS = grupul deja exista + +GROUP_APPROVAL_DECIDED = aprobarea grupului a fost deja decisa + +GROUP_APPROVAL_NOT_REQUIRED = aprobarea grupului nu este solicitata + +GROUP_DOES_NOT_EXIST = grupul nu exista + +GROUP_ID_MISMATCH = ID-ul grupului incorect + +GROUP_OWNER_CANNOT_LEAVE = proprietarul grupului nu poate parasi grupul + +HAVE_EQUALS_WANT = a avea un obiect este acelasi lucru cu a vrea un obiect + +INCORRECT_NONCE = numar PoW incorect + +INSUFFICIENT_FEE = taxa insuficienta + +INVALID_ADDRESS = adresa invalida + +INVALID_AMOUNT = suma invalida + +INVALID_ASSET_OWNER = propietar al activului invalid + +INVALID_AT_TRANSACTION = tranzactie automata invalida + +INVALID_AT_TYPE_LENGTH = TA invalida 'tip' lungime + +INVALID_BUT_OK = invalid dar OK + +INVALID_CREATION_BYTES = octeti de creatie invalizi + +INVALID_DATA_LENGTH = lungimea datelor invalida + +INVALID_DESCRIPTION_LENGTH = lungimea descrierii invalida + +INVALID_GROUP_APPROVAL_THRESHOLD = prag de aprobare a grupului invalid + +INVALID_GROUP_BLOCK_DELAY = intarziere invalida a blocului de aprobare a grupului + +INVALID_GROUP_ID = ID de grup invalid + +INVALID_GROUP_OWNER = proprietar de grup invalid + +INVALID_LIFETIME = durata de viata invalida + +INVALID_NAME_LENGTH = lungimea numelui invalida + +INVALID_NAME_OWNER = numele proprietarului invalid + +INVALID_OPTION_LENGTH = lungimea optiunii invalida + +INVALID_OPTIONS_COUNT = contor de optiuni invalid + +INVALID_ORDER_CREATOR = creator de ordine invalid + +INVALID_PAYMENTS_COUNT = contor de plati invalid + +INVALID_PUBLIC_KEY = cheie publica invalida + +INVALID_QUANTITY = cantitate invalida + +INVALID_REFERENCE = referinta invalida + +INVALID_RETURN = returnare invalida + +INVALID_REWARD_SHARE_PERCENT = procentaj al cotei de recompensa invalid + +INVALID_SELLER = vanzator invalid + +INVALID_TAGS_LENGTH = lungime a tagurilor invalida + +INVALID_TIMESTAMP_SIGNATURE = semnatura timestamp invalida + +INVALID_TX_GROUP_ID = ID-ul grupului de tranzactii invalid + +INVALID_VALUE_LENGTH = lungimea 
"valorii "invalida + +INVITE_UNKNOWN = invitatie de grup invalida + +JOIN_REQUEST_EXISTS = cererea de aderare la grup exista deja + +MAXIMUM_REWARD_SHARES = ati ajuns deja la numarul maxim de cote de recompensa pentru acest cont + +MISSING_CREATOR = creator lipsa + +MULTIPLE_NAMES_FORBIDDEN = este interzisa folosirea mai multor nume inregistrate pe cont + +NAME_ALREADY_FOR_SALE = numele este deja de vanzare + +NAME_ALREADY_REGISTERED = nume deja inregistrat + +NAME_BLOCKED = numele este blocat + +NAME_DOES_NOT_EXIST = numele nu exista + +NAME_NOT_FOR_SALE = numele nu este de vanzare + +NAME_NOT_NORMALIZED = numele nu este in forma "normalizata" Unicode + +NEGATIVE_AMOUNT = suma invalida/negativa + +NEGATIVE_FEE = taxa invalida/negativa + +NEGATIVE_PRICE = pret invalid/negativ + +NO_BALANCE = fonduri insuficiente + +NO_BLOCKCHAIN_LOCK = nodul blochain-ului este momentan ocupat + +NO_FLAG_PERMISSION = contul nu are aceasta permisiune + +NOT_GROUP_ADMIN = contul nu este un administrator de grup + +NOT_GROUP_MEMBER = contul nu este un membru al grupului + +NOT_MINTING_ACCOUNT = contul nu poate genera moneda Qort + +NOT_YET_RELEASED = caracteristica nu este inca disponibila + +OK = OK + +ORDER_ALREADY_CLOSED = ordinul de tranzactionare a activului este deja inchis + +ORDER_DOES_NOT_EXIST = ordinul de comercializare a activului nu exista + +POLL_ALREADY_EXISTS = sondajul exista deja + +POLL_DOES_NOT_EXIST = sondajul nu exista + +POLL_OPTION_DOES_NOT_EXIST = optiunea de sondaj nu exista + +PUBLIC_KEY_UNKNOWN = cheie publica necunoscuta + +REWARD_SHARE_UNKNOWN = cheie de cota de recompensa necunoscuta + +SELF_SHARE_EXISTS = cota personala (cota de recompensa) exista deja + +TIMESTAMP_TOO_NEW = timestamp prea nou + +TIMESTAMP_TOO_OLD = timestamp prea vechi + +TOO_MANY_UNCONFIRMED = contul are prea multe tranzactii neconfirmate in asteptare + +TRANZACTIE_DEJA_CONFIRMATA = tranzactia a fost deja confirmata + +TRANSACTION_ALREADY_EXISTS = tranzactia exista deja + +TRANSACTION_UNKNOWN = tranzactie necunoscuta + +TX_GROUP_ID_MISMATCH = ID-ul de grup al tranzactiei nu se potriveste diff --git a/src/test/java/org/qortal/test/BlockArchiveTests.java b/src/test/java/org/qortal/test/BlockArchiveTests.java index e2f2ed1c..32fd0283 100644 --- a/src/test/java/org/qortal/test/BlockArchiveTests.java +++ b/src/test/java/org/qortal/test/BlockArchiveTests.java @@ -20,6 +20,7 @@ import org.qortal.test.common.Common; import org.qortal.transaction.DeployAtTransaction; import org.qortal.transaction.Transaction; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.utils.BlockArchiveUtils; import org.qortal.utils.NTP; import org.qortal.utils.Triple; @@ -123,8 +124,8 @@ public class BlockArchiveTests extends Common { // Read block 2 from the archive BlockArchiveReader reader = BlockArchiveReader.getInstance(); - Triple, List> block2Info = reader.fetchBlockAtHeight(2); - BlockData block2ArchiveData = block2Info.getA(); + BlockTransformation block2Info = reader.fetchBlockAtHeight(2); + BlockData block2ArchiveData = block2Info.getBlockData(); // Read block 2 from the repository BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2); @@ -137,8 +138,8 @@ public class BlockArchiveTests extends Common { assertEquals(1, block2ArchiveData.getOnlineAccountsCount()); // Read block 900 from the archive - Triple, List> block900Info = reader.fetchBlockAtHeight(900); - BlockData block900ArchiveData = block900Info.getA(); + BlockTransformation 
block900Info = reader.fetchBlockAtHeight(900); + BlockData block900ArchiveData = block900Info.getBlockData(); // Read block 900 from the repository BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900); @@ -200,10 +201,10 @@ public class BlockArchiveTests extends Common { // Read a block from the archive BlockArchiveReader reader = BlockArchiveReader.getInstance(); - Triple, List> blockInfo = reader.fetchBlockAtHeight(testHeight); - BlockData archivedBlockData = blockInfo.getA(); - ATStateData archivedAtStateData = blockInfo.getC().isEmpty() ? null : blockInfo.getC().get(0); - List archivedTransactions = blockInfo.getB(); + BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight); + BlockData archivedBlockData = blockInfo.getBlockData(); + ATStateData archivedAtStateData = blockInfo.getAtStates().isEmpty() ? null : blockInfo.getAtStates().get(0); + List archivedTransactions = blockInfo.getTransactions(); // Read the same block from the repository BlockData repositoryBlockData = repository.getBlockRepository().fromHeight(testHeight); @@ -255,7 +256,7 @@ public class BlockArchiveTests extends Common { // Check block 10 (unarchived) BlockArchiveReader reader = BlockArchiveReader.getInstance(); - Triple, List> blockInfo = reader.fetchBlockAtHeight(10); + BlockTransformation blockInfo = reader.fetchBlockAtHeight(10); assertNull(blockInfo); } diff --git a/src/test/java/org/qortal/test/BlockTests.java b/src/test/java/org/qortal/test/BlockTests.java index d6fdac02..53b216ec 100644 --- a/src/test/java/org/qortal/test/BlockTests.java +++ b/src/test/java/org/qortal/test/BlockTests.java @@ -24,6 +24,7 @@ import org.qortal.test.common.TransactionUtils; import org.qortal.transaction.Transaction; import org.qortal.transaction.Transaction.TransactionType; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.transform.block.BlockTransformer; import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.Base58; @@ -121,10 +122,10 @@ public class BlockTests extends Common { assertEquals(BlockTransformer.getDataLength(block), bytes.length); - Triple, List> blockInfo = BlockTransformer.fromBytes(bytes); + BlockTransformation blockInfo = BlockTransformer.fromBytes(bytes); // Compare transactions - List deserializedTransactions = blockInfo.getB(); + List deserializedTransactions = blockInfo.getTransactions(); assertEquals("Transaction count differs", blockData.getTransactionCount(), deserializedTransactions.size()); for (int i = 0; i < blockData.getTransactionCount(); ++i) { diff --git a/src/test/java/org/qortal/test/CryptoTests.java b/src/test/java/org/qortal/test/CryptoTests.java index 6a0133d2..2cc73182 100644 --- a/src/test/java/org/qortal/test/CryptoTests.java +++ b/src/test/java/org/qortal/test/CryptoTests.java @@ -4,7 +4,7 @@ import org.junit.Test; import org.qortal.account.PrivateKeyAccount; import org.qortal.block.BlockChain; import org.qortal.crypto.AES; -import org.qortal.crypto.BouncyCastle25519; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.crypto.Crypto; import org.qortal.test.common.Common; import org.qortal.utils.Base58; @@ -123,14 +123,14 @@ public class CryptoTests extends Common { random.nextBytes(ed25519PrivateKey); PrivateKeyAccount account = new PrivateKeyAccount(null, ed25519PrivateKey); - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(account.getPrivateKey()); + byte[] x25519PrivateKey = 
Qortal25519Extras.toX25519PrivateKey(account.getPrivateKey()); X25519PrivateKeyParameters x25519PrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); // Derive X25519 public key from X25519 private key byte[] x25519PublicKeyFromPrivate = x25519PrivateKeyParams.generatePublicKey().getEncoded(); // Derive X25519 public key from Ed25519 public key - byte[] x25519PublicKeyFromEd25519 = BouncyCastle25519.toX25519PublicKey(account.getPublicKey()); + byte[] x25519PublicKeyFromEd25519 = Qortal25519Extras.toX25519PublicKey(account.getPublicKey()); assertEquals(String.format("Public keys do not match, from private key %s", Base58.encode(ed25519PrivateKey)), Base58.encode(x25519PublicKeyFromPrivate), Base58.encode(x25519PublicKeyFromEd25519)); } @@ -162,10 +162,10 @@ public class CryptoTests extends Common { } private static byte[] calcBCSharedSecret(byte[] ed25519PrivateKey, byte[] ed25519PublicKey) { - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ed25519PrivateKey); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ed25519PrivateKey); X25519PrivateKeyParameters privateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); - byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(ed25519PublicKey); + byte[] x25519PublicKey = Qortal25519Extras.toX25519PublicKey(ed25519PublicKey); X25519PublicKeyParameters publicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0); byte[] sharedSecret = new byte[32]; @@ -186,10 +186,10 @@ public class CryptoTests extends Common { final String expectedTheirX25519PublicKey = "ANjnZLRSzW9B1aVamiYGKP3XtBooU9tGGDjUiibUfzp2"; final String expectedSharedSecret = "DTMZYG96x8XZuGzDvHFByVLsXedimqtjiXHhXPVe58Ap"; - byte[] ourX25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ourPrivateKey); + byte[] ourX25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ourPrivateKey); assertEquals("X25519 private key incorrect", expectedOurX25519PrivateKey, Base58.encode(ourX25519PrivateKey)); - byte[] theirX25519PublicKey = BouncyCastle25519.toX25519PublicKey(theirPublicKey); + byte[] theirX25519PublicKey = Qortal25519Extras.toX25519PublicKey(theirPublicKey); assertEquals("X25519 public key incorrect", expectedTheirX25519PublicKey, Base58.encode(theirX25519PublicKey)); byte[] sharedSecret = calcBCSharedSecret(ourPrivateKey, theirPublicKey); diff --git a/src/test/java/org/qortal/test/SchnorrTests.java b/src/test/java/org/qortal/test/SchnorrTests.java new file mode 100644 index 00000000..03c92d2f --- /dev/null +++ b/src/test/java/org/qortal/test/SchnorrTests.java @@ -0,0 +1,190 @@ +package org.qortal.test; + +import com.google.common.hash.HashCode; +import com.google.common.primitives.Bytes; +import com.google.common.primitives.Longs; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import org.junit.Test; +import org.qortal.crypto.Qortal25519Extras; +import org.qortal.data.network.OnlineAccountData; +import org.qortal.transform.Transformer; + +import java.math.BigInteger; +import java.security.SecureRandom; +import java.security.Security; +import java.util.*; +import java.util.stream.Collectors; + +import static org.junit.Assert.*; + +public class SchnorrTests extends Qortal25519Extras { + + static { + // This must go before any calls to LogManager/Logger + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); + + Security.insertProviderAt(new BouncyCastleProvider(), 0); + 
Security.insertProviderAt(new BouncyCastleJsseProvider(), 1); + } + + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + @Test + public void testConversion() { + // Scalar form + byte[] scalarA = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + System.out.printf("a: %s%n", HashCode.fromBytes(scalarA)); + + byte[] pointA = HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes(); + + BigInteger expectedY = new BigInteger("46316835694926478169428394003475163141307993866256225615783033603165251855960"); + + PointAccum pointAccum = Qortal25519Extras.newPointAccum(); + scalarMultBase(scalarA, pointAccum); + + byte[] encoded = new byte[POINT_BYTES]; + if (0 == encodePoint(pointAccum, encoded, 0)) + fail("Point encoding failed"); + + System.out.printf("aG: %s%n", HashCode.fromBytes(encoded)); + assertArrayEquals(pointA, encoded); + + byte[] yBytes = new byte[POINT_BYTES]; + System.arraycopy(encoded,0, yBytes, 0, encoded.length); + Bytes.reverse(yBytes); + + System.out.printf("yBytes: %s%n", HashCode.fromBytes(yBytes)); + BigInteger yBI = new BigInteger(yBytes); + + System.out.printf("aG y: %s%n", yBI); + assertEquals(expectedY, yBI); + } + + @Test + public void testAddition() { + /* + * 1G: b'5866666666666666666666666666666666666666666666666666666666666666' + * 2G: b'c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022' + * 3G: b'd4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712' + */ + + // Scalar form + byte[] s1 = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] s2 = HashCode.fromString("0200000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + + // Point form + byte[] g1 = HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes(); + byte[] g2 = HashCode.fromString("c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022".toLowerCase()).asBytes(); + byte[] g3 = HashCode.fromString("d4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712".toLowerCase()).asBytes(); + + PointAccum p1 = Qortal25519Extras.newPointAccum(); + scalarMultBase(s1, p1); + + PointAccum p2 = Qortal25519Extras.newPointAccum(); + scalarMultBase(s2, p2); + + pointAdd(pointCopy(p1), p2); + + byte[] encoded = new byte[POINT_BYTES]; + if (0 == encodePoint(p2, encoded, 0)) + fail("Point encoding failed"); + + System.out.printf("sum: %s%n", HashCode.fromBytes(encoded)); + assertArrayEquals(g3, encoded); + } + + @Test + public void testSimpleSign() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + + byte[] signature = signForAggregation(privateKey, message); + System.out.printf("signature: %s%n", HashCode.fromBytes(signature)); + } + + @Test + public void testSimpleVerify() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + byte[] signature = HashCode.fromString("13e58e88f3df9e06637d2d5bbb814c028e3ba135494530b9d3b120bdb31168d62c70a37ae9cfba816fe6038ee1ce2fb521b95c4a91c7ff0bb1dd2e67733f2b0d".toLowerCase()).asBytes(); + + byte[] publicKey = new 
byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + assertTrue(verifyAggregated(publicKey, signature, message)); + } + + @Test + public void testSimpleSignAndVerify() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + + byte[] signature = signForAggregation(privateKey, message); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + assertTrue(verifyAggregated(publicKey, signature, message)); + } + + @Test + public void testSimpleAggregate() { + List onlineAccounts = generateOnlineAccounts(1); + + byte[] aggregatePublicKey = aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey)); + + byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature)); + + OnlineAccountData onlineAccount = onlineAccounts.get(0); + + assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getPublicKey()), HashCode.fromBytes(aggregatePublicKey)), onlineAccount.getPublicKey(), aggregatePublicKey); + assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getSignature()), HashCode.fromBytes(aggregateSignature)), onlineAccount.getSignature(), aggregateSignature); + + // This is the crucial test: + long timestamp = onlineAccount.getTimestamp(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes)); + } + + @Test + public void testMultipleAggregate() { + List onlineAccounts = generateOnlineAccounts(5000); + + byte[] aggregatePublicKey = aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey)); + + byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature)); + + OnlineAccountData onlineAccount = onlineAccounts.get(0); + + // This is the crucial test: + long timestamp = onlineAccount.getTimestamp(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes)); + } + + private List generateOnlineAccounts(int numAccounts) { + List onlineAccounts = new ArrayList<>(); + + long timestamp = System.currentTimeMillis(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + + for (int a = 0; a < numAccounts; ++a) { + byte[] privateKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + SECURE_RANDOM.nextBytes(privateKey); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + byte[] signature = signForAggregation(privateKey, timestampBytes); + + onlineAccounts.add(new OnlineAccountData(timestamp, signature, publicKey)); + } + + return onlineAccounts; + } 
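The 5000-account case above shows the payload saving that motivates signature aggregation (and the larger online_accounts column earlier in this patch). Rough arithmetic only, assuming 64-byte Ed25519 signatures and ignoring the exact on-chain encoding:

int onlineAccounts = 5000;
int individualSignatureBytes = onlineAccounts * 64; // 320,000 bytes if each signature is stored separately
int aggregatedSignatureBytes = 64;                  // one aggregate signature, verified against the aggregate public key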
+} diff --git a/src/test/java/org/qortal/test/TransactionReferenceTests.java b/src/test/java/org/qortal/test/TransactionReferenceTests.java new file mode 100644 index 00000000..fdff8b59 --- /dev/null +++ b/src/test/java/org/qortal/test/TransactionReferenceTests.java @@ -0,0 +1,165 @@ +package org.qortal.test; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.account.PrivateKeyAccount; +import org.qortal.data.transaction.PaymentTransactionData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.Common; +import org.qortal.test.common.TransactionUtils; +import org.qortal.test.common.transaction.TestTransaction; +import org.qortal.transaction.Transaction; + +import java.util.Random; + +import static org.junit.Assert.assertEquals; + +public class TransactionReferenceTests extends Common { + + @Before + public void beforeTest() throws DataException { + Common.useDefaultSettings(); + } + + @Test + public void testInvalidRandomReferenceBeforeFeatureTrigger() throws DataException { + Random random = new Random(); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + + byte[] randomPrivateKey = new byte[32]; + random.nextBytes(randomPrivateKey); + PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey); + + // Create payment transaction data + TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L); + + // Set random reference + byte[] randomReference = new byte[64]; + random.nextBytes(randomReference); + paymentTransactionData.setReference(randomReference); + + Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData); + + // Transaction should be invalid due to random reference + Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed(); + assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult); + } + } + + @Test + public void testValidRandomReferenceAfterFeatureTrigger() throws DataException { + Common.useSettings("test-settings-v2-disable-reference.json"); + Random random = new Random(); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + + byte[] randomPrivateKey = new byte[32]; + random.nextBytes(randomPrivateKey); + PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey); + + // Create payment transaction data + TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L); + + // Set random reference + byte[] randomReference = new byte[64]; + random.nextBytes(randomReference); + paymentTransactionData.setReference(randomReference); + + Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData); + + // Transaction should be valid, even with random reference, because reference checking is now disabled + Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed(); + assertEquals(Transaction.ValidationResult.OK, validationResult); + TransactionUtils.signAndImportValid(repository, paymentTransactionData, alice); + } + } + + @Test + public void 
testNullReferenceAfterFeatureTrigger() throws DataException { + Common.useSettings("test-settings-v2-disable-reference.json"); + Random random = new Random(); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + + byte[] randomPrivateKey = new byte[32]; + random.nextBytes(randomPrivateKey); + PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey); + + // Create payment transaction data + TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L); + + // Set null reference + paymentTransactionData.setReference(null); + + Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData); + + // Transaction should be invalid, as we require a non-null reference + Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed(); + assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult); + } + } + + @Test + public void testShortReferenceAfterFeatureTrigger() throws DataException { + Common.useSettings("test-settings-v2-disable-reference.json"); + Random random = new Random(); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + + byte[] randomPrivateKey = new byte[32]; + random.nextBytes(randomPrivateKey); + PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey); + + // Create payment transaction data + TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L); + + // Set a 63-byte reference (one byte too short) + byte[] randomByte = new byte[63]; + random.nextBytes(randomByte); + paymentTransactionData.setReference(randomByte); + + Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData); + + // Transaction should be invalid, as reference isn't long enough + Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed(); + assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult); + } + } + + @Test + public void testLongReferenceAfterFeatureTrigger() throws DataException { + Common.useSettings("test-settings-v2-disable-reference.json"); + Random random = new Random(); + + try (final Repository repository = RepositoryManager.getRepository()) { + PrivateKeyAccount alice = Common.getTestAccount(repository, "alice"); + + byte[] randomPrivateKey = new byte[32]; + random.nextBytes(randomPrivateKey); + PrivateKeyAccount recipient = new PrivateKeyAccount(repository, randomPrivateKey); + + // Create payment transaction data + TransactionData paymentTransactionData = new PaymentTransactionData(TestTransaction.generateBase(alice), recipient.getAddress(), 100000L); + + // Set a 65-byte reference (one byte too long) + byte[] randomByte = new byte[65]; + random.nextBytes(randomByte); + paymentTransactionData.setReference(randomByte); + + Transaction paymentTransaction = Transaction.fromData(repository, paymentTransactionData); + + // Transaction should be invalid, as reference is too long + Transaction.ValidationResult validationResult = paymentTransaction.isValidUnconfirmed(); + assertEquals(Transaction.ValidationResult.INVALID_REFERENCE, validationResult); + } + } + +} diff --git a/src/test/java/org/qortal/test/apps/RewardShareKeys.java b/src/test/java/org/qortal/test/apps/RewardShareKeys.java index
e0bfc1cf..5ba1aab4 100644 --- a/src/test/java/org/qortal/test/apps/RewardShareKeys.java +++ b/src/test/java/org/qortal/test/apps/RewardShareKeys.java @@ -6,6 +6,7 @@ import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; import org.qortal.account.PrivateKeyAccount; import org.qortal.account.PublicKeyAccount; +import org.qortal.crypto.Crypto; import org.qortal.utils.Base58; public class RewardShareKeys { @@ -28,7 +29,7 @@ public class RewardShareKeys { PublicKeyAccount recipientAccount = new PublicKeyAccount(null, args.length > 1 ? Base58.decode(args[1]) : minterAccount.getPublicKey()); byte[] rewardSharePrivateKey = minterAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey()); - byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey); + byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey); System.out.println(String.format("Minter account: %s", minterAccount.getAddress())); System.out.println(String.format("Minter's public key: %s", Base58.encode(minterAccount.getPublicKey()))); diff --git a/src/test/java/org/qortal/test/common/AccountUtils.java b/src/test/java/org/qortal/test/common/AccountUtils.java index 0e7ef020..bda1ae61 100644 --- a/src/test/java/org/qortal/test/common/AccountUtils.java +++ b/src/test/java/org/qortal/test/common/AccountUtils.java @@ -6,6 +6,7 @@ import java.util.HashMap; import java.util.Map; import org.qortal.account.PrivateKeyAccount; +import org.qortal.crypto.Crypto; import org.qortal.data.transaction.BaseTransactionData; import org.qortal.data.transaction.PaymentTransactionData; import org.qortal.data.transaction.RewardShareTransactionData; @@ -45,7 +46,7 @@ public class AccountUtils { long timestamp = repository.getTransactionRepository().fromSignature(reference).getTimestamp() + 1; byte[] rewardSharePrivateKey = mintingAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey()); - byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey); + byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey); BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, mintingAccount.getPublicKey(), fee, null); TransactionData transactionData = new RewardShareTransactionData(baseTransactionData, recipientAccount.getAddress(), rewardSharePublicKey, sharePercent); diff --git a/src/test/java/org/qortal/test/common/Common.java b/src/test/java/org/qortal/test/common/Common.java index c45fcfd7..cb782343 100644 --- a/src/test/java/org/qortal/test/common/Common.java +++ b/src/test/java/org/qortal/test/common/Common.java @@ -61,6 +61,7 @@ public class Common { public static final String testSettingsFilename = "test-settings-v2.json"; + public static boolean shouldRetainRepositoryAfterTest = false; static { // Load/check settings, which potentially sets up blockchain config, etc. 
@@ -126,6 +127,7 @@ public class Common { public static void useSettings(String settingsFilename) throws DataException { Common.useSettingsAndDb(settingsFilename, true); + setShouldRetainRepositoryAfterTest(false); } public static void useDefaultSettings() throws DataException { @@ -207,7 +209,16 @@ public class Common { RepositoryManager.setRepositoryFactory(repositoryFactory); } + public static void setShouldRetainRepositoryAfterTest(boolean shouldRetain) { + shouldRetainRepositoryAfterTest = shouldRetain; + } + public static void deleteTestRepository() throws DataException { + if (shouldRetainRepositoryAfterTest) { + // Don't delete if we've requested to keep the db intact + return; + } + // Delete repository directory if exists Path repositoryPath = Paths.get(Settings.getInstance().getRepositoryPath()); try { diff --git a/src/test/java/org/qortal/test/minting/BlockTimestampTests.java b/src/test/java/org/qortal/test/minting/BlockTimestampTests.java new file mode 100644 index 00000000..0f91408f --- /dev/null +++ b/src/test/java/org/qortal/test/minting/BlockTimestampTests.java @@ -0,0 +1,63 @@ +package org.qortal.test.minting; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.block.Block; +import org.qortal.data.block.BlockData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.BlockUtils; +import org.qortal.test.common.Common; +import org.qortal.transform.Transformer; +import org.qortal.utils.NTP; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +public class BlockTimestampTests extends Common { + + private static class BlockTimestampDataPoint { + public byte[] minterPublicKey; + public int minterAccountLevel; + public long blockTimestamp; + } + + private static final Random RANDOM = new Random(); + + @Before + public void beforeTest() throws DataException { + Common.useSettings("test-settings-v2-block-timestamps.json"); + NTP.setFixedOffset(0L); + } + + @Test + public void testTimestamps() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + Block parentBlock = BlockUtils.mintBlock(repository); + BlockData parentBlockData = parentBlock.getBlockData(); + + // Generate lots of test minters + List dataPoints = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + BlockTimestampDataPoint dataPoint = new BlockTimestampDataPoint(); + + dataPoint.minterPublicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + RANDOM.nextBytes(dataPoint.minterPublicKey); + + dataPoint.minterAccountLevel = RANDOM.nextInt(5) + 5; + + dataPoint.blockTimestamp = Block.calcTimestamp(parentBlockData, dataPoint.minterPublicKey, dataPoint.minterAccountLevel); + + System.out.printf("[%d] level %d, blockTimestamp %d - parentTimestamp %d = %d%n", + i, + dataPoint.minterAccountLevel, + dataPoint.blockTimestamp, + parentBlockData.getTimestamp(), + dataPoint.blockTimestamp - parentBlockData.getTimestamp() + ); + } + } + } +} diff --git a/src/test/java/org/qortal/test/naming/IntegrityTests.java b/src/test/java/org/qortal/test/naming/IntegrityTests.java index d08c8558..d52d4983 100644 --- a/src/test/java/org/qortal/test/naming/IntegrityTests.java +++ b/src/test/java/org/qortal/test/naming/IntegrityTests.java @@ -509,6 +509,7 @@ public class IntegrityTests extends Common { @Ignore("Checks 'live' repository") @Test public void testRepository() throws DataException { + Common.setShouldRetainRepositoryAfterTest(true); 
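// Retaining the repository: deleteTestRepository() returns early while this flag is set, so the 'live' database under repositoryPath survives the test run; useSettings() resets the flag to false for later tests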
Settings.fileInstance("settings.json"); // use 'live' settings String repositoryUrlTemplate = "jdbc:hsqldb:file:%s" + File.separator + "blockchain;create=false;hsqldb.full_log_replay=true"; diff --git a/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java new file mode 100644 index 00000000..6136c1e1 --- /dev/null +++ b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java @@ -0,0 +1,210 @@ +package org.qortal.test.network; + +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import org.junit.Ignore; +import org.junit.Test; +import org.qortal.controller.OnlineAccountsManager; +import org.qortal.data.network.OnlineAccountData; +import org.qortal.network.message.*; +import org.qortal.transform.Transformer; + +import java.nio.ByteBuffer; +import java.security.Security; +import java.util.*; + +import static org.junit.Assert.*; + +public class OnlineAccountsV3Tests { + + private static final Random RANDOM = new Random(); + static { + // This must go before any calls to LogManager/Logger + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); + + Security.insertProviderAt(new BouncyCastleProvider(), 0); + Security.insertProviderAt(new BouncyCastleJsseProvider(), 1); + } + + @Ignore("For informational use") + @Test + public void compareV2ToV3() throws MessageException { + List<OnlineAccountData> onlineAccounts = generateOnlineAccounts(false); + + // How many of each timestamp and leading byte (of public key) + Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte = convertToHashMaps(onlineAccounts); + + byte[] v3DataBytes = new GetOnlineAccountsV3Message(hashesByTimestampThenByte).toBytes(); + int v3ByteSize = v3DataBytes.length; + + byte[] v2DataBytes = new GetOnlineAccountsV2Message(onlineAccounts).toBytes(); + int v2ByteSize = v2DataBytes.length; + + int numTimestamps = hashesByTimestampThenByte.size(); + System.out.printf("For %d accounts split across %d timestamp%s: V2 size %d vs V3 size %d%n", + onlineAccounts.size(), + numTimestamps, + numTimestamps != 1 ? "s" : "", + v2ByteSize, + v3ByteSize + ); + + for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) { + long timestamp = outerMapEntry.getKey(); + + var innerMap = outerMapEntry.getValue(); + + System.out.printf("For timestamp %d: %d / 256 slots used.%n", + timestamp, + innerMap.size() + ); + } + } + + private Map<Long, Map<Byte, byte[]>> convertToHashMaps(List<OnlineAccountData> onlineAccounts) { + // How many of each timestamp and leading byte (of public key) + Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte = new HashMap<>(); + + for (OnlineAccountData onlineAccountData : onlineAccounts) { + Long timestamp = onlineAccountData.getTimestamp(); + Byte leadingByte = onlineAccountData.getPublicKey()[0]; + + hashesByTimestampThenByte + .computeIfAbsent(timestamp, k -> new HashMap<>()) + .compute(leadingByte, (k, v) -> OnlineAccountsManager.xorByteArrayInPlace(v, onlineAccountData.getPublicKey())); + } + + return hashesByTimestampThenByte; + } + + @Test + public void testOnGetOnlineAccountsV3() { + List<OnlineAccountData> ourOnlineAccounts = generateOnlineAccounts(false); + List<OnlineAccountData> peersOnlineAccounts = generateOnlineAccounts(false); + + Map<Long, Map<Byte, byte[]>> ourConvertedHashes = convertToHashMaps(ourOnlineAccounts); + Map<Long, Map<Byte, byte[]>> peersConvertedHashes = convertToHashMaps(peersOnlineAccounts); + + List<String> mockReply = new ArrayList<>(); + + // Warning: no double-checking/fetching - we must be ConcurrentMap compatible!
+ // So no contains()-then-get() or multiple get()s on the same key/map. + for (var ourOuterMapEntry : ourConvertedHashes.entrySet()) { + Long timestamp = ourOuterMapEntry.getKey(); + + var ourInnerMap = ourOuterMapEntry.getValue(); + var peersInnerMap = peersConvertedHashes.get(timestamp); + + if (peersInnerMap == null) { + // Peer doesn't have this timestamp, so if it's valid (i.e. not too old) then we'd have to send all of ours + for (Byte leadingByte : ourInnerMap.keySet()) + mockReply.add(timestamp + ":" + leadingByte); + } else { + // We have entries for this timestamp so compare against peer's entries + for (var ourInnerMapEntry : ourInnerMap.entrySet()) { + Byte leadingByte = ourInnerMapEntry.getKey(); + byte[] peersHash = peersInnerMap.get(leadingByte); + + if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) { + // We don't match peer, or peer doesn't have - send all online accounts for this timestamp and leading byte + mockReply.add(timestamp + ":" + leadingByte); + } + } + } + } + + int numOurTimestamps = ourConvertedHashes.size(); + System.out.printf("We have %d accounts split across %d timestamp%s%n", + ourOnlineAccounts.size(), + numOurTimestamps, + numOurTimestamps != 1 ? "s" : "" + ); + + int numPeerTimestamps = peersConvertedHashes.size(); + System.out.printf("Peer sent %d accounts split across %d timestamp%s%n", + peersOnlineAccounts.size(), + numPeerTimestamps, + numPeerTimestamps != 1 ? "s" : "" + ); + + System.out.printf("We need to send: %d%n%s%n", mockReply.size(), String.join(", ", mockReply)); + } + + @Test + public void testSerialization() throws MessageException { + List<OnlineAccountData> onlineAccountsOut = generateOnlineAccounts(true); + Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteOut = convertToHashMaps(onlineAccountsOut); + + validateSerialization(hashesByTimestampThenByteOut); + } + + @Test + public void testEmptySerialization() throws MessageException { + Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteOut = Collections.emptyMap(); + validateSerialization(hashesByTimestampThenByteOut); + + hashesByTimestampThenByteOut = new HashMap<>(); + validateSerialization(hashesByTimestampThenByteOut); + } + + private void validateSerialization(Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteOut) throws MessageException { + Message messageOut = new GetOnlineAccountsV3Message(hashesByTimestampThenByteOut); + byte[] messageBytes = messageOut.toBytes(); + + ByteBuffer byteBuffer = ByteBuffer.wrap(messageBytes).asReadOnlyBuffer(); + + GetOnlineAccountsV3Message messageIn = (GetOnlineAccountsV3Message) Message.fromByteBuffer(byteBuffer); + + Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByteIn = messageIn.getHashesByTimestampThenByte(); + + Set<Long> timestampsIn = hashesByTimestampThenByteIn.keySet(); + Set<Long> timestampsOut = hashesByTimestampThenByteOut.keySet(); + assertEquals("timestamp count mismatch", timestampsOut.size(), timestampsIn.size()); + assertTrue("timestamps mismatch", timestampsIn.containsAll(timestampsOut)); + + for (Long timestamp : timestampsIn) { + Map<Byte, byte[]> hashesByByteIn = hashesByTimestampThenByteIn.get(timestamp); + Map<Byte, byte[]> hashesByByteOut = hashesByTimestampThenByteOut.get(timestamp); + assertNotNull("timestamp entry missing", hashesByByteOut); + + Set<Byte> leadingBytesIn = hashesByByteIn.keySet(); + Set<Byte> leadingBytesOut = hashesByByteOut.keySet(); + assertEquals("leading byte entry count mismatch", leadingBytesOut.size(), leadingBytesIn.size()); + assertTrue("leading byte entry mismatch", leadingBytesIn.containsAll(leadingBytesOut)); + + for (Byte leadingByte : leadingBytesOut) { + byte[] bytesIn = hashesByByteIn.get(leadingByte); + byte[] bytesOut
= hashesByByteOut.get(leadingByte); + + assertTrue("pubkey hash mismatch", Arrays.equals(bytesOut, bytesIn)); + } + } + } + + private List generateOnlineAccounts(boolean withSignatures) { + List onlineAccounts = new ArrayList<>(); + + int numTimestamps = RANDOM.nextInt(2) + 1; // 1 or 2 + + for (int t = 0; t < numTimestamps; ++t) { + long timestamp = 1 << 31 + (t + 1) << 12; + int numAccounts = RANDOM.nextInt(3000); + + for (int a = 0; a < numAccounts; ++a) { + byte[] sig = null; + if (withSignatures) { + sig = new byte[Transformer.SIGNATURE_LENGTH]; + RANDOM.nextBytes(sig); + } + + byte[] pubkey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + RANDOM.nextBytes(pubkey); + + onlineAccounts.add(new OnlineAccountData(timestamp, sig, pubkey)); + } + } + + return onlineAccounts; + } + +} diff --git a/src/test/resources/test-chain-v2-block-timestamps.json b/src/test/resources/test-chain-v2-block-timestamps.json new file mode 100644 index 00000000..3fa8a1e5 --- /dev/null +++ b/src/test/resources/test-chain-v2-block-timestamps.json @@ -0,0 +1,86 @@ +{ + "isTestChain": true, + "blockTimestampMargin": 500, + "transactionExpiryPeriod": 86400000, + "maxBlockSize": 2097152, + "maxBytesPerUnitFee": 1024, + "unitFee": "0.1", + "nameRegistrationUnitFees": [ + { "timestamp": 1645372800000, "fee": "5" } + ], + "requireGroupForApproval": false, + "minAccountLevelToRewardShare": 5, + "maxRewardSharesPerMintingAccount": 20, + "founderEffectiveMintingLevel": 10, + "onlineAccountSignaturesMinLifetime": 3600000, + "onlineAccountSignaturesMaxLifetime": 86400000, + "rewardsByHeight": [ + { "height": 1, "reward": 100 }, + { "height": 11, "reward": 10 }, + { "height": 21, "reward": 1 } + ], + "sharesByLevel": [ + { "levels": [ 1, 2 ], "share": 0.05 }, + { "levels": [ 3, 4 ], "share": 0.10 }, + { "levels": [ 5, 6 ], "share": 0.15 }, + { "levels": [ 7, 8 ], "share": 0.20 }, + { "levels": [ 9, 10 ], "share": 0.25 } + ], + "qoraHoldersShare": 0.20, + "qoraPerQortReward": 250, + "blocksNeededByLevel": [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 ], + "blockTimingsByHeight": [ + { "height": 1, "target": 60000, "deviation": 30000, "power": 0.2 }, + { "height": 2, "target": 70000, "deviation": 10000, "power": 0.8 } + ], + "ciyamAtSettings": { + "feePerStep": "0.0001", + "maxStepsPerRound": 500, + "stepsPerFunctionCall": 10, + "minutesPerBlock": 1 + }, + "featureTriggers": { + "messageHeight": 0, + "atHeight": 0, + "assetsTimestamp": 0, + "votingTimestamp": 0, + "arbitraryTimestamp": 0, + "powfixTimestamp": 0, + "qortalTimestamp": 0, + "newAssetPricingTimestamp": 0, + "groupApprovalTimestamp": 0, + "atFindNextTransactionFix": 0, + "newBlockSigHeight": 999999, + "shareBinFix": 999999, + "calcChainWeightTimestamp": 0, + "transactionV5Timestamp": 0, + "transactionV6Timestamp": 9999999999999, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 + }, + "genesisInfo": { + "version": 4, + "timestamp": 0, + "transactions": [ + { "type": "ISSUE_ASSET", "assetName": "QORT", "description": "QORT native coin", "data": "", "quantity": 0, "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "assetName": "Legacy-QORA", "description": "Representative legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + { "type": "ISSUE_ASSET", "assetName": "QORT-from-QORA", "description": "QORT gained from holding legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + + { "type": "GENESIS", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "amount": 
"1000000000" }, + { "type": "GENESIS", "recipient": "QixPbJUwsaHsVEofJdozU9zgVqkK6aYhrK", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "QaUpHNhT3Ygx6avRiKobuLdusppR5biXjL", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "amount": "1000000" }, + + { "type": "CREATE_GROUP", "creatorPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "groupName": "dev-group", "description": "developer group", "isOpen": false, "approvalThreshold": "PCT100", "minimumBlockDelay": 0, "maximumBlockDelay": 1440 }, + + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "TEST", "description": "test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "C6wuddsBV3HzRrXUtezE7P5MoRXp5m3mEDokRDGZB6ry", "assetName": "OTHER", "description": "other test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "GOLD", "description": "gold test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + + { "type": "ACCOUNT_FLAGS", "target": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "andMask": -1, "orMask": 1, "xorMask": 0 }, + { "type": "REWARD_SHARE", "minterPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "rewardSharePublicKey": "7PpfnvLSG7y4HPh8hE7KoqAjLCkv7Ui6xw4mKAkbZtox", "sharePercent": "100" }, + + { "type": "ACCOUNT_LEVEL", "target": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "level": 5 } + ] + } +} diff --git a/src/test/resources/test-chain-v2-disable-reference.json b/src/test/resources/test-chain-v2-disable-reference.json new file mode 100644 index 00000000..64aecbb6 --- /dev/null +++ b/src/test/resources/test-chain-v2-disable-reference.json @@ -0,0 +1,85 @@ +{ + "isTestChain": true, + "blockTimestampMargin": 500, + "transactionExpiryPeriod": 86400000, + "maxBlockSize": 2097152, + "maxBytesPerUnitFee": 1024, + "unitFee": "0.1", + "nameRegistrationUnitFees": [ + { "timestamp": 1645372800000, "fee": "5" } + ], + "requireGroupForApproval": false, + "minAccountLevelToRewardShare": 5, + "maxRewardSharesPerMintingAccount": 20, + "founderEffectiveMintingLevel": 10, + "onlineAccountSignaturesMinLifetime": 3600000, + "onlineAccountSignaturesMaxLifetime": 86400000, + "rewardsByHeight": [ + { "height": 1, "reward": 100 }, + { "height": 11, "reward": 10 }, + { "height": 21, "reward": 1 } + ], + "sharesByLevel": [ + { "levels": [ 1, 2 ], "share": 0.05 }, + { "levels": [ 3, 4 ], "share": 0.10 }, + { "levels": [ 5, 6 ], "share": 0.15 }, + { "levels": [ 7, 8 ], "share": 0.20 }, + { "levels": [ 9, 10 ], "share": 0.25 } + ], + "qoraHoldersShare": 0.20, + "qoraPerQortReward": 250, + "blocksNeededByLevel": [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 ], + "blockTimingsByHeight": [ + { "height": 1, "target": 60000, "deviation": 30000, "power": 0.2 } + ], + "ciyamAtSettings": { + "feePerStep": "0.0001", + "maxStepsPerRound": 500, + "stepsPerFunctionCall": 10, + "minutesPerBlock": 1 + }, + "featureTriggers": { + "messageHeight": 0, + "atHeight": 0, + "assetsTimestamp": 0, + "votingTimestamp": 0, + "arbitraryTimestamp": 0, + "powfixTimestamp": 0, + "qortalTimestamp": 0, + "newAssetPricingTimestamp": 0, + "groupApprovalTimestamp": 0, + "atFindNextTransactionFix": 0, + "newBlockSigHeight": 999999, + "shareBinFix": 999999, + "calcChainWeightTimestamp": 0, + 
"transactionV5Timestamp": 0, + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 0, + "aggregateSignatureTimestamp": 0 + }, + "genesisInfo": { + "version": 4, + "timestamp": 0, + "transactions": [ + { "type": "ISSUE_ASSET", "assetName": "QORT", "description": "QORT native coin", "data": "", "quantity": 0, "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "assetName": "Legacy-QORA", "description": "Representative legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + { "type": "ISSUE_ASSET", "assetName": "QORT-from-QORA", "description": "QORT gained from holding legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + + { "type": "GENESIS", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "amount": "1000000000" }, + { "type": "GENESIS", "recipient": "QixPbJUwsaHsVEofJdozU9zgVqkK6aYhrK", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "QaUpHNhT3Ygx6avRiKobuLdusppR5biXjL", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "amount": "1000000" }, + + { "type": "CREATE_GROUP", "creatorPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "groupName": "dev-group", "description": "developer group", "isOpen": false, "approvalThreshold": "PCT100", "minimumBlockDelay": 0, "maximumBlockDelay": 1440 }, + + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "TEST", "description": "test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "C6wuddsBV3HzRrXUtezE7P5MoRXp5m3mEDokRDGZB6ry", "assetName": "OTHER", "description": "other test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "GOLD", "description": "gold test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + + { "type": "ACCOUNT_FLAGS", "target": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "andMask": -1, "orMask": 1, "xorMask": 0 }, + { "type": "REWARD_SHARE", "minterPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "rewardSharePublicKey": "7PpfnvLSG7y4HPh8hE7KoqAjLCkv7Ui6xw4mKAkbZtox", "sharePercent": "100" }, + + { "type": "ACCOUNT_LEVEL", "target": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "level": 5 } + ] + } +} diff --git a/src/test/resources/test-chain-v2-founder-rewards.json b/src/test/resources/test-chain-v2-founder-rewards.json index 2f8c2832..30b1ffdd 100644 --- a/src/test/resources/test-chain-v2-founder-rewards.json +++ b/src/test/resources/test-chain-v2-founder-rewards.json @@ -54,7 +54,9 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-leftover-reward.json b/src/test/resources/test-chain-v2-leftover-reward.json index 91c16b22..bb94231a 100644 --- a/src/test/resources/test-chain-v2-leftover-reward.json +++ b/src/test/resources/test-chain-v2-leftover-reward.json @@ -54,7 +54,9 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 
}, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-minting.json b/src/test/resources/test-chain-v2-minting.json index 565763f9..4c1c51b9 100644 --- a/src/test/resources/test-chain-v2-minting.json +++ b/src/test/resources/test-chain-v2-minting.json @@ -54,7 +54,9 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-qora-holder-extremes.json b/src/test/resources/test-chain-v2-qora-holder-extremes.json index 3f694399..cfeac824 100644 --- a/src/test/resources/test-chain-v2-qora-holder-extremes.json +++ b/src/test/resources/test-chain-v2-qora-holder-extremes.json @@ -54,7 +54,9 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-qora-holder.json b/src/test/resources/test-chain-v2-qora-holder.json index f92171c8..71d05767 100644 --- a/src/test/resources/test-chain-v2-qora-holder.json +++ b/src/test/resources/test-chain-v2-qora-holder.json @@ -54,7 +54,9 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-reward-levels.json b/src/test/resources/test-chain-v2-reward-levels.json index 958e689b..eda61338 100644 --- a/src/test/resources/test-chain-v2-reward-levels.json +++ b/src/test/resources/test-chain-v2-reward-levels.json @@ -54,7 +54,9 @@ "shareBinFix": 6, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2-reward-scaling.json b/src/test/resources/test-chain-v2-reward-scaling.json index ab031a17..dafd3a51 100644 --- a/src/test/resources/test-chain-v2-reward-scaling.json +++ b/src/test/resources/test-chain-v2-reward-scaling.json @@ -54,7 +54,9 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-chain-v2.json b/src/test/resources/test-chain-v2.json index d466936e..114cf91e 100644 --- a/src/test/resources/test-chain-v2.json +++ b/src/test/resources/test-chain-v2.json @@ -54,7 +54,9 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "disableReferenceTimestamp": 9999999999999, + "aggregateSignatureTimestamp": 0 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-settings-v2-block-timestamps.json b/src/test/resources/test-settings-v2-block-timestamps.json new file mode 100644 index 00000000..dbbbebbe --- /dev/null +++ b/src/test/resources/test-settings-v2-block-timestamps.json @@ -0,0 +1,19 @@ +{ + "repositoryPath": "testdb", + 
"bitcoinNet": "TEST3", + "litecoinNet": "TEST3", + "restrictedApi": false, + "blockchainConfig": "src/test/resources/test-chain-v2-block-timestamps.json", + "exportPath": "qortal-backup-test", + "bootstrap": false, + "wipeUnconfirmedOnStart": false, + "testNtpOffset": 0, + "minPeers": 0, + "pruneBlockLimit": 100, + "bootstrapFilenamePrefix": "test-", + "dataPath": "data-test", + "tempDataPath": "data-test/_temp", + "listsPath": "lists-test", + "storagePolicy": "FOLLOWED_OR_VIEWED", + "maxStorageCapacity": 104857600 +} diff --git a/src/test/resources/test-settings-v2-disable-reference.json b/src/test/resources/test-settings-v2-disable-reference.json new file mode 100644 index 00000000..d9607c83 --- /dev/null +++ b/src/test/resources/test-settings-v2-disable-reference.json @@ -0,0 +1,11 @@ +{ + "repositoryPath": "testdb", + "restrictedApi": false, + "blockchainConfig": "src/test/resources/test-chain-v2-disable-reference.json", + "exportPath": "qortal-backup-test", + "bootstrap": false, + "wipeUnconfirmedOnStart": false, + "testNtpOffset": 0, + "minPeers": 0, + "pruneBlockLimit": 100 +}