diff --git a/src/main/java/org/qortal/account/PrivateKeyAccount.java b/src/main/java/org/qortal/account/PrivateKeyAccount.java index 3b370d12..4b646b4a 100644 --- a/src/main/java/org/qortal/account/PrivateKeyAccount.java +++ b/src/main/java/org/qortal/account/PrivateKeyAccount.java @@ -11,15 +11,15 @@ public class PrivateKeyAccount extends PublicKeyAccount { private final Ed25519PrivateKeyParameters edPrivateKeyParams; /** - * Create PrivateKeyAccount using byte[32] seed. + * Create PrivateKeyAccount using byte[32] private key. * - * @param seed + * @param privateKey * byte[32] used to create private/public key pair * @throws IllegalArgumentException - * if passed invalid seed + * if passed invalid privateKey */ - public PrivateKeyAccount(Repository repository, byte[] seed) { - this(repository, new Ed25519PrivateKeyParameters(seed, 0)); + public PrivateKeyAccount(Repository repository, byte[] privateKey) { + this(repository, new Ed25519PrivateKeyParameters(privateKey, 0)); } private PrivateKeyAccount(Repository repository, Ed25519PrivateKeyParameters edPrivateKeyParams) { @@ -37,10 +37,6 @@ public class PrivateKeyAccount extends PublicKeyAccount { return this.privateKey; } - public static byte[] toPublicKey(byte[] seed) { - return new Ed25519PrivateKeyParameters(seed, 0).generatePublicKey().getEncoded(); - } - public byte[] sign(byte[] message) { return Crypto.sign(this.edPrivateKeyParams, message); } diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 5fe005d6..ddfe247a 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -10,6 +10,7 @@ import java.math.BigInteger; import java.math.RoundingMode; import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; +import java.text.MessageFormat; import java.text.NumberFormat; import java.util.*; import java.util.stream.Collectors; @@ -27,6 +28,7 @@ import org.qortal.block.BlockChain.BlockTimingByHeight; 
import org.qortal.block.BlockChain.AccountLevelShareBin; import org.qortal.controller.OnlineAccountsManager; import org.qortal.crypto.Crypto; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.data.account.AccountBalanceData; import org.qortal.data.account.AccountData; import org.qortal.data.account.EligibleQoraHolderData; @@ -221,11 +223,10 @@ public class Block { return accountAmount; } } + /** Always use getExpandedAccounts() to access this, as it's lazy-instantiated. */ private List cachedExpandedAccounts = null; - /** Opportunistic cache of this block's valid online accounts. Only created by call to isValid(). */ - private List cachedValidOnlineAccounts = null; /** Opportunistic cache of this block's valid online reward-shares. Only created by call to isValid(). */ private List cachedOnlineRewardShares = null; @@ -347,18 +348,21 @@ public class Block { int version = parentBlock.getNextBlockVersion(); byte[] reference = parentBlockData.getSignature(); - // Fetch our list of online accounts - List onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(); - if (onlineAccounts.isEmpty()) { - LOGGER.error("No online accounts - not even our own?"); + // Qortal: minter is always a reward-share, so find actual minter and get their effective minting level + int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey()); + if (minterLevel == 0) { + LOGGER.error("Minter effective level returned zero?"); return null; } - // Find newest online accounts timestamp - long onlineAccountsTimestamp = 0; - for (OnlineAccountData onlineAccountData : onlineAccounts) { - if (onlineAccountData.getTimestamp() > onlineAccountsTimestamp) - onlineAccountsTimestamp = onlineAccountData.getTimestamp(); + long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel); + long onlineAccountsTimestamp = OnlineAccountsManager.getCurrentOnlineAccountTimestamp(); + + // Fetch our list of online accounts + List 
onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(onlineAccountsTimestamp); + if (onlineAccounts.isEmpty()) { + LOGGER.error("No online accounts - not even our own?"); + return null; } // Load sorted list of reward share public keys into memory, so that the indexes can be obtained. @@ -369,10 +373,6 @@ public class Block { // Map using index into sorted list of reward-shares as key Map indexedOnlineAccounts = new HashMap<>(); for (OnlineAccountData onlineAccountData : onlineAccounts) { - // Disregard online accounts with different timestamps - if (onlineAccountData.getTimestamp() != onlineAccountsTimestamp) - continue; - Integer accountIndex = getRewardShareIndex(onlineAccountData.getPublicKey(), allRewardSharePublicKeys); if (accountIndex == null) // Online account (reward-share) with current timestamp but reward-share cancelled @@ -389,26 +389,29 @@ public class Block { byte[] encodedOnlineAccounts = BlockTransformer.encodeOnlineAccounts(onlineAccountsSet); int onlineAccountsCount = onlineAccountsSet.size(); - // Concatenate online account timestamp signatures (in correct order) - byte[] onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH]; - for (int i = 0; i < onlineAccountsCount; ++i) { - Integer accountIndex = accountIndexes.get(i); - OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex); - System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH); + byte[] onlineAccountsSignatures; + if (timestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // Collate all signatures + Collection signaturesToAggregate = indexedOnlineAccounts.values() + .stream() + .map(OnlineAccountData::getSignature) + .collect(Collectors.toList()); + + // Aggregated, single signature + onlineAccountsSignatures = Qortal25519Extras.aggregateSignatures(signaturesToAggregate); + } else { + // Concatenate 
online account timestamp signatures (in correct order) + onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH]; + for (int i = 0; i < onlineAccountsCount; ++i) { + Integer accountIndex = accountIndexes.get(i); + OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex); + System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH); + } } byte[] minterSignature = minter.sign(BlockTransformer.getBytesForMinterSignature(parentBlockData, minter.getPublicKey(), encodedOnlineAccounts)); - // Qortal: minter is always a reward-share, so find actual minter and get their effective minting level - int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey()); - if (minterLevel == 0) { - LOGGER.error("Minter effective level returned zero?"); - return null; - } - - long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel); - int transactionCount = 0; byte[] transactionsSignature = null; int height = parentBlockData.getHeight() + 1; @@ -1013,49 +1016,59 @@ public class Block { if (this.blockData.getOnlineAccountsSignatures() == null || this.blockData.getOnlineAccountsSignatures().length == 0) return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MISSING; - if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH) - return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // We expect just the one, aggregated signature + if (this.blockData.getOnlineAccountsSignatures().length != Transformer.SIGNATURE_LENGTH) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + } else { + if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH) + return 
ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + } // Check signatures long onlineTimestamp = this.blockData.getOnlineAccountsTimestamp(); byte[] onlineTimestampBytes = Longs.toByteArray(onlineTimestamp); - // If this block is much older than current online timestamp, then there's no point checking current online accounts - List currentOnlineAccounts = onlineTimestamp < NTP.getTime() - OnlineAccountsManager.ONLINE_TIMESTAMP_MODULUS - ? null - : OnlineAccountsManager.getInstance().getOnlineAccounts(); - List latestBlocksOnlineAccounts = OnlineAccountsManager.getInstance().getLatestBlocksOnlineAccounts(); - - // Extract online accounts' timestamp signatures from block data + // Extract online accounts' timestamp signatures from block data. Only one signature if aggregated. List onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures()); - // We'll build up a list of online accounts to hand over to Controller if block is added to chain - // and this will become latestBlocksOnlineAccounts (above) to reduce CPU load when we process next block... 
- List ourOnlineAccounts = new ArrayList<>(); + if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // Aggregate all public keys + Collection publicKeys = onlineRewardShares.stream() + .map(RewardShareData::getRewardSharePublicKey) + .collect(Collectors.toList()); - for (int i = 0; i < onlineAccountsSignatures.size(); ++i) { - byte[] signature = onlineAccountsSignatures.get(i); - byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey(); + byte[] aggregatePublicKey = Qortal25519Extras.aggregatePublicKeys(publicKeys); - OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey); - ourOnlineAccounts.add(onlineAccountData); + byte[] aggregateSignature = onlineAccountsSignatures.get(0); - // If signature is still current then no need to perform Ed25519 verify - if (currentOnlineAccounts != null && currentOnlineAccounts.remove(onlineAccountData)) - // remove() returned true, so online account still current - // and one less entry in currentOnlineAccounts to check next time - continue; - - // If signature was okay in latest block then no need to perform Ed25519 verify - if (latestBlocksOnlineAccounts != null && latestBlocksOnlineAccounts.contains(onlineAccountData)) - continue; - - if (!Crypto.verify(publicKey, signature, onlineTimestampBytes)) + // One-step verification of aggregate signature using aggregate public key + if (!Qortal25519Extras.verifyAggregated(aggregatePublicKey, aggregateSignature, onlineTimestampBytes)) return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + } else { + // Build block's view of online accounts + Set onlineAccounts = new HashSet<>(); + for (int i = 0; i < onlineAccountsSignatures.size(); ++i) { + byte[] signature = onlineAccountsSignatures.get(i); + byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey(); + + OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey); + 
onlineAccounts.add(onlineAccountData); + } + + // Remove those already validated & cached by online accounts manager - no need to re-validate them + OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp); + + // Validate the rest + for (OnlineAccountData onlineAccount : onlineAccounts) + if (!Crypto.verify(onlineAccount.getPublicKey(), onlineAccount.getSignature(), onlineTimestampBytes)) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + + // We've validated these, so allow online accounts manager to cache + OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp); } // All online accounts valid, so save our list of online accounts for potential later use - this.cachedValidOnlineAccounts = ourOnlineAccounts; this.cachedOnlineRewardShares = onlineRewardShares; return ValidationResult.OK; @@ -1426,9 +1439,6 @@ public class Block { postBlockTidy(); - // Give Controller our cached, valid online accounts data (if any) to help reduce CPU load for next block - OnlineAccountsManager.getInstance().pushLatestBlocksOnlineAccounts(this.cachedValidOnlineAccounts); - // Log some debugging info relating to the block weight calculation this.logDebugInfo(); } @@ -1644,9 +1654,6 @@ public class Block { this.blockData.setHeight(null); postBlockTidy(); - - // Remove any cached, valid online accounts data from Controller - OnlineAccountsManager.getInstance().popLatestBlocksOnlineAccounts(); } protected void orphanTransactionsFromBlock() throws DataException { diff --git a/src/main/java/org/qortal/block/BlockChain.java b/src/main/java/org/qortal/block/BlockChain.java index 76815bdf..e0e64e24 100644 --- a/src/main/java/org/qortal/block/BlockChain.java +++ b/src/main/java/org/qortal/block/BlockChain.java @@ -71,7 +71,8 @@ public class BlockChain { calcChainWeightTimestamp, transactionV5Timestamp, transactionV6Timestamp, - disableReferenceTimestamp + disableReferenceTimestamp, + aggregateSignatureTimestamp; } // 
Custom transaction fees @@ -415,6 +416,10 @@ public class BlockChain { return this.featureTriggers.get(FeatureTrigger.disableReferenceTimestamp.name()).longValue(); } + public long getAggregateSignatureTimestamp() { + return this.featureTriggers.get(FeatureTrigger.aggregateSignatureTimestamp.name()).longValue(); + } + // More complex getters for aspects that change by height or timestamp public long getRewardAtHeight(int ourHeight) { diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java index 9966d6a9..c77bc579 100644 --- a/src/main/java/org/qortal/controller/BlockMinter.java +++ b/src/main/java/org/qortal/controller/BlockMinter.java @@ -65,9 +65,8 @@ public class BlockMinter extends Thread { // Lite nodes do not mint return; } - - try (final Repository repository = RepositoryManager.getRepository()) { - if (Settings.getInstance().getWipeUnconfirmedOnStart()) { + if (Settings.getInstance().getWipeUnconfirmedOnStart()) { + try (final Repository repository = RepositoryManager.getRepository()) { // Wipe existing unconfirmed transactions List unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions(); @@ -77,30 +76,31 @@ public class BlockMinter extends Thread { } repository.saveChanges(); + } catch (DataException e) { + LOGGER.warn("Repository issue trying to wipe unconfirmed transactions on start-up: {}", e.getMessage()); + // Fall-through to normal behaviour in case we can recover } + } - // Going to need this a lot... 
- BlockRepository blockRepository = repository.getBlockRepository(); - BlockData previousBlockData = null; + BlockData previousBlockData = null; - // Vars to keep track of blocks that were skipped due to chain weight - byte[] parentSignatureForLastLowWeightBlock = null; - Long timeOfLastLowWeightBlock = null; + // Vars to keep track of blocks that were skipped due to chain weight + byte[] parentSignatureForLastLowWeightBlock = null; + Long timeOfLastLowWeightBlock = null; - List newBlocks = new ArrayList<>(); + List newBlocks = new ArrayList<>(); - // Flags for tracking change in whether minting is possible, - // so we can notify Controller, and further update SysTray, etc. - boolean isMintingPossible = false; - boolean wasMintingPossible = isMintingPossible; - while (running) { - repository.discardChanges(); // Free repository locks, if any + // Flags for tracking change in whether minting is possible, + // so we can notify Controller, and further update SysTray, etc. + boolean isMintingPossible = false; + boolean wasMintingPossible = isMintingPossible; + while (running) { + if (isMintingPossible != wasMintingPossible) + Controller.getInstance().onMintingPossibleChange(isMintingPossible); - if (isMintingPossible != wasMintingPossible) - Controller.getInstance().onMintingPossibleChange(isMintingPossible); - - wasMintingPossible = isMintingPossible; + wasMintingPossible = isMintingPossible; + try { // Sleep for a while Thread.sleep(1000); @@ -114,319 +114,338 @@ public class BlockMinter extends Thread { if (minLatestBlockTimestamp == null) continue; - // No online accounts? (e.g. during startup) - if (OnlineAccountsManager.getInstance().getOnlineAccounts().isEmpty()) + // No online accounts for current timestamp? (e.g. during startup) + if (!OnlineAccountsManager.getInstance().hasOnlineAccounts()) continue; - List mintingAccountsData = repository.getAccountRepository().getMintingAccounts(); - // No minting accounts? 
- if (mintingAccountsData.isEmpty()) - continue; + try (final Repository repository = RepositoryManager.getRepository()) { + // Going to need this a lot... + BlockRepository blockRepository = repository.getBlockRepository(); - // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level - // Note that minting accounts are actually reward-shares in Qortal - Iterator madi = mintingAccountsData.iterator(); - while (madi.hasNext()) { - MintingAccountData mintingAccountData = madi.next(); - - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey()); - if (rewardShareData == null) { - // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts - madi.remove(); - continue; - } - - Account mintingAccount = new Account(repository, rewardShareData.getMinter()); - if (!mintingAccount.canMint()) { - // Minting-account component of reward-share can no longer mint - disregard - madi.remove(); - continue; - } - - // Optional (non-validated) prevention of block submissions below a defined level. - // This is an unvalidated version of Blockchain.minAccountLevelToMint - // and exists only to reduce block candidates by default. - int level = mintingAccount.getEffectiveMintingLevel(); - if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) { - madi.remove(); - continue; - } - } - - // Needs a mutable copy of the unmodifiableList - List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers()); - BlockData lastBlockData = blockRepository.getLastBlock(); - - // Disregard peers that have "misbehaved" recently - peers.removeIf(Controller.hasMisbehaved); - - // Disregard peers that don't have a recent block, but only if we're not in recovery mode. - // In that mode, we want to allow minting on top of older blocks, to recover stalled networks. 
- if (Synchronizer.getInstance().getRecoveryMode() == false) - peers.removeIf(Controller.hasNoRecentBlock); - - // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from? - if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) - continue; - - // If we are stuck on an invalid block, we should allow an alternative to be minted - boolean recoverInvalidBlock = false; - if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) { - // We've had at least one invalid block - long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived; - long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived; - if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) { - if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) { - // Last valid block was more than 10 mins ago, but we've had an invalid block since then - // Assume that the chain has stalled because there is no alternative valid candidate - // Enter recovery mode to allow alternative, valid candidates to be minted - recoverInvalidBlock = true; - } - } - } - - // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode. - if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp) - if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false) + List mintingAccountsData = repository.getAccountRepository().getMintingAccounts(); + // No minting accounts? + if (mintingAccountsData.isEmpty()) continue; - // There are enough peers with a recent block and our latest block is recent - // so go ahead and mint a block if possible. - isMintingPossible = true; + // Disregard minting accounts that are no longer valid, e.g. 
by transfer/loss of founder flag or account level + // Note that minting accounts are actually reward-shares in Qortal + Iterator madi = mintingAccountsData.iterator(); + while (madi.hasNext()) { + MintingAccountData mintingAccountData = madi.next(); - // Check blockchain hasn't changed - if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) { - previousBlockData = lastBlockData; - newBlocks.clear(); - - // Reduce log timeout - logTimeout = 10 * 1000L; - - // Last low weight block is no longer valid - parentSignatureForLastLowWeightBlock = null; - } - - // Discard accounts we have already built blocks with - mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey()))); - - // Do we need to build any potential new blocks? - List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList()); - - // We might need to sit the next block out, if one of our minting accounts signed the previous one - final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey(); - final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter)); - if (mintedLastBlock) { - LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one")); - continue; - } - - if (parentSignatureForLastLowWeightBlock != null) { - // The last iteration found a higher weight block in the network, so sleep for a while - // to allow is to sync the higher weight chain. We are sleeping here rather than when - // detected as we don't want to hold the blockchain lock open. 
- LOGGER.debug("Sleeping for 10 seconds..."); - Thread.sleep(10 * 1000L); - } - - for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) { - // First block does the AT heavy-lifting - if (newBlocks.isEmpty()) { - Block newBlock = Block.mint(repository, previousBlockData, mintingAccount); - if (newBlock == null) { - // For some reason we can't mint right now - moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block")); + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey()); + if (rewardShareData == null) { + // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts + madi.remove(); continue; } - newBlocks.add(newBlock); - } else { - // The blocks for other minters require less effort... - Block newBlock = newBlocks.get(0).remint(mintingAccount); - if (newBlock == null) { - // For some reason we can't mint right now - moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block")); + Account mintingAccount = new Account(repository, rewardShareData.getMinter()); + if (!mintingAccount.canMint()) { + // Minting-account component of reward-share can no longer mint - disregard + madi.remove(); continue; } - newBlocks.add(newBlock); + // Optional (non-validated) prevention of block submissions below a defined level. + // This is an unvalidated version of Blockchain.minAccountLevelToMint + // and exists only to reduce block candidates by default. + int level = mintingAccount.getEffectiveMintingLevel(); + if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) { + madi.remove(); + continue; + } } - } - // No potential block candidates? 
- if (newBlocks.isEmpty()) - continue; + // Needs a mutable copy of the unmodifiableList + List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers()); + BlockData lastBlockData = blockRepository.getLastBlock(); - // Make sure we're the only thread modifying the blockchain - ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); - if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) { - LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds"); - continue; - } + // Disregard peers that have "misbehaved" recently + peers.removeIf(Controller.hasMisbehaved); - boolean newBlockMinted = false; - Block newBlock = null; + // Disregard peers that don't have a recent block, but only if we're not in recovery mode. + // In that mode, we want to allow minting on top of older blocks, to recover stalled networks. + if (Synchronizer.getInstance().getRecoveryMode() == false) + peers.removeIf(Controller.hasNoRecentBlock); - try { - // Clear repository session state so we have latest view of data - repository.discardChanges(); - - // Now that we have blockchain lock, do final check that chain hasn't changed - BlockData latestBlockData = blockRepository.getLastBlock(); - if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature())) + // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from? + if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) continue; - List goodBlocks = new ArrayList<>(); - for (Block testBlock : newBlocks) { - // Is new block's timestamp valid yet? - // We do a separate check as some timestamp checks are skipped for testchains - if (testBlock.isTimestampValid() != ValidationResult.OK) - continue; - - testBlock.preProcess(); - - // Is new block valid yet? 
(Before adding unconfirmed transactions) - ValidationResult result = testBlock.isValid(); - if (result != ValidationResult.OK) { - moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name()))); - - continue; - } - - goodBlocks.add(testBlock); - } - - if (goodBlocks.isEmpty()) - continue; - - // Pick best block - final int parentHeight = previousBlockData.getHeight(); - final byte[] parentBlockSignature = previousBlockData.getSignature(); - - BigInteger bestWeight = null; - - for (int bi = 0; bi < goodBlocks.size(); ++bi) { - BlockData blockData = goodBlocks.get(bi).getBlockData(); - - BlockSummaryData blockSummaryData = new BlockSummaryData(blockData); - int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey()); - blockSummaryData.setMinterLevel(minterLevel); - - BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData); - - if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) { - newBlock = goodBlocks.get(bi); - bestWeight = blockWeight; - } - } - - try { - if (this.higherWeightChainExists(repository, bestWeight)) { - - // Check if the base block has updated since the last time we were here - if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null || - !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) { - // We've switched to a different chain, so reset the timer - timeOfLastLowWeightBlock = NTP.getTime(); + // If we are stuck on an invalid block, we should allow an alternative to be minted + boolean recoverInvalidBlock = false; + if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) { + // We've had at least one invalid block + long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived; + long timeSinceLastInvalidBlock = NTP.getTime() - 
Synchronizer.getInstance().timeInvalidBlockLastReceived; + if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) { + if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) { + // Last valid block was more than 10 mins ago, but we've had an invalid block since then + // Assume that the chain has stalled because there is no alternative valid candidate + // Enter recovery mode to allow alternative, valid candidates to be minted + recoverInvalidBlock = true; } - parentSignatureForLastLowWeightBlock = previousBlockData.getSignature(); + } + } - // If less than 30 seconds has passed since first detection the higher weight chain, - // we should skip our block submission to give us the opportunity to sync to the better chain - if (NTP.getTime() - timeOfLastLowWeightBlock < 30*1000L) { - LOGGER.debug("Higher weight chain found in peers, so not signing a block this round"); - LOGGER.debug("Time since detected: {}ms", NTP.getTime() - timeOfLastLowWeightBlock); + // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode. + if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp) + if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false) + continue; + + // There are enough peers with a recent block and our latest block is recent + // so go ahead and mint a block if possible. 
+ isMintingPossible = true; + + // Reattach newBlocks to new repository handle + for (Block newBlock : newBlocks) + newBlock.setRepository(repository); + + // Check blockchain hasn't changed + if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) { + previousBlockData = lastBlockData; + newBlocks.clear(); + + // Reduce log timeout + logTimeout = 10 * 1000L; + + // Last low weight block is no longer valid + parentSignatureForLastLowWeightBlock = null; + } + + // Discard accounts we have already built blocks with + mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey()))); + + // Do we need to build any potential new blocks? + List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList()); + + // We might need to sit the next block out, if one of our minting accounts signed the previous one + byte[] previousBlockMinter = previousBlockData.getMinterPublicKey(); + boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter)); + if (mintedLastBlock) { + LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one")); + continue; + } + + if (parentSignatureForLastLowWeightBlock != null) { + // The last iteration found a higher weight block in the network, so sleep for a while + // to allow is to sync the higher weight chain. We are sleeping here rather than when + // detected as we don't want to hold the blockchain lock open. 
+ LOGGER.info("Sleeping for 10 seconds..."); + Thread.sleep(10 * 1000L); + } + + for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) { + // First block does the AT heavy-lifting + if (newBlocks.isEmpty()) { + Block newBlock = Block.mint(repository, previousBlockData, mintingAccount); + if (newBlock == null) { + // For some reason we can't mint right now + moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block")); continue; } - else { - // More than 30 seconds have passed, so we should submit our block candidate anyway. - LOGGER.debug("More than 30 seconds passed, so proceeding to submit block candidate..."); + + newBlocks.add(newBlock); + } else { + // The blocks for other minters require less effort... + Block newBlock = newBlocks.get(0).remint(mintingAccount); + if (newBlock == null) { + // For some reason we can't mint right now + moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block")); + continue; } + + newBlocks.add(newBlock); } - else { - LOGGER.debug("No higher weight chain found in peers"); - } - } catch (DataException e) { - LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway..."); } - // Discard any uncommitted changes as a result of the higher weight chain detection - repository.discardChanges(); + // No potential block candidates? + if (newBlocks.isEmpty()) + continue; - // Clear variables that track low weight blocks - parentSignatureForLastLowWeightBlock = null; - timeOfLastLowWeightBlock = null; - - - // Add unconfirmed transactions - addUnconfirmedTransactions(repository, newBlock); - - // Sign to create block's signature - newBlock.sign(); - - // Is newBlock still valid? - ValidationResult validationResult = newBlock.isValid(); - if (validationResult != ValidationResult.OK) { - // No longer valid? 
Report and discard - LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name())); - - // Rebuild block candidates, just to be sure - newBlocks.clear(); + // Make sure we're the only thread modifying the blockchain + ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); + if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) { + LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds"); continue; } - // Add to blockchain - something else will notice and broadcast new block to network + boolean newBlockMinted = false; + Block newBlock = null; + try { - newBlock.process(); + // Clear repository session state so we have latest view of data + repository.discardChanges(); - repository.saveChanges(); + // Now that we have blockchain lock, do final check that chain hasn't changed + BlockData latestBlockData = blockRepository.getLastBlock(); + if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature())) + continue; - LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight())); + List goodBlocks = new ArrayList<>(); + boolean wasInvalidBlockDiscarded = false; + Iterator newBlocksIterator = newBlocks.iterator(); - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey()); + while (newBlocksIterator.hasNext()) { + Block testBlock = newBlocksIterator.next(); - if (rewardShareData != null) { - LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s", - newBlock.getBlockData().getHeight(), - Base58.encode(newBlock.getBlockData().getSignature()), - Base58.encode(newBlock.getParent().getSignature()), - rewardShareData.getMinter(), - rewardShareData.getRecipient())); - } else { - LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s", - newBlock.getBlockData().getHeight(), - 
Base58.encode(newBlock.getBlockData().getSignature()), - Base58.encode(newBlock.getParent().getSignature()), - newBlock.getMinter().getAddress())); + // Is new block's timestamp valid yet? + // We do a separate check as some timestamp checks are skipped for testchains + if (testBlock.isTimestampValid() != ValidationResult.OK) + continue; + + testBlock.preProcess(); + + // Is new block valid yet? (Before adding unconfirmed transactions) + ValidationResult result = testBlock.isValid(); + if (result != ValidationResult.OK) { + moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name()))); + + newBlocksIterator.remove(); + wasInvalidBlockDiscarded = true; + /* + * Bail out fast so that we loop around from the top again. + * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks, + * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again. + * Particularly useful if some aspect of Blocks changes due to a timestamp-based feature-trigger (see BlockChain class). 
+ */ + break; + } + + goodBlocks.add(testBlock); } - // Notify network after we're released blockchain lock - newBlockMinted = true; + if (wasInvalidBlockDiscarded || goodBlocks.isEmpty()) + continue; - // Notify Controller - repository.discardChanges(); // clear transaction status to prevent deadlocks - Controller.getInstance().onNewBlock(newBlock.getBlockData()); - } catch (DataException e) { - // Unable to process block - report and discard - LOGGER.error("Unable to process newly minted block?", e); - newBlocks.clear(); + // Pick best block + final int parentHeight = previousBlockData.getHeight(); + final byte[] parentBlockSignature = previousBlockData.getSignature(); + + BigInteger bestWeight = null; + + for (int bi = 0; bi < goodBlocks.size(); ++bi) { + BlockData blockData = goodBlocks.get(bi).getBlockData(); + + BlockSummaryData blockSummaryData = new BlockSummaryData(blockData); + int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey()); + blockSummaryData.setMinterLevel(minterLevel); + + BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData); + + if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) { + newBlock = goodBlocks.get(bi); + bestWeight = blockWeight; + } + } + + try { + if (this.higherWeightChainExists(repository, bestWeight)) { + + // Check if the base block has updated since the last time we were here + if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null || + !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) { + // We've switched to a different chain, so reset the timer + timeOfLastLowWeightBlock = NTP.getTime(); + } + parentSignatureForLastLowWeightBlock = previousBlockData.getSignature(); + + // If less than 30 seconds has passed since first detection the higher weight chain, + // we should skip our block submission to give us the opportunity to sync to the better 
chain + if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) { + LOGGER.info("Higher weight chain found in peers, so not signing a block this round"); + LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock); + continue; + } else { + // More than 30 seconds have passed, so we should submit our block candidate anyway. + LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate..."); + } + } else { + LOGGER.debug("No higher weight chain found in peers"); + } + } catch (DataException e) { + LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway..."); + } + + // Discard any uncommitted changes as a result of the higher weight chain detection + repository.discardChanges(); + + // Clear variables that track low weight blocks + parentSignatureForLastLowWeightBlock = null; + timeOfLastLowWeightBlock = null; + + // Add unconfirmed transactions + addUnconfirmedTransactions(repository, newBlock); + + // Sign to create block's signature + newBlock.sign(); + + // Is newBlock still valid? + ValidationResult validationResult = newBlock.isValid(); + if (validationResult != ValidationResult.OK) { + // No longer valid? 
Report and discard + LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name())); + + // Rebuild block candidates, just to be sure + newBlocks.clear(); + continue; + } + + // Add to blockchain - something else will notice and broadcast new block to network + try { + newBlock.process(); + + repository.saveChanges(); + + LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight())); + + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey()); + + if (rewardShareData != null) { + LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s", + newBlock.getBlockData().getHeight(), + Base58.encode(newBlock.getBlockData().getSignature()), + Base58.encode(newBlock.getParent().getSignature()), + rewardShareData.getMinter(), + rewardShareData.getRecipient())); + } else { + LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s", + newBlock.getBlockData().getHeight(), + Base58.encode(newBlock.getBlockData().getSignature()), + Base58.encode(newBlock.getParent().getSignature()), + newBlock.getMinter().getAddress())); + } + + // Notify network after we're released blockchain lock + newBlockMinted = true; + + // Notify Controller + repository.discardChanges(); // clear transaction status to prevent deadlocks + Controller.getInstance().onNewBlock(newBlock.getBlockData()); + } catch (DataException e) { + // Unable to process block - report and discard + LOGGER.error("Unable to process newly minted block?", e); + newBlocks.clear(); + } + } finally { + blockchainLock.unlock(); } - } finally { - blockchainLock.unlock(); - } - if (newBlockMinted) { - // Broadcast our new chain to network - BlockData newBlockData = newBlock.getBlockData(); + if (newBlockMinted) { + // Broadcast our new chain to network + BlockData newBlockData = newBlock.getBlockData(); - Network 
network = Network.getInstance(); - network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData)); + Network network = Network.getInstance(); + network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData)); + } + } catch (DataException e) { + LOGGER.warn("Repository issue while running block minter", e); } + } catch (InterruptedException e) { + // We've been interrupted - time to exit + return; } - } catch (DataException e) { - LOGGER.warn("Repository issue while running block minter", e); - } catch (InterruptedException e) { - // We've been interrupted - time to exit - return; } } diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index 8e806c3c..cde965c1 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -1241,6 +1241,10 @@ public class Controller extends Thread { OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV2Message(peer, message); break; + case GET_ONLINE_ACCOUNTS_V3: + OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV3Message(peer, message); + break; + case GET_ARBITRARY_DATA: // Not currently supported break; diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index 482e9d90..55c690d6 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -1,12 +1,15 @@ package org.qortal.controller; +import com.google.common.hash.HashCode; import com.google.common.primitives.Longs; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; -import org.qortal.account.PublicKeyAccount; +import org.qortal.block.Block; import org.qortal.block.BlockChain; +import 
org.qortal.crypto.Crypto; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.data.account.MintingAccountData; import org.qortal.data.account.RewardShareData; import org.qortal.data.network.OnlineAccountData; @@ -18,270 +21,352 @@ import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; import org.qortal.utils.Base58; import org.qortal.utils.NTP; +import org.qortal.utils.NamedThreadFactory; import java.util.*; +import java.util.concurrent.*; import java.util.stream.Collectors; -public class OnlineAccountsManager extends Thread { - - private class OurOnlineAccountsThread extends Thread { - - public void run() { - try { - while (!isStopping) { - Thread.sleep(10000L); - - // Refresh our online accounts signatures? - sendOurOnlineAccountsInfo(); - - } - } catch (InterruptedException e) { - // Fall through to exit thread - } - } - } - +public class OnlineAccountsManager { private static final Logger LOGGER = LogManager.getLogger(OnlineAccountsManager.class); - private static OnlineAccountsManager instance; + // 'Current' as in 'now' + + /** + * How long online accounts signatures last before they expire. + */ + public static final long ONLINE_TIMESTAMP_MODULUS = 5 * 60 * 1000L; + + /** + * How many 'current' timestamp-sets of online accounts we cache. + */ + private static final int MAX_CACHED_TIMESTAMP_SETS = 2; + + /** + * How many timestamp-sets of online accounts we cache for 'latest blocks'. 
+ */ + private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 3; + + private static final long ONLINE_ACCOUNTS_QUEUE_INTERVAL = 100L; //ms + private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms + private static final long ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL = 60 * 1000L; // ms + private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 15 * 1000L; // ms + + private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; // v3.2.0 + private static final long ONLINE_ACCOUNTS_V3_PEER_VERSION = 0x03000300cbL; // v3.3.203 + + private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts")); private volatile boolean isStopping = false; - // To do with online accounts list - private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms - private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 1 * 60 * 1000L; // ms - public static final long ONLINE_TIMESTAMP_MODULUS = 5 * 60 * 1000L; - private static final long LAST_SEEN_EXPIRY_PERIOD = (ONLINE_TIMESTAMP_MODULUS * 2) + (1 * 60 * 1000L); - /** How many (latest) blocks' worth of online accounts we cache */ - private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 2; - private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; + private final Set onlineAccountsImportQueue = ConcurrentHashMap.newKeySet(); - private long onlineAccountsTasksTimestamp = Controller.startTime + ONLINE_ACCOUNTS_TASKS_INTERVAL; // ms + /** + * Cache of 'current' online accounts, keyed by timestamp + */ + private final Map> currentOnlineAccounts = new ConcurrentHashMap<>(); + /** + * Cache of hash-summary of 'current' online accounts, keyed by timestamp, then leading byte of public key. 
+ */ + private final Map> currentOnlineAccountsHashes = new ConcurrentHashMap<>(); - private final List onlineAccountsImportQueue = Collections.synchronizedList(new ArrayList<>()); + /** + * Cache of online accounts for latest blocks - not necessarily 'current' / now. + * Probably only accessed / modified by a single Synchronizer thread. + */ + private final SortedMap> latestBlocksOnlineAccounts = new ConcurrentSkipListMap<>(); - private boolean hasOnlineAccounts = false; + private boolean hasOurOnlineAccounts = false; + public static Long getCurrentOnlineAccountTimestamp() { + Long now = NTP.getTime(); + if (now == null) + return null; - /** Cache of current 'online accounts' */ - List onlineAccounts = new ArrayList<>(); - /** Cache of latest blocks' online accounts */ - Deque> latestBlocksOnlineAccounts = new ArrayDeque<>(MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS); - - public OnlineAccountsManager() { - + return (now / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS; } - public static synchronized OnlineAccountsManager getInstance() { - if (instance == null) { - instance = new OnlineAccountsManager(); - } - - return instance; + private OnlineAccountsManager() { } - public void run() { + private static class SingletonContainer { + private static final OnlineAccountsManager INSTANCE = new OnlineAccountsManager(); + } - // Start separate thread to prepare our online accounts - // This could be converted to a thread pool later if more concurrency is needed - OurOnlineAccountsThread ourOnlineAccountsThread = new OurOnlineAccountsThread(); - ourOnlineAccountsThread.start(); + public static OnlineAccountsManager getInstance() { + return SingletonContainer.INSTANCE; + } - try { - while (!Controller.isStopping()) { - Thread.sleep(100L); + public void start() { + // Expire old online accounts signatures + executor.scheduleAtFixedRate(this::expireOldOnlineAccounts, ONLINE_ACCOUNTS_TASKS_INTERVAL, ONLINE_ACCOUNTS_TASKS_INTERVAL, TimeUnit.MILLISECONDS); - final Long now = 
NTP.getTime(); - if (now == null) { - continue; - } + // Send our online accounts + executor.scheduleAtFixedRate(this::sendOurOnlineAccountsInfo, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); - // Perform tasks to do with managing online accounts list - if (now >= onlineAccountsTasksTimestamp) { - onlineAccountsTasksTimestamp = now + ONLINE_ACCOUNTS_TASKS_INTERVAL; - performOnlineAccountsTasks(); - } + // Request online accounts from peers (legacy) + executor.scheduleAtFixedRate(this::requestLegacyRemoteOnlineAccounts, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); + // Request online accounts from peers (V3+) + executor.scheduleAtFixedRate(this::requestRemoteOnlineAccounts, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); - // Process queued online account verifications - this.processOnlineAccountsImportQueue(); - - } - } catch (InterruptedException e) { - // Fall through to exit thread - } - - ourOnlineAccountsThread.interrupt(); + // Process import queue + executor.scheduleWithFixedDelay(this::processOnlineAccountsImportQueue, ONLINE_ACCOUNTS_QUEUE_INTERVAL, ONLINE_ACCOUNTS_QUEUE_INTERVAL, TimeUnit.MILLISECONDS); } public void shutdown() { isStopping = true; - this.interrupt(); - } - - public boolean hasOnlineAccounts() { - return this.hasOnlineAccounts; - } - - - // Online accounts import queue - - private void processOnlineAccountsImportQueue() { - if (this.onlineAccountsImportQueue.isEmpty()) { - // Nothing to do - return; - } - - LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size()); - - try (final Repository repository = RepositoryManager.getRepository()) { - - List onlineAccountDataCopy = new ArrayList<>(this.onlineAccountsImportQueue); - for (OnlineAccountData onlineAccountData : onlineAccountDataCopy) { - if (isStopping) { - return; - } - - 
this.verifyAndAddAccount(repository, onlineAccountData); - - // Remove from queue - onlineAccountsImportQueue.remove(onlineAccountData); - } - - LOGGER.debug("Finished processing online accounts import queue"); - - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while verifying online accounts"), e); - } - } - - - // Utilities - - private void verifyAndAddAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { - final Long now = NTP.getTime(); - if (now == null) - return; - - PublicKeyAccount otherAccount = new PublicKeyAccount(repository, onlineAccountData.getPublicKey()); - - // Check timestamp is 'recent' here - if (Math.abs(onlineAccountData.getTimestamp() - now) > ONLINE_TIMESTAMP_MODULUS * 2) { - LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp())); - return; - } - - // Verify - byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); - if (!otherAccount.verify(onlineAccountData.getSignature(), data)) { - LOGGER.trace(() -> String.format("Rejecting invalid online account %s", otherAccount.getAddress())); - return; - } - - // Qortal: check online account is actually reward-share - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(onlineAccountData.getPublicKey()); - if (rewardShareData == null) { - // Reward-share doesn't even exist - probably not a good sign - LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(onlineAccountData.getPublicKey()))); - return; - } - - Account mintingAccount = new Account(repository, rewardShareData.getMinter()); - if (!mintingAccount.canMint()) { - // Minting-account component of reward-share can no longer mint - disregard - LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress())); - return; - } - - 
synchronized (this.onlineAccounts) { - OnlineAccountData existingAccountData = this.onlineAccounts.stream().filter(account -> Arrays.equals(account.getPublicKey(), onlineAccountData.getPublicKey())).findFirst().orElse(null); - - if (existingAccountData != null) { - if (existingAccountData.getTimestamp() < onlineAccountData.getTimestamp()) { - this.onlineAccounts.remove(existingAccountData); - - LOGGER.trace(() -> String.format("Updated online account %s with timestamp %d (was %d)", otherAccount.getAddress(), onlineAccountData.getTimestamp(), existingAccountData.getTimestamp())); - } else { - LOGGER.trace(() -> String.format("Not updating existing online account %s", otherAccount.getAddress())); - - return; - } - } else { - LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp())); - } - - this.onlineAccounts.add(onlineAccountData); - } + executor.shutdownNow(); } + // Testing support public void ensureTestingAccountsOnline(PrivateKeyAccount... onlineAccounts) { if (!BlockChain.getInstance().isTestChain()) { LOGGER.warn("Ignoring attempt to ensure test account is online for non-test chain!"); return; } - final Long now = NTP.getTime(); - if (now == null) + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) return; - final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); + final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp(); - synchronized (this.onlineAccounts) { - this.onlineAccounts.clear(); + Set replacementAccounts = new HashSet<>(); + for (PrivateKeyAccount onlineAccount : onlineAccounts) { + // Check mintingAccount is actually reward-share? - for (PrivateKeyAccount onlineAccount : onlineAccounts) { - // Check mintingAccount is actually reward-share? 
+ byte[] signature = useAggregateCompatibleSignature + ? Qortal25519Extras.signForAggregation(onlineAccount.getPrivateKey(), timestampBytes) + : onlineAccount.sign(timestampBytes); + byte[] publicKey = onlineAccount.getPublicKey(); - byte[] signature = onlineAccount.sign(timestampBytes); - byte[] publicKey = onlineAccount.getPublicKey(); + OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); + replacementAccounts.add(ourOnlineAccountData); + } - OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); - this.onlineAccounts.add(ourOnlineAccountData); + this.currentOnlineAccounts.clear(); + addAccounts(replacementAccounts); + } + + // Online accounts import queue + + private void processOnlineAccountsImportQueue() { + if (this.onlineAccountsImportQueue.isEmpty()) + // Nothing to do + return; + + LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size()); + + Set onlineAccountsToAdd = new HashSet<>(); + try (final Repository repository = RepositoryManager.getRepository()) { + for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) { + if (isStopping) + return; + + boolean isValid = this.isValidCurrentAccount(repository, onlineAccountData); + if (isValid) + onlineAccountsToAdd.add(onlineAccountData); + + // Remove from queue + onlineAccountsImportQueue.remove(onlineAccountData); } + } catch (DataException e) { + LOGGER.error("Repository issue while verifying online accounts", e); + } + + if (!onlineAccountsToAdd.isEmpty()) { + LOGGER.debug("Merging {} validated online accounts from import queue", onlineAccountsToAdd.size()); + addAccounts(onlineAccountsToAdd); } } - private void performOnlineAccountsTasks() { + // Utilities + + public static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) { + if (inplaceArray == null) + return Arrays.copyOf(otherArray, otherArray.length); + + 
// Start from index 1 to enforce static leading byte + for (int i = 1; i < otherArray.length; i++) + inplaceArray[i] ^= otherArray[i]; + + return inplaceArray; + } + + private static boolean isValidCurrentAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { + final Long now = NTP.getTime(); + if (now == null) + return false; + + byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); + long onlineAccountTimestamp = onlineAccountData.getTimestamp(); + + // Check timestamp is 'recent' here + if (Math.abs(onlineAccountTimestamp - now) > ONLINE_TIMESTAMP_MODULUS * 2) { + LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + return false; + } + + // Verify signature + byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); + boolean isSignatureValid = onlineAccountTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp() + ? 
Qortal25519Extras.verifyAggregated(rewardSharePublicKey, onlineAccountData.getSignature(), data) + : Crypto.verify(rewardSharePublicKey, onlineAccountData.getSignature(), data); + if (!isSignatureValid) { + LOGGER.trace(() -> String.format("Rejecting invalid online account %s", Base58.encode(rewardSharePublicKey))); + return false; + } + + // Qortal: check online account is actually reward-share + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey); + if (rewardShareData == null) { + // Reward-share doesn't even exist - probably not a good sign + LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(rewardSharePublicKey))); + return false; + } + + Account mintingAccount = new Account(repository, rewardShareData.getMinter()); + if (!mintingAccount.canMint()) { + // Minting-account component of reward-share can no longer mint - disregard + LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress())); + return false; + } + + return true; + } + + /** Adds accounts, maybe rebuilds hashes, returns whether any new accounts were added / hashes rebuilt. 
*/ + private boolean addAccounts(Collection onlineAccountsToAdd) { + // For keeping track of which hashes to rebuild + Map> hashesToRebuild = new HashMap<>(); + + for (OnlineAccountData onlineAccountData : onlineAccountsToAdd) { + boolean isNewEntry = this.addAccount(onlineAccountData); + + if (isNewEntry) + hashesToRebuild.computeIfAbsent(onlineAccountData.getTimestamp(), k -> new HashSet<>()).add(onlineAccountData.getPublicKey()[0]); + } + + if (hashesToRebuild.isEmpty()) + return false; + + for (var entry : hashesToRebuild.entrySet()) { + Long timestamp = entry.getKey(); + + LOGGER.debug(() -> String.format("Rehashing for timestamp %d and leading bytes %s", + timestamp, + entry.getValue().stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", ")) + ) + ); + + for (Byte leadingByte : entry.getValue()) { + byte[] pubkeyHash = currentOnlineAccounts.get(timestamp).stream() + .map(OnlineAccountData::getPublicKey) + .filter(publicKey -> leadingByte == publicKey[0]) + .reduce(null, OnlineAccountsManager::xorByteArrayInPlace); + + currentOnlineAccountsHashes.computeIfAbsent(timestamp, k -> new ConcurrentHashMap<>()).put(leadingByte, pubkeyHash); + + LOGGER.trace(() -> String.format("Rebuilt hash %s for timestamp %d and leading byte %02x using %d public keys", + HashCode.fromBytes(pubkeyHash), + timestamp, + leadingByte, + currentOnlineAccounts.get(timestamp).stream() + .map(OnlineAccountData::getPublicKey) + .filter(publicKey -> leadingByte == publicKey[0]) + .count() + )); + } + } + + LOGGER.info(String.format("we have online accounts for timestamps: %s", String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", "))))); + + return true; + } + + private boolean addAccount(OnlineAccountData onlineAccountData) { + byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); + long onlineAccountTimestamp = onlineAccountData.getTimestamp(); + 
+ Set onlineAccounts = this.currentOnlineAccounts.computeIfAbsent(onlineAccountTimestamp, k -> ConcurrentHashMap.newKeySet()); + boolean isNewEntry = onlineAccounts.add(onlineAccountData); + + if (isNewEntry) + LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + else + LOGGER.trace(() -> String.format("Not updating existing online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + + return isNewEntry; + } + + /** + * Expire old entries. + */ + private void expireOldOnlineAccounts() { final Long now = NTP.getTime(); if (now == null) return; - // Expire old entries - final long cutoffThreshold = now - LAST_SEEN_EXPIRY_PERIOD; - synchronized (this.onlineAccounts) { - Iterator iterator = this.onlineAccounts.iterator(); - while (iterator.hasNext()) { - OnlineAccountData onlineAccountData = iterator.next(); - - if (onlineAccountData.getTimestamp() < cutoffThreshold) { - iterator.remove(); - - LOGGER.trace(() -> { - PublicKeyAccount otherAccount = new PublicKeyAccount(null, onlineAccountData.getPublicKey()); - return String.format("Removed expired online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp()); - }); - } - } - } - - // Request data from other peers? - if ((this.onlineAccountsTasksTimestamp % ONLINE_ACCOUNTS_BROADCAST_INTERVAL) < ONLINE_ACCOUNTS_TASKS_INTERVAL) { - List safeOnlineAccounts; - synchronized (this.onlineAccounts) { - safeOnlineAccounts = new ArrayList<>(this.onlineAccounts); - } - - Message messageV1 = new GetOnlineAccountsMessage(safeOnlineAccounts); - Message messageV2 = new GetOnlineAccountsV2Message(safeOnlineAccounts); - - Network.getInstance().broadcast(peer -> - peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? 
messageV2 : messageV1 - ); - } + final long cutoffThreshold = now - MAX_CACHED_TIMESTAMP_SETS * ONLINE_TIMESTAMP_MODULUS; + this.currentOnlineAccounts.keySet().removeIf(timestamp -> timestamp < cutoffThreshold); + this.currentOnlineAccountsHashes.keySet().removeIf(timestamp -> timestamp < cutoffThreshold); } - private void sendOurOnlineAccountsInfo() { + /** + * Request data from other peers. (Pre-V3) + */ + private void requestLegacyRemoteOnlineAccounts() { final Long now = NTP.getTime(); + if (now == null) + return; + + // Don't bother if we're not up to date + if (!Controller.getInstance().isUpToDate()) + return; + + List mergedOnlineAccounts = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + + Message messageV2 = new GetOnlineAccountsV2Message(mergedOnlineAccounts); + + Network.getInstance().broadcast(peer -> + peer.getPeersVersion() < ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV2 + : null + ); + } + + /** + * Request data from other peers. V3+ + */ + private void requestRemoteOnlineAccounts() { + final Long now = NTP.getTime(); + if (now == null) + return; + + // Don't bother if we're not up to date + if (!Controller.getInstance().isUpToDate()) + return; + + Message messageV3 = new GetOnlineAccountsV3Message(currentOnlineAccountsHashes); + + Network.getInstance().broadcast(peer -> + peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV3 + : null + ); + } + + /** + * Send online accounts that are minting on this node. 
+ */ + private void sendOurOnlineAccountsInfo() { + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) + return; + + Long now = NTP.getTime(); if (now == null) { return; } @@ -296,13 +381,12 @@ public class OnlineAccountsManager extends Thread { try (final Repository repository = RepositoryManager.getRepository()) { mintingAccounts = repository.getAccountRepository().getMintingAccounts(); - // We have no accounts, but don't reset timestamp + // We have no accounts to send if (mintingAccounts.isEmpty()) return; - // Only reward-share accounts allowed + // Only active reward-shares allowed Iterator iterator = mintingAccounts.iterator(); - int i = 0; while (iterator.hasNext()) { MintingAccountData mintingAccountData = iterator.next(); @@ -319,113 +403,138 @@ public class OnlineAccountsManager extends Thread { iterator.remove(); continue; } - - if (++i > 1+1) { - iterator.remove(); - continue; - } } } catch (DataException e) { LOGGER.warn(String.format("Repository issue trying to fetch minting accounts: %s", e.getMessage())); return; } - // 'current' timestamp - final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); - boolean hasInfoChanged = false; - boolean existingAccountsExist = false; + final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp(); byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); List ourOnlineAccounts = new ArrayList<>(); - MINTING_ACCOUNTS: for (MintingAccountData mintingAccountData : mintingAccounts) { - PrivateKeyAccount mintingAccount = new PrivateKeyAccount(null, mintingAccountData.getPrivateKey()); + byte[] privateKey = mintingAccountData.getPrivateKey(); + byte[] publicKey = Crypto.toPublicKey(privateKey); - byte[] signature = mintingAccount.sign(timestampBytes); - byte[] publicKey = mintingAccount.getPublicKey(); + byte[] signature = 
useAggregateCompatibleSignature + ? Qortal25519Extras.signForAggregation(privateKey, timestampBytes) + : Crypto.sign(privateKey, timestampBytes); // Our account is online OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); - synchronized (this.onlineAccounts) { - Iterator iterator = this.onlineAccounts.iterator(); - while (iterator.hasNext()) { - OnlineAccountData existingOnlineAccountData = iterator.next(); - - if (Arrays.equals(existingOnlineAccountData.getPublicKey(), ourOnlineAccountData.getPublicKey())) { - // If our online account is already present, with same timestamp, then move on to next mintingAccount - if (existingOnlineAccountData.getTimestamp() == onlineAccountsTimestamp) { - existingAccountsExist = true; - continue MINTING_ACCOUNTS; - } - - // If our online account is already present, but with older timestamp, then remove it - iterator.remove(); - break; - } - } - - this.onlineAccounts.add(ourOnlineAccountData); - } - - LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", mintingAccount.getAddress(), onlineAccountsTimestamp)); ourOnlineAccounts.add(ourOnlineAccountData); - hasInfoChanged = true; } - // Keep track of whether we have online accounts circulating in the network, for systray status - this.hasOnlineAccounts = existingAccountsExist || !ourOnlineAccounts.isEmpty(); + this.hasOurOnlineAccounts = !ourOnlineAccounts.isEmpty(); + + boolean hasInfoChanged = addAccounts(ourOnlineAccounts); if (!hasInfoChanged) return; Message messageV1 = new OnlineAccountsMessage(ourOnlineAccounts); Message messageV2 = new OnlineAccountsV2Message(ourOnlineAccounts); + Message messageV3 = new OnlineAccountsV2Message(ourOnlineAccounts); // TODO: V3 message Network.getInstance().broadcast(peer -> - peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1 + peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION + ? 
messageV3 + : peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION + ? messageV2 + : messageV1 ); - LOGGER.trace(() -> String.format("Broadcasted %d online account%s with timestamp %d", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp)); + LOGGER.debug("Broadcasted {} online account{} with timestamp {}", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp); } - public static long toOnlineAccountTimestamp(long timestamp) { - return (timestamp / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS; + /** + * Returns whether online accounts manager has any online accounts with timestamp recent enough to be considered currently online. + */ + // BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not + public boolean hasOnlineAccounts() { + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) + return false; + + return this.currentOnlineAccounts.containsKey(onlineAccountsTimestamp); } - /** Returns list of online accounts with timestamp recent enough to be considered currently online. */ + public boolean hasOurOnlineAccounts() { + return this.hasOurOnlineAccounts; + } + + /** + * Returns list of online accounts matching given timestamp. 
+ */ + // Block::mint() - only wants online accounts with (online) timestamp that matches block's (online) timestamp so they can be added to new block + public List getOnlineAccounts(long onlineTimestamp) { + LOGGER.info(String.format("caller's timestamp: %d, our timestamps: %s", onlineTimestamp, String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", "))))); + + return new ArrayList<>(Set.copyOf(this.currentOnlineAccounts.getOrDefault(onlineTimestamp, Collections.emptySet()))); + } + + /** + * Returns list of online accounts with timestamp recent enough to be considered currently online. + */ + // API: calls this to return list of online accounts - probably expects ALL timestamps - but going to get 'current' from now on public List getOnlineAccounts() { - final long onlineTimestamp = toOnlineAccountTimestamp(NTP.getTime()); + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) + return Collections.emptyList(); - synchronized (this.onlineAccounts) { - return this.onlineAccounts.stream().filter(account -> account.getTimestamp() == onlineTimestamp).collect(Collectors.toList()); - } + return getOnlineAccounts(onlineAccountsTimestamp); } + // Block processing - /** Returns cached, unmodifiable list of latest block's online accounts. */ - public List getLatestBlocksOnlineAccounts() { - synchronized (this.latestBlocksOnlineAccounts) { - return this.latestBlocksOnlineAccounts.peekFirst(); - } + /** + * Removes previously validated entries from block's online accounts. + *

+ * Checks both 'current' and block caches. + *

+ * Typically called by {@link Block#areOnlineAccountsValid()} + */ + public void removeKnown(Set blocksOnlineAccounts, Long timestamp) { + Set onlineAccounts = this.currentOnlineAccounts.get(timestamp); + + // If not 'current' timestamp - try block cache instead + if (onlineAccounts == null) + onlineAccounts = this.latestBlocksOnlineAccounts.get(timestamp); + + if (onlineAccounts != null) + blocksOnlineAccounts.removeAll(onlineAccounts); } - /** Caches list of latest block's online accounts. Typically called by Block.process() */ - public void pushLatestBlocksOnlineAccounts(List latestBlocksOnlineAccounts) { - synchronized (this.latestBlocksOnlineAccounts) { - if (this.latestBlocksOnlineAccounts.size() == MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) - this.latestBlocksOnlineAccounts.pollLast(); - - this.latestBlocksOnlineAccounts.addFirst(latestBlocksOnlineAccounts == null - ? Collections.emptyList() - : Collections.unmodifiableList(latestBlocksOnlineAccounts)); + /** + * Adds block's online accounts to one of OnlineAccountManager's caches. + *

+ * It is assumed that the online accounts have been verified. + *

+ * Typically called by {@link Block#areOnlineAccountsValid()} + */ + public void addBlocksOnlineAccounts(Set blocksOnlineAccounts, Long timestamp) { + // We want to add to 'current' in preference if possible + if (this.currentOnlineAccounts.containsKey(timestamp)) { + addAccounts(blocksOnlineAccounts); + return; } - } - /** Reverts list of latest block's online accounts. Typically called by Block.orphan() */ - public void popLatestBlocksOnlineAccounts() { - synchronized (this.latestBlocksOnlineAccounts) { - this.latestBlocksOnlineAccounts.pollFirst(); + // Add to block cache instead + this.latestBlocksOnlineAccounts.computeIfAbsent(timestamp, k -> ConcurrentHashMap.newKeySet()) + .addAll(blocksOnlineAccounts); + + // If block cache has grown too large then we need to trim. + if (this.latestBlocksOnlineAccounts.size() > MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) { + // However, be careful to trim the opposite end to the entry we just added! + Long firstKey = this.latestBlocksOnlineAccounts.firstKey(); + if (!firstKey.equals(timestamp)) + this.latestBlocksOnlineAccounts.remove(firstKey); + else + this.latestBlocksOnlineAccounts.remove(this.latestBlocksOnlineAccounts.lastKey()); } } @@ -438,45 +547,48 @@ public class OnlineAccountsManager extends Thread { List excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts(); // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts - List accountsToSend; - synchronized (this.onlineAccounts) { - accountsToSend = new ArrayList<>(this.onlineAccounts); - } + List accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + int prefilterSize = accountsToSend.size(); Iterator iterator = accountsToSend.iterator(); - - SEND_ITERATOR: while (iterator.hasNext()) { OnlineAccountData onlineAccountData = iterator.next(); - for (int i = 0; i < excludeAccounts.size(); ++i) { - OnlineAccountData excludeAccountData = 
excludeAccounts.get(i); - + for (OnlineAccountData excludeAccountData : excludeAccounts) { if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) { iterator.remove(); - continue SEND_ITERATOR; + break; } } } + if (accountsToSend.isEmpty()) + return; + Message onlineAccountsMessage = new OnlineAccountsMessage(accountsToSend); peer.sendMessage(onlineAccountsMessage); - LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer)); + LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer); } public void onNetworkOnlineAccountsMessage(Peer peer, Message message) { OnlineAccountsMessage onlineAccountsMessage = (OnlineAccountsMessage) message; List peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts(); - LOGGER.trace(() -> String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer)); + LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer); - try (final Repository repository = RepositoryManager.getRepository()) { - for (OnlineAccountData onlineAccountData : peersOnlineAccounts) - this.verifyAndAddAccount(repository, onlineAccountData); - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while verifying online accounts from peer %s", peer), e); + int importCount = 0; + + // Add any online accounts to the queue that aren't already present + for (OnlineAccountData onlineAccountData : peersOnlineAccounts) { + boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData); + + if (isNewEntry) + importCount++; } + + if (importCount > 0) + LOGGER.debug("Added {} online accounts to queue", importCount); } public void onNetworkGetOnlineAccountsV2Message(Peer peer, Message message) { @@ -485,58 +597,106 @@ public class OnlineAccountsManager extends Thread { List 
excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts(); // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts - List accountsToSend; - synchronized (this.onlineAccounts) { - accountsToSend = new ArrayList<>(this.onlineAccounts); - } + List accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + int prefilterSize = accountsToSend.size(); Iterator iterator = accountsToSend.iterator(); - - SEND_ITERATOR: while (iterator.hasNext()) { OnlineAccountData onlineAccountData = iterator.next(); - for (int i = 0; i < excludeAccounts.size(); ++i) { - OnlineAccountData excludeAccountData = excludeAccounts.get(i); - + for (OnlineAccountData excludeAccountData : excludeAccounts) { if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) { iterator.remove(); - continue SEND_ITERATOR; + break; } } } + if (accountsToSend.isEmpty()) + return; + Message onlineAccountsMessage = new OnlineAccountsV2Message(accountsToSend); peer.sendMessage(onlineAccountsMessage); - LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer)); + LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer); } public void onNetworkOnlineAccountsV2Message(Peer peer, Message message) { OnlineAccountsV2Message onlineAccountsMessage = (OnlineAccountsV2Message) message; List peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts(); - LOGGER.debug(String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer)); + LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer); int importCount = 0; // Add any online accounts to the queue that aren't already present for (OnlineAccountData onlineAccountData : 
peersOnlineAccounts) { + boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData); - // Do we already know about this online account data? - if (onlineAccounts.contains(onlineAccountData)) { - continue; - } - - // Is it already in the import queue? - if (onlineAccountsImportQueue.contains(onlineAccountData)) { - continue; - } - - onlineAccountsImportQueue.add(onlineAccountData); - importCount++; + if (isNewEntry) + importCount++; } - LOGGER.debug(String.format("Added %d online accounts to queue", importCount)); + if (importCount > 0) + LOGGER.debug("Added {} online accounts to queue", importCount); + } + + public void onNetworkGetOnlineAccountsV3Message(Peer peer, Message message) { + GetOnlineAccountsV3Message getOnlineAccountsMessage = (GetOnlineAccountsV3Message) message; + + Map> peersHashes = getOnlineAccountsMessage.getHashesByTimestampThenByte(); + List outgoingOnlineAccounts = new ArrayList<>(); + + // Warning: no double-checking/fetching - we must be ConcurrentMap compatible! + // So no contains()-then-get() or multiple get()s on the same key/map. + // We also use getOrDefault() with emptySet() on currentOnlineAccounts in case corresponding timestamp entry isn't there. + for (var ourOuterMapEntry : currentOnlineAccountsHashes.entrySet()) { + Long timestamp = ourOuterMapEntry.getKey(); + + var ourInnerMap = ourOuterMapEntry.getValue(); + var peersInnerMap = peersHashes.get(timestamp); + + if (peersInnerMap == null) { + // Peer doesn't have this timestamp, so if it's valid (i.e. 
not too old) then we'd have to send all of ours + Set timestampsOnlineAccounts = this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()); + outgoingOnlineAccounts.addAll(timestampsOnlineAccounts); + + LOGGER.debug(() -> String.format("Going to send all %d online accounts for timestamp %d", timestampsOnlineAccounts.size(), timestamp)); + } else { + // Quick cache of which leading bytes to send so we only have to filter once + Set outgoingLeadingBytes = new HashSet<>(); + + // We have entries for this timestamp so compare against peer's entries + for (var ourInnerMapEntry : ourInnerMap.entrySet()) { + Byte leadingByte = ourInnerMapEntry.getKey(); + byte[] peersHash = peersInnerMap.get(leadingByte); + + if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) { + // For this leading byte: hashes don't match or peer doesn't have entry + // Send all online accounts for this timestamp and leading byte + outgoingLeadingBytes.add(leadingByte); + } + } + + int beforeAddSize = outgoingOnlineAccounts.size(); + + this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()).stream() + .filter(account -> outgoingLeadingBytes.contains(account.getPublicKey()[0])) + .forEach(outgoingOnlineAccounts::add); + + if (outgoingOnlineAccounts.size() > beforeAddSize) + LOGGER.debug(String.format("Going to send %d online accounts for timestamp %d and leading bytes %s", + outgoingOnlineAccounts.size() - beforeAddSize, + timestamp, + outgoingLeadingBytes.stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", ")) + ) + ); + } + } + + Message onlineAccountsMessage = new OnlineAccountsV2Message(outgoingOnlineAccounts); // TODO: V3 message + peer.sendMessage(onlineAccountsMessage); + + LOGGER.debug("Sent {} online accounts to {}", outgoingOnlineAccounts.size(), peer); } } diff --git a/src/main/java/org/qortal/controller/tradebot/TradeBot.java 
b/src/main/java/org/qortal/controller/tradebot/TradeBot.java index 0583a0f0..2c448607 100644 --- a/src/main/java/org/qortal/controller/tradebot/TradeBot.java +++ b/src/main/java/org/qortal/controller/tradebot/TradeBot.java @@ -292,7 +292,7 @@ public class TradeBot implements Listener { } public static byte[] deriveTradeNativePublicKey(byte[] privateKey) { - return PrivateKeyAccount.toPublicKey(privateKey); + return Crypto.toPublicKey(privateKey); } public static byte[] deriveTradeForeignPublicKey(byte[] privateKey) { diff --git a/src/main/java/org/qortal/crypto/BouncyCastle25519.java b/src/main/java/org/qortal/crypto/BouncyCastle25519.java deleted file mode 100644 index 1a2e0de9..00000000 --- a/src/main/java/org/qortal/crypto/BouncyCastle25519.java +++ /dev/null @@ -1,99 +0,0 @@ -package org.qortal.crypto; - -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.Arrays; - -import org.bouncycastle.crypto.Digest; -import org.bouncycastle.math.ec.rfc7748.X25519; -import org.bouncycastle.math.ec.rfc7748.X25519Field; -import org.bouncycastle.math.ec.rfc8032.Ed25519; - -/** Additions to BouncyCastle providing Ed25519 to X25519 key conversion. 
*/ -public class BouncyCastle25519 { - - private static final Class pointAffineClass; - private static final Constructor pointAffineCtor; - private static final Method decodePointVarMethod; - private static final Field yField; - - static { - try { - Class ed25519Class = Ed25519.class; - pointAffineClass = Arrays.stream(ed25519Class.getDeclaredClasses()).filter(clazz -> clazz.getSimpleName().equals("PointAffine")).findFirst().get(); - if (pointAffineClass == null) - throw new ClassNotFoundException("Can't locate PointExt inner class inside Ed25519"); - - decodePointVarMethod = ed25519Class.getDeclaredMethod("decodePointVar", byte[].class, int.class, boolean.class, pointAffineClass); - decodePointVarMethod.setAccessible(true); - - pointAffineCtor = pointAffineClass.getDeclaredConstructors()[0]; - pointAffineCtor.setAccessible(true); - - yField = pointAffineClass.getDeclaredField("y"); - yField.setAccessible(true); - } catch (NoSuchMethodException | SecurityException | IllegalArgumentException | NoSuchFieldException | ClassNotFoundException e) { - throw new RuntimeException("Can't initialize BouncyCastle25519 shim", e); - } - } - - private static int[] obtainYFromPublicKey(byte[] ed25519PublicKey) { - try { - Object pA = pointAffineCtor.newInstance(); - - Boolean result = (Boolean) decodePointVarMethod.invoke(null, ed25519PublicKey, 0, true, pA); - if (result == null || !result) - return null; - - return (int[]) yField.get(pA); - } catch (SecurityException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - throw new RuntimeException("Can't reflect into BouncyCastle", e); - } - } - - public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) { - int[] one = new int[X25519Field.SIZE]; - X25519Field.one(one); - - int[] y = obtainYFromPublicKey(ed25519PublicKey); - - int[] oneMinusY = new int[X25519Field.SIZE]; - X25519Field.sub(one, y, oneMinusY); - - int[] onePlusY = new int[X25519Field.SIZE]; - 
X25519Field.add(one, y, onePlusY); - - int[] oneMinusYInverted = new int[X25519Field.SIZE]; - X25519Field.inv(oneMinusY, oneMinusYInverted); - - int[] u = new int[X25519Field.SIZE]; - X25519Field.mul(onePlusY, oneMinusYInverted, u); - - X25519Field.normalize(u); - - byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE]; - X25519Field.encode(u, x25519PublicKey, 0); - - return x25519PublicKey; - } - - public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) { - Digest d = Ed25519.createPrehash(); - byte[] h = new byte[d.getDigestSize()]; - - d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length); - d.doFinal(h, 0); - - byte[] s = new byte[X25519.SCALAR_SIZE]; - - System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE); - s[0] &= 0xF8; - s[X25519.SCALAR_SIZE - 1] &= 0x7F; - s[X25519.SCALAR_SIZE - 1] |= 0x40; - - return s; - } - -} diff --git a/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java b/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java new file mode 100644 index 00000000..ebcf0f97 --- /dev/null +++ b/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java @@ -0,0 +1,1427 @@ +package org.qortal.crypto; + +import java.security.SecureRandom; + +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA512Digest; +import org.bouncycastle.math.ec.rfc7748.X25519; +import org.bouncycastle.math.ec.rfc7748.X25519Field; +import org.bouncycastle.math.raw.Interleave; +import org.bouncycastle.math.raw.Nat; +import org.bouncycastle.math.raw.Nat256; +import org.bouncycastle.util.Arrays; + +/** + * Duplicate of {@link org.bouncycastle.math.ec.rfc8032.Ed25519}, + * but with {@code private} modifiers replaced with {@code protected}, + * to allow for extension by {@link org.qortal.crypto.Qortal25519Extras}. 
+ */ +public abstract class BouncyCastleEd25519 +{ + // -x^2 + y^2 == 1 + 0x52036CEE2B6FFE738CC740797779E89800700A4D4141D8AB75EB4DCA135978A3 * x^2 * y^2 + + public static final class Algorithm + { + public static final int Ed25519 = 0; + public static final int Ed25519ctx = 1; + public static final int Ed25519ph = 2; + } + + protected static class F extends X25519Field {}; + + protected static final long M08L = 0x000000FFL; + protected static final long M28L = 0x0FFFFFFFL; + protected static final long M32L = 0xFFFFFFFFL; + + protected static final int POINT_BYTES = 32; + protected static final int SCALAR_INTS = 8; + protected static final int SCALAR_BYTES = SCALAR_INTS * 4; + + public static final int PREHASH_SIZE = 64; + public static final int PUBLIC_KEY_SIZE = POINT_BYTES; + public static final int SECRET_KEY_SIZE = 32; + public static final int SIGNATURE_SIZE = POINT_BYTES + SCALAR_BYTES; + + // "SigEd25519 no Ed25519 collisions" + protected static final byte[] DOM2_PREFIX = new byte[]{ 0x53, 0x69, 0x67, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x20, 0x6e, 0x6f, 0x20, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, 0x20, 0x63, 0x6f, 0x6c, 0x6c, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x73 }; + + protected static final int[] P = new int[]{ 0xFFFFFFED, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0x7FFFFFFF }; + protected static final int[] L = new int[]{ 0x5CF5D3ED, 0x5812631A, 0xA2F79CD6, 0x14DEF9DE, 0x00000000, 0x00000000, + 0x00000000, 0x10000000 }; + + protected static final int L0 = 0xFCF5D3ED; // L0:26/-- + protected static final int L1 = 0x012631A6; // L1:24/22 + protected static final int L2 = 0x079CD658; // L2:27/-- + protected static final int L3 = 0xFF9DEA2F; // L3:23/-- + protected static final int L4 = 0x000014DF; // L4:12/11 + + protected static final int[] B_x = new int[]{ 0x0325D51A, 0x018B5823, 0x007B2C95, 0x0304A92D, 0x00D2598E, 0x01D6DC5C, + 0x01388C7F, 0x013FEC0A, 0x029E6B72, 0x0042D26D }; + protected static final int[] B_y 
= new int[]{ 0x02666658, 0x01999999, 0x00666666, 0x03333333, 0x00CCCCCC, 0x02666666, + 0x01999999, 0x00666666, 0x03333333, 0x00CCCCCC, }; + protected static final int[] C_d = new int[]{ 0x035978A3, 0x02D37284, 0x018AB75E, 0x026A0A0E, 0x0000E014, 0x0379E898, + 0x01D01E5D, 0x01E738CC, 0x03715B7F, 0x00A406D9 }; + protected static final int[] C_d2 = new int[]{ 0x02B2F159, 0x01A6E509, 0x01156EBD, 0x00D4141D, 0x0001C029, 0x02F3D130, + 0x03A03CBB, 0x01CE7198, 0x02E2B6FF, 0x00480DB3 }; + protected static final int[] C_d4 = new int[]{ 0x0165E2B2, 0x034DCA13, 0x002ADD7A, 0x01A8283B, 0x00038052, 0x01E7A260, + 0x03407977, 0x019CE331, 0x01C56DFF, 0x00901B67 }; + + protected static final int WNAF_WIDTH_BASE = 7; + + protected static final int PRECOMP_BLOCKS = 8; + protected static final int PRECOMP_TEETH = 4; + protected static final int PRECOMP_SPACING = 8; + protected static final int PRECOMP_POINTS = 1 << (PRECOMP_TEETH - 1); + protected static final int PRECOMP_MASK = PRECOMP_POINTS - 1; + + protected static final Object precompLock = new Object(); + // TODO[ed25519] Convert to PointPrecomp + protected static PointExt[] precompBaseTable = null; + protected static int[] precompBase = null; + + protected static class PointAccum + { + int[] x = F.create(); + int[] y = F.create(); + int[] z = F.create(); + int[] u = F.create(); + int[] v = F.create(); + } + + protected static class PointAffine + { + int[] x = F.create(); + int[] y = F.create(); + } + + protected static class PointExt + { + int[] x = F.create(); + int[] y = F.create(); + int[] z = F.create(); + int[] t = F.create(); + } + + protected static class PointPrecomp + { + int[] ypx_h = F.create(); + int[] ymx_h = F.create(); + int[] xyd = F.create(); + } + + protected static byte[] calculateS(byte[] r, byte[] k, byte[] s) + { + int[] t = new int[SCALAR_INTS * 2]; decodeScalar(r, 0, t); + int[] u = new int[SCALAR_INTS]; decodeScalar(k, 0, u); + int[] v = new int[SCALAR_INTS]; decodeScalar(s, 0, v); + + Nat256.mulAddTo(u, 
v, t); + + byte[] result = new byte[SCALAR_BYTES * 2]; + for (int i = 0; i < t.length; ++i) + { + encode32(t[i], result, i * 4); + } + return reduceScalar(result); + } + + protected static boolean checkContextVar(byte[] ctx , byte phflag) + { + return ctx == null && phflag == 0x00 + || ctx != null && ctx.length < 256; + } + + protected static int checkPoint(int[] x, int[] y) + { + int[] t = F.create(); + int[] u = F.create(); + int[] v = F.create(); + + F.sqr(x, u); + F.sqr(y, v); + F.mul(u, v, t); + F.sub(v, u, v); + F.mul(t, C_d, t); + F.addOne(t); + F.sub(t, v, t); + F.normalize(t); + + return F.isZero(t); + } + + protected static int checkPoint(int[] x, int[] y, int[] z) + { + int[] t = F.create(); + int[] u = F.create(); + int[] v = F.create(); + int[] w = F.create(); + + F.sqr(x, u); + F.sqr(y, v); + F.sqr(z, w); + F.mul(u, v, t); + F.sub(v, u, v); + F.mul(v, w, v); + F.sqr(w, w); + F.mul(t, C_d, t); + F.add(t, w, t); + F.sub(t, v, t); + F.normalize(t); + + return F.isZero(t); + } + + protected static boolean checkPointVar(byte[] p) + { + int[] t = new int[8]; + decode32(p, 0, t, 0, 8); + t[7] &= 0x7FFFFFFF; + return !Nat256.gte(t, P); + } + + protected static boolean checkScalarVar(byte[] s) + { + int[] n = new int[SCALAR_INTS]; + decodeScalar(s, 0, n); + return !Nat256.gte(n, L); + } + + protected static Digest createDigest() + { + return new SHA512Digest(); + } + + public static Digest createPrehash() + { + return createDigest(); + } + + protected static int decode24(byte[] bs, int off) + { + int n = bs[ off] & 0xFF; + n |= (bs[++off] & 0xFF) << 8; + n |= (bs[++off] & 0xFF) << 16; + return n; + } + + protected static int decode32(byte[] bs, int off) + { + int n = bs[off] & 0xFF; + n |= (bs[++off] & 0xFF) << 8; + n |= (bs[++off] & 0xFF) << 16; + n |= bs[++off] << 24; + return n; + } + + protected static void decode32(byte[] bs, int bsOff, int[] n, int nOff, int nLen) + { + for (int i = 0; i < nLen; ++i) + { + n[nOff + i] = decode32(bs, bsOff + i * 4); + } + 
} + + protected static boolean decodePointVar(byte[] p, int pOff, boolean negate, PointAffine r) + { + byte[] py = Arrays.copyOfRange(p, pOff, pOff + POINT_BYTES); + if (!checkPointVar(py)) + { + return false; + } + + int x_0 = (py[POINT_BYTES - 1] & 0x80) >>> 7; + py[POINT_BYTES - 1] &= 0x7F; + + F.decode(py, 0, r.y); + + int[] u = F.create(); + int[] v = F.create(); + + F.sqr(r.y, u); + F.mul(C_d, u, v); + F.subOne(u); + F.addOne(v); + + if (!F.sqrtRatioVar(u, v, r.x)) + { + return false; + } + + F.normalize(r.x); + if (x_0 == 1 && F.isZeroVar(r.x)) + { + return false; + } + + if (negate ^ (x_0 != (r.x[0] & 1))) + { + F.negate(r.x, r.x); + } + + return true; + } + + protected static void decodeScalar(byte[] k, int kOff, int[] n) + { + decode32(k, kOff, n, 0, SCALAR_INTS); + } + + protected static void dom2(Digest d, byte phflag, byte[] ctx) + { + if (ctx != null) + { + int n = DOM2_PREFIX.length; + byte[] t = new byte[n + 2 + ctx.length]; + System.arraycopy(DOM2_PREFIX, 0, t, 0, n); + t[n] = phflag; + t[n + 1] = (byte)ctx.length; + System.arraycopy(ctx, 0, t, n + 2, ctx.length); + + d.update(t, 0, t.length); + } + } + + protected static void encode24(int n, byte[] bs, int off) + { + bs[ off] = (byte)(n ); + bs[++off] = (byte)(n >>> 8); + bs[++off] = (byte)(n >>> 16); + } + + protected static void encode32(int n, byte[] bs, int off) + { + bs[ off] = (byte)(n ); + bs[++off] = (byte)(n >>> 8); + bs[++off] = (byte)(n >>> 16); + bs[++off] = (byte)(n >>> 24); + } + + protected static void encode56(long n, byte[] bs, int off) + { + encode32((int)n, bs, off); + encode24((int)(n >>> 32), bs, off + 4); + } + + protected static int encodePoint(PointAccum p, byte[] r, int rOff) + { + int[] x = F.create(); + int[] y = F.create(); + + F.inv(p.z, y); + F.mul(p.x, y, x); + F.mul(p.y, y, y); + F.normalize(x); + F.normalize(y); + + int result = checkPoint(x, y); + + F.encode(y, r, rOff); + r[rOff + POINT_BYTES - 1] |= ((x[0] & 1) << 7); + + return result; + } + + public static 
void generatePrivateKey(SecureRandom random, byte[] k) + { + random.nextBytes(k); + } + + public static void generatePublicKey(byte[] sk, int skOff, byte[] pk, int pkOff) + { + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + scalarMultBaseEncoded(s, pk, pkOff); + } + + protected static int getWindow4(int[] x, int n) + { + int w = n >>> 3, b = (n & 7) << 2; + return (x[w] >>> b) & 15; + } + + protected static byte[] getWnafVar(int[] n, int width) + { +// assert n[SCALAR_INTS - 1] >>> 28 == 0; + + int[] t = new int[SCALAR_INTS * 2]; + { + int tPos = t.length, c = 0; + int i = SCALAR_INTS; + while (--i >= 0) + { + int next = n[i]; + t[--tPos] = (next >>> 16) | (c << 16); + t[--tPos] = c = next; + } + } + + byte[] ws = new byte[253]; + + final int pow2 = 1 << width; + final int mask = pow2 - 1; + final int sign = pow2 >>> 1; + + int j = 0, carry = 0; + for (int i = 0; i < t.length; ++i, j -= 16) + { + int word = t[i]; + while (j < 16) + { + int word16 = word >>> j; + int bit = word16 & 1; + + if (bit == carry) + { + ++j; + continue; + } + + int digit = (word16 & mask) + carry; + carry = digit & sign; + digit -= (carry << 1); + carry >>>= (width - 1); + + ws[(i << 4) + j] = (byte)digit; + + j += width; + } + } + +// assert carry == 0; + + return ws; + } + + protected static void implSign(Digest d, byte[] h, byte[] s, byte[] pk, int pkOff, byte[] ctx, byte phflag, byte[] m, + int mOff, int mLen, byte[] sig, int sigOff) + { + dom2(d, phflag, ctx); + d.update(h, SCALAR_BYTES, SCALAR_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] r = reduceScalar(h); + byte[] R = new byte[POINT_BYTES]; + scalarMultBaseEncoded(r, R, 0); + + dom2(d, phflag, ctx); + d.update(R, 0, POINT_BYTES); + d.update(pk, pkOff, POINT_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + byte[] S = 
calculateS(r, k, s); + + System.arraycopy(R, 0, sig, sigOff, POINT_BYTES); + System.arraycopy(S, 0, sig, sigOff + POINT_BYTES, SCALAR_BYTES); + } + + protected static void implSign(byte[] sk, int skOff, byte[] ctx, byte phflag, byte[] m, int mOff, int mLen, + byte[] sig, int sigOff) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + byte[] pk = new byte[POINT_BYTES]; + scalarMultBaseEncoded(s, pk, 0); + + implSign(d, h, s, pk, 0, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + protected static void implSign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte phflag, + byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + implSign(d, h, s, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + protected static boolean implVerify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte phflag, byte[] m, + int mOff, int mLen) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + byte[] R = Arrays.copyOfRange(sig, sigOff, sigOff + POINT_BYTES); + byte[] S = Arrays.copyOfRange(sig, sigOff + POINT_BYTES, sigOff + SIGNATURE_SIZE); + + if (!checkPointVar(R)) + { + return false; + } + if (!checkScalarVar(S)) + { + return false; + } + + PointAffine pA = new PointAffine(); + if (!decodePointVar(pk, pkOff, true, pA)) + { + return false; + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + dom2(d, phflag, ctx); + d.update(R, 0, POINT_BYTES); + d.update(pk, 
pkOff, POINT_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + + int[] nS = new int[SCALAR_INTS]; + decodeScalar(S, 0, nS); + + int[] nA = new int[SCALAR_INTS]; + decodeScalar(k, 0, nA); + + PointAccum pR = new PointAccum(); + scalarMultStrausVar(nS, nA, pA, pR); + + byte[] check = new byte[POINT_BYTES]; + return 0 != encodePoint(pR, check, 0) && Arrays.areEqual(check, R); + } + + protected static void pointAdd(PointExt p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.apm(r.y, r.x, b, a); + F.apm(p.y, p.x, d, c); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(r.u, r.v, c); + F.mul(c, p.t, c); + F.mul(c, C_d2, c); + F.mul(r.z, p.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointAdd(PointExt p, PointExt r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = F.create(); + int[] f = F.create(); + int[] g = F.create(); + int[] h = F.create(); + + F.apm(p.y, p.x, b, a); + F.apm(r.y, r.x, d, c); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(p.t, r.t, c); + F.mul(c, C_d2, c); + F.mul(p.z, r.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + F.mul(e, h, r.t); + } + + protected static void pointAddVar(boolean negate, PointExt p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + int[] nc, nd, nf, ng; + if (negate) + { + nc = d; nd = c; nf = g; ng = f; + } + else + { + nc = c; nd = d; nf = f; ng = g; + } + + F.apm(r.y, r.x, b, a); + F.apm(p.y, p.x, nd, nc); + F.mul(a, c, 
a); + F.mul(b, d, b); + F.mul(r.u, r.v, c); + F.mul(c, p.t, c); + F.mul(c, C_d2, c); + F.mul(r.z, p.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, ng, nf); + F.carry(ng); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointAddVar(boolean negate, PointExt p, PointExt q, PointExt r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = F.create(); + int[] f = F.create(); + int[] g = F.create(); + int[] h = F.create(); + + int[] nc, nd, nf, ng; + if (negate) + { + nc = d; nd = c; nf = g; ng = f; + } + else + { + nc = c; nd = d; nf = f; ng = g; + } + + F.apm(p.y, p.x, b, a); + F.apm(q.y, q.x, nd, nc); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(p.t, q.t, c); + F.mul(c, C_d2, c); + F.mul(p.z, q.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, ng, nf); + F.carry(ng); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + F.mul(e, h, r.t); + } + + protected static void pointAddPrecomp(PointPrecomp p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.apm(r.y, r.x, b, a); + F.mul(a, p.ymx_h, a); + F.mul(b, p.ypx_h, b); + F.mul(r.u, r.v, c); + F.mul(c, p.xyd, c); + F.apm(b, a, h, e); + F.apm(r.z, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static PointExt pointCopy(PointAccum p) + { + PointExt r = new PointExt(); + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + F.copy(p.z, 0, r.z, 0); + F.mul(p.u, p.v, r.t); + return r; + } + + protected static PointExt pointCopy(PointAffine p) + { + PointExt r = new PointExt(); + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + pointExtendXY(r); + return r; + } + + protected static PointExt pointCopy(PointExt p) + { + PointExt r = new PointExt(); + pointCopy(p, r); + return r; + } + + protected static void 
pointCopy(PointAffine p, PointAccum r) + { + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + pointExtendXY(r); + } + + protected static void pointCopy(PointExt p, PointExt r) + { + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + F.copy(p.z, 0, r.z, 0); + F.copy(p.t, 0, r.t, 0); + } + + protected static void pointDouble(PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.sqr(r.x, a); + F.sqr(r.y, b); + F.sqr(r.z, c); + F.add(c, c, c); + F.apm(a, b, h, g); + F.add(r.x, r.y, e); + F.sqr(e, e); + F.sub(h, e, e); + F.add(c, g, f); + F.carry(f); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointExtendXY(PointAccum p) + { + F.one(p.z); + F.copy(p.x, 0, p.u, 0); + F.copy(p.y, 0, p.v, 0); + } + + protected static void pointExtendXY(PointExt p) + { + F.one(p.z); + F.mul(p.x, p.y, p.t); + } + + protected static void pointLookup(int block, int index, PointPrecomp p) + { +// assert 0 <= block && block < PRECOMP_BLOCKS; +// assert 0 <= index && index < PRECOMP_POINTS; + + int off = block * PRECOMP_POINTS * 3 * F.SIZE; + + for (int i = 0; i < PRECOMP_POINTS; ++i) + { + int cond = ((i ^ index) - 1) >> 31; + F.cmov(cond, precompBase, off, p.ypx_h, 0); off += F.SIZE; + F.cmov(cond, precompBase, off, p.ymx_h, 0); off += F.SIZE; + F.cmov(cond, precompBase, off, p.xyd, 0); off += F.SIZE; + } + } + + protected static void pointLookup(int[] x, int n, int[] table, PointExt r) + { + // TODO This method is currently hardcoded to 4-bit windows and 8 precomputed points + + int w = getWindow4(x, n); + + int sign = (w >>> (4 - 1)) ^ 1; + int abs = (w ^ -sign) & 7; + +// assert sign == 0 || sign == 1; +// assert 0 <= abs && abs < 8; + + for (int i = 0, off = 0; i < 8; ++i) + { + int cond = ((i ^ abs) - 1) >> 31; + F.cmov(cond, table, off, r.x, 0); off += F.SIZE; + F.cmov(cond, table, off, r.y, 0); off += F.SIZE; + 
F.cmov(cond, table, off, r.z, 0); off += F.SIZE; + F.cmov(cond, table, off, r.t, 0); off += F.SIZE; + } + + F.cnegate(sign, r.x); + F.cnegate(sign, r.t); + } + + protected static void pointLookup(int[] table, int index, PointExt r) + { + int off = F.SIZE * 4 * index; + + F.copy(table, off, r.x, 0); off += F.SIZE; + F.copy(table, off, r.y, 0); off += F.SIZE; + F.copy(table, off, r.z, 0); off += F.SIZE; + F.copy(table, off, r.t, 0); + } + + protected static int[] pointPrecompute(PointAffine p, int count) + { +// assert count > 0; + + PointExt q = pointCopy(p); + PointExt d = pointCopy(q); + pointAdd(q, d); + + int[] table = F.createTable(count * 4); + int off = 0; + + int i = 0; + for (;;) + { + F.copy(q.x, 0, table, off); off += F.SIZE; + F.copy(q.y, 0, table, off); off += F.SIZE; + F.copy(q.z, 0, table, off); off += F.SIZE; + F.copy(q.t, 0, table, off); off += F.SIZE; + + if (++i == count) + { + break; + } + + pointAdd(d, q); + } + + return table; + } + + protected static PointExt[] pointPrecomputeVar(PointExt p, int count) + { +// assert count > 0; + + PointExt d = new PointExt(); + pointAddVar(false, p, p, d); + + PointExt[] table = new PointExt[count]; + table[0] = pointCopy(p); + for (int i = 1; i < count; ++i) + { + pointAddVar(false, table[i - 1], d, table[i] = new PointExt()); + } + return table; + } + + protected static void pointSetNeutral(PointAccum p) + { + F.zero(p.x); + F.one(p.y); + F.one(p.z); + F.zero(p.u); + F.one(p.v); + } + + protected static void pointSetNeutral(PointExt p) + { + F.zero(p.x); + F.one(p.y); + F.one(p.z); + F.zero(p.t); + } + + public static void precompute() + { + synchronized (precompLock) + { + if (precompBase != null) + { + return; + } + + // Precomputed table for the base point in verification ladder + { + PointExt b = new PointExt(); + F.copy(B_x, 0, b.x, 0); + F.copy(B_y, 0, b.y, 0); + pointExtendXY(b); + + precompBaseTable = pointPrecomputeVar(b, 1 << (WNAF_WIDTH_BASE - 2)); + } + + PointAccum p = new PointAccum(); + 
F.copy(B_x, 0, p.x, 0); + F.copy(B_y, 0, p.y, 0); + pointExtendXY(p); + + precompBase = F.createTable(PRECOMP_BLOCKS * PRECOMP_POINTS * 3); + + int off = 0; + for (int b = 0; b < PRECOMP_BLOCKS; ++b) + { + PointExt[] ds = new PointExt[PRECOMP_TEETH]; + + PointExt sum = new PointExt(); + pointSetNeutral(sum); + + for (int t = 0; t < PRECOMP_TEETH; ++t) + { + PointExt q = pointCopy(p); + pointAddVar(true, sum, q, sum); + pointDouble(p); + + ds[t] = pointCopy(p); + + if (b + t != PRECOMP_BLOCKS + PRECOMP_TEETH - 2) + { + for (int s = 1; s < PRECOMP_SPACING; ++s) + { + pointDouble(p); + } + } + } + + PointExt[] points = new PointExt[PRECOMP_POINTS]; + int k = 0; + points[k++] = sum; + + for (int t = 0; t < (PRECOMP_TEETH - 1); ++t) + { + int size = 1 << t; + for (int j = 0; j < size; ++j, ++k) + { + pointAddVar(false, points[k - size], ds[t], points[k] = new PointExt()); + } + } + +// assert k == PRECOMP_POINTS; + + int[] cs = F.createTable(PRECOMP_POINTS); + + // TODO[ed25519] A single batch inversion across all blocks? 
+ { + int[] u = F.create(); + F.copy(points[0].z, 0, u, 0); + F.copy(u, 0, cs, 0); + + int i = 0; + while (++i < PRECOMP_POINTS) + { + F.mul(u, points[i].z, u); + F.copy(u, 0, cs, i * F.SIZE); + } + + F.add(u, u, u); + F.invVar(u, u); + --i; + + int[] t = F.create(); + + while (i > 0) + { + int j = i--; + F.copy(cs, i * F.SIZE, t, 0); + F.mul(t, u, t); + F.copy(t, 0, cs, j * F.SIZE); + F.mul(u, points[j].z, u); + } + + F.copy(u, 0, cs, 0); + } + + for (int i = 0; i < PRECOMP_POINTS; ++i) + { + PointExt q = points[i]; + + int[] x = F.create(); + int[] y = F.create(); + +// F.add(q.z, q.z, x); +// F.invVar(x, y); + F.copy(cs, i * F.SIZE, y, 0); + + F.mul(q.x, y, x); + F.mul(q.y, y, y); + + PointPrecomp r = new PointPrecomp(); + F.apm(y, x, r.ypx_h, r.ymx_h); + F.mul(x, y, r.xyd); + F.mul(r.xyd, C_d4, r.xyd); + + F.normalize(r.ypx_h); + F.normalize(r.ymx_h); +// F.normalize(r.xyd); + + F.copy(r.ypx_h, 0, precompBase, off); off += F.SIZE; + F.copy(r.ymx_h, 0, precompBase, off); off += F.SIZE; + F.copy(r.xyd, 0, precompBase, off); off += F.SIZE; + } + } + +// assert off == precompBase.length; + } + } + + protected static void pruneScalar(byte[] n, int nOff, byte[] r) + { + System.arraycopy(n, nOff, r, 0, SCALAR_BYTES); + + r[0] &= 0xF8; + r[SCALAR_BYTES - 1] &= 0x7F; + r[SCALAR_BYTES - 1] |= 0x40; + } + + protected static byte[] reduceScalar(byte[] n) + { + long x00 = decode32(n, 0) & M32L; // x00:32/-- + long x01 = (decode24(n, 4) << 4) & M32L; // x01:28/-- + long x02 = decode32(n, 7) & M32L; // x02:32/-- + long x03 = (decode24(n, 11) << 4) & M32L; // x03:28/-- + long x04 = decode32(n, 14) & M32L; // x04:32/-- + long x05 = (decode24(n, 18) << 4) & M32L; // x05:28/-- + long x06 = decode32(n, 21) & M32L; // x06:32/-- + long x07 = (decode24(n, 25) << 4) & M32L; // x07:28/-- + long x08 = decode32(n, 28) & M32L; // x08:32/-- + long x09 = (decode24(n, 32) << 4) & M32L; // x09:28/-- + long x10 = decode32(n, 35) & M32L; // x10:32/-- + long x11 = (decode24(n, 39) << 4) & M32L; 
// x11:28/-- + long x12 = decode32(n, 42) & M32L; // x12:32/-- + long x13 = (decode24(n, 46) << 4) & M32L; // x13:28/-- + long x14 = decode32(n, 49) & M32L; // x14:32/-- + long x15 = (decode24(n, 53) << 4) & M32L; // x15:28/-- + long x16 = decode32(n, 56) & M32L; // x16:32/-- + long x17 = (decode24(n, 60) << 4) & M32L; // x17:28/-- + long x18 = n[63] & M08L; // x18:08/-- + long t; + +// x18 += (x17 >> 28); x17 &= M28L; + x09 -= x18 * L0; // x09:34/28 + x10 -= x18 * L1; // x10:33/30 + x11 -= x18 * L2; // x11:35/28 + x12 -= x18 * L3; // x12:32/31 + x13 -= x18 * L4; // x13:28/21 + + x17 += (x16 >> 28); x16 &= M28L; // x17:28/--, x16:28/-- + x08 -= x17 * L0; // x08:54/32 + x09 -= x17 * L1; // x09:52/51 + x10 -= x17 * L2; // x10:55/34 + x11 -= x17 * L3; // x11:51/36 + x12 -= x17 * L4; // x12:41/-- + +// x16 += (x15 >> 28); x15 &= M28L; + x07 -= x16 * L0; // x07:54/28 + x08 -= x16 * L1; // x08:54/53 + x09 -= x16 * L2; // x09:55/53 + x10 -= x16 * L3; // x10:55/52 + x11 -= x16 * L4; // x11:51/41 + + x15 += (x14 >> 28); x14 &= M28L; // x15:28/--, x14:28/-- + x06 -= x15 * L0; // x06:54/32 + x07 -= x15 * L1; // x07:54/53 + x08 -= x15 * L2; // x08:56/-- + x09 -= x15 * L3; // x09:55/54 + x10 -= x15 * L4; // x10:55/53 + +// x14 += (x13 >> 28); x13 &= M28L; + x05 -= x14 * L0; // x05:54/28 + x06 -= x14 * L1; // x06:54/53 + x07 -= x14 * L2; // x07:56/-- + x08 -= x14 * L3; // x08:56/51 + x09 -= x14 * L4; // x09:56/-- + + x13 += (x12 >> 28); x12 &= M28L; // x13:28/22, x12:28/-- + x04 -= x13 * L0; // x04:54/49 + x05 -= x13 * L1; // x05:54/53 + x06 -= x13 * L2; // x06:56/-- + x07 -= x13 * L3; // x07:56/52 + x08 -= x13 * L4; // x08:56/52 + + x12 += (x11 >> 28); x11 &= M28L; // x12:28/24, x11:28/-- + x03 -= x12 * L0; // x03:54/49 + x04 -= x12 * L1; // x04:54/51 + x05 -= x12 * L2; // x05:56/-- + x06 -= x12 * L3; // x06:56/52 + x07 -= x12 * L4; // x07:56/53 + + x11 += (x10 >> 28); x10 &= M28L; // x11:29/--, x10:28/-- + x02 -= x11 * L0; // x02:55/32 + x03 -= x11 * L1; // x03:55/-- + x04 -= 
x11 * L2; // x04:56/55 + x05 -= x11 * L3; // x05:56/52 + x06 -= x11 * L4; // x06:56/53 + + x10 += (x09 >> 28); x09 &= M28L; // x10:29/--, x09:28/-- + x01 -= x10 * L0; // x01:55/28 + x02 -= x10 * L1; // x02:55/54 + x03 -= x10 * L2; // x03:56/55 + x04 -= x10 * L3; // x04:57/-- + x05 -= x10 * L4; // x05:56/53 + + x08 += (x07 >> 28); x07 &= M28L; // x08:56/53, x07:28/-- + x09 += (x08 >> 28); x08 &= M28L; // x09:29/25, x08:28/-- + + t = x08 >>> 27; + x09 += t; // x09:29/26 + + x00 -= x09 * L0; // x00:55/53 + x01 -= x09 * L1; // x01:55/54 + x02 -= x09 * L2; // x02:57/-- + x03 -= x09 * L3; // x03:57/-- + x04 -= x09 * L4; // x04:57/42 + + x01 += (x00 >> 28); x00 &= M28L; + x02 += (x01 >> 28); x01 &= M28L; + x03 += (x02 >> 28); x02 &= M28L; + x04 += (x03 >> 28); x03 &= M28L; + x05 += (x04 >> 28); x04 &= M28L; + x06 += (x05 >> 28); x05 &= M28L; + x07 += (x06 >> 28); x06 &= M28L; + x08 += (x07 >> 28); x07 &= M28L; + x09 = (x08 >> 28); x08 &= M28L; + + x09 -= t; + +// assert x09 == 0L || x09 == -1L; + + x00 += x09 & L0; + x01 += x09 & L1; + x02 += x09 & L2; + x03 += x09 & L3; + x04 += x09 & L4; + + x01 += (x00 >> 28); x00 &= M28L; + x02 += (x01 >> 28); x01 &= M28L; + x03 += (x02 >> 28); x02 &= M28L; + x04 += (x03 >> 28); x03 &= M28L; + x05 += (x04 >> 28); x04 &= M28L; + x06 += (x05 >> 28); x05 &= M28L; + x07 += (x06 >> 28); x06 &= M28L; + x08 += (x07 >> 28); x07 &= M28L; + + byte[] r = new byte[SCALAR_BYTES]; + encode56(x00 | (x01 << 28), r, 0); + encode56(x02 | (x03 << 28), r, 7); + encode56(x04 | (x05 << 28), r, 14); + encode56(x06 | (x07 << 28), r, 21); + encode32((int)x08, r, 28); + return r; + } + + protected static void scalarMult(byte[] k, PointAffine p, PointAccum r) + { + int[] n = new int[SCALAR_INTS]; + decodeScalar(k, 0, n); + +// assert 0 == (n[0] & 7); +// assert 1 == n[SCALAR_INTS - 1] >>> 30; + + Nat.shiftDownBits(SCALAR_INTS, n, 3, 1); + + // Recode the scalar into signed-digit form + { + //int c1 = + Nat.cadd(SCALAR_INTS, ~n[0] & 1, n, L, n); //assert c1 == 
0; + //int c2 = + Nat.shiftDownBit(SCALAR_INTS, n, 0); //assert c2 == (1 << 31); + } + +// assert 1 == n[SCALAR_INTS - 1] >>> 28; + + int[] table = pointPrecompute(p, 8); + PointExt q = new PointExt(); + + // Replace first 4 doublings (2^4 * P) with 1 addition (P + 15 * P) + pointCopy(p, r); + pointLookup(table, 7, q); + pointAdd(q, r); + + int w = 62; + for (;;) + { + pointLookup(n, w, table, q); + pointAdd(q, r); + + pointDouble(r); + pointDouble(r); + pointDouble(r); + + if (--w < 0) + { + break; + } + + pointDouble(r); + } + } + + protected static void scalarMultBase(byte[] k, PointAccum r) + { + precompute(); + + int[] n = new int[SCALAR_INTS]; + decodeScalar(k, 0, n); + + // Recode the scalar into signed-digit form, then group comb bits in each block + { + //int c1 = + Nat.cadd(SCALAR_INTS, ~n[0] & 1, n, L, n); //assert c1 == 0; + //int c2 = + Nat.shiftDownBit(SCALAR_INTS, n, 1); //assert c2 == (1 << 31); + + for (int i = 0; i < SCALAR_INTS; ++i) + { + n[i] = Interleave.shuffle2(n[i]); + } + } + + PointPrecomp p = new PointPrecomp(); + + pointSetNeutral(r); + + int cOff = (PRECOMP_SPACING - 1) * PRECOMP_TEETH; + for (;;) + { + for (int b = 0; b < PRECOMP_BLOCKS; ++b) + { + int w = n[b] >>> cOff; + int sign = (w >>> (PRECOMP_TEETH - 1)) & 1; + int abs = (w ^ -sign) & PRECOMP_MASK; + +// assert sign == 0 || sign == 1; +// assert 0 <= abs && abs < PRECOMP_POINTS; + + pointLookup(b, abs, p); + + F.cswap(sign, p.ypx_h, p.ymx_h); + F.cnegate(sign, p.xyd); + + pointAddPrecomp(p, r); + } + + if ((cOff -= PRECOMP_TEETH) < 0) + { + break; + } + + pointDouble(r); + } + } + + protected static void scalarMultBaseEncoded(byte[] k, byte[] r, int rOff) + { + PointAccum p = new PointAccum(); + scalarMultBase(k, p); + if (0 == encodePoint(p, r, rOff)) + { + throw new IllegalStateException(); + } + } + + /** + * NOTE: Only for use by X25519 + */ + public static void scalarMultBaseYZ(X25519.Friend friend, byte[] k, int kOff, int[] y, int[] z) + { + if (null == friend) + { + 
throw new NullPointerException("This method is only for use by X25519"); + } + + byte[] n = new byte[SCALAR_BYTES]; + pruneScalar(k, kOff, n); + + PointAccum p = new PointAccum(); + scalarMultBase(n, p); + if (0 == checkPoint(p.x, p.y, p.z)) + { + throw new IllegalStateException(); + } + F.copy(p.y, 0, y, 0); + F.copy(p.z, 0, z, 0); + } + + protected static void scalarMultStrausVar(int[] nb, int[] np, PointAffine p, PointAccum r) + { + precompute(); + + final int width = 5; + + byte[] ws_b = getWnafVar(nb, WNAF_WIDTH_BASE); + byte[] ws_p = getWnafVar(np, width); + + PointExt[] tp = pointPrecomputeVar(pointCopy(p), 1 << (width - 2)); + + pointSetNeutral(r); + + for (int bit = 252;;) + { + int wb = ws_b[bit]; + if (wb != 0) + { + int sign = wb >> 31; + int index = (wb ^ sign) >>> 1; + + pointAddVar((sign != 0), precompBaseTable[index], r); + } + + int wp = ws_p[bit]; + if (wp != 0) + { + int sign = wp >> 31; + int index = (wp ^ sign) >>> 1; + + pointAddVar((sign != 0), tp[index], r); + } + + if (--bit < 0) + { + break; + } + + pointDouble(r); + } + } + + public static void sign(byte[] sk, int skOff, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte[] ctx = null; + byte phflag = 0x00; + + implSign(sk, skOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte[] ctx = null; + byte phflag = 0x00; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] ctx, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte phflag = 0x00; + + implSign(sk, skOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte phflag = 0x00; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + 
+ public static void signPrehash(byte[] sk, int skOff, byte[] ctx, byte[] ph, int phOff, byte[] sig, int sigOff) + { + byte phflag = 0x01; + + implSign(sk, skOff, ctx, phflag, ph, phOff, PREHASH_SIZE, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte[] ph, int phOff, byte[] sig, int sigOff) + { + byte phflag = 0x01; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, ph, phOff, PREHASH_SIZE, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] ctx, Digest ph, byte[] sig, int sigOff) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + implSign(sk, skOff, ctx, phflag, m, 0, m.length, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, Digest ph, byte[] sig, int sigOff) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, 0, m.length, sig, sigOff); + } + + public static boolean verify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] m, int mOff, int mLen) + { + byte[] ctx = null; + byte phflag = 0x00; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, mOff, mLen); + } + + public static boolean verify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte[] m, int mOff, int mLen) + { + byte phflag = 0x00; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, mOff, mLen); + } + + public static boolean verifyPrehash(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte[] ph, int phOff) + { + byte phflag = 0x01; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, ph, phOff, PREHASH_SIZE); + } + + public static boolean verifyPrehash(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, Digest ph) + { + byte[] m = new 
byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, 0, m.length); + } +} diff --git a/src/main/java/org/qortal/crypto/Crypto.java b/src/main/java/org/qortal/crypto/Crypto.java index 5d91781c..75e5028e 100644 --- a/src/main/java/org/qortal/crypto/Crypto.java +++ b/src/main/java/org/qortal/crypto/Crypto.java @@ -253,6 +253,10 @@ public abstract class Crypto { return false; } + public static byte[] toPublicKey(byte[] privateKey) { + return new Ed25519PrivateKeyParameters(privateKey, 0).generatePublicKey().getEncoded(); + } + public static boolean verify(byte[] publicKey, byte[] signature, byte[] message) { try { return Ed25519.verify(signature, 0, publicKey, 0, message, 0, message.length); @@ -264,16 +268,24 @@ public abstract class Crypto { public static byte[] sign(Ed25519PrivateKeyParameters edPrivateKeyParams, byte[] message) { byte[] signature = new byte[SIGNATURE_LENGTH]; - edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519, edPrivateKeyParams.generatePublicKey(), null, message, 0, message.length, signature, 0); + edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519,null, message, 0, message.length, signature, 0); + + return signature; + } + + public static byte[] sign(byte[] privateKey, byte[] message) { + byte[] signature = new byte[SIGNATURE_LENGTH]; + + new Ed25519PrivateKeyParameters(privateKey, 0).sign(Ed25519.Algorithm.Ed25519,null, message, 0, message.length, signature, 0); return signature; } public static byte[] getSharedSecret(byte[] privateKey, byte[] publicKey) { - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(privateKey); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(privateKey); X25519PrivateKeyParameters xPrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); - byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(publicKey); + byte[] 
x25519PublicKey = Qortal25519Extras.toX25519PublicKey(publicKey); X25519PublicKeyParameters xPublicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0); byte[] sharedSecret = new byte[SHARED_SECRET_LENGTH]; @@ -281,5 +293,4 @@ public abstract class Crypto { return sharedSecret; } - } diff --git a/src/main/java/org/qortal/crypto/Qortal25519Extras.java b/src/main/java/org/qortal/crypto/Qortal25519Extras.java new file mode 100644 index 00000000..42cca93e --- /dev/null +++ b/src/main/java/org/qortal/crypto/Qortal25519Extras.java @@ -0,0 +1,234 @@ +package org.qortal.crypto; + +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA512Digest; +import org.bouncycastle.math.ec.rfc7748.X25519; +import org.bouncycastle.math.ec.rfc7748.X25519Field; +import org.bouncycastle.math.ec.rfc8032.Ed25519; +import org.bouncycastle.math.raw.Nat256; + +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.Collection; + +/** + * Additions to BouncyCastle providing: + *

+ *
    + *
  • Ed25519 to X25519 key conversion
  • + *
  • Aggregate public keys
  • + *
  • Aggregate signatures
  • + *
+ */ +public abstract class Qortal25519Extras extends BouncyCastleEd25519 { + + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) { + int[] one = new int[X25519Field.SIZE]; + X25519Field.one(one); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(ed25519PublicKey, 0, true, pA)) + return null; + + int[] y = pA.y; + + int[] oneMinusY = new int[X25519Field.SIZE]; + X25519Field.sub(one, y, oneMinusY); + + int[] onePlusY = new int[X25519Field.SIZE]; + X25519Field.add(one, y, onePlusY); + + int[] oneMinusYInverted = new int[X25519Field.SIZE]; + X25519Field.inv(oneMinusY, oneMinusYInverted); + + int[] u = new int[X25519Field.SIZE]; + X25519Field.mul(onePlusY, oneMinusYInverted, u); + + X25519Field.normalize(u); + + byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE]; + X25519Field.encode(u, x25519PublicKey, 0); + + return x25519PublicKey; + } + + public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) { + Digest d = Ed25519.createPrehash(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length); + d.doFinal(h, 0); + + byte[] s = new byte[X25519.SCALAR_SIZE]; + + System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE); + s[0] &= 0xF8; + s[X25519.SCALAR_SIZE - 1] &= 0x7F; + s[X25519.SCALAR_SIZE - 1] |= 0x40; + + return s; + } + + // Mostly for test support + public static PointAccum newPointAccum() { + return new PointAccum(); + } + + public static byte[] aggregatePublicKeys(Collection publicKeys) { + PointAccum rAccum = null; + + for (byte[] publicKey : publicKeys) { + PointAffine pA = new PointAffine(); + if (!decodePointVar(publicKey, 0, false, pA)) + // Failed to decode + return null; + + if (rAccum == null) { + rAccum = new PointAccum(); + pointCopy(pA, rAccum); + } else { + pointAdd(pointCopy(pA), rAccum); + } + } + + byte[] publicKey = new byte[SCALAR_BYTES]; + if (0 == encodePoint(rAccum, publicKey, 0)) + // Failed 
to encode + return null; + + return publicKey; + } + + public static byte[] aggregateSignatures(Collection signatures) { + // Signatures are (R, s) + // R is a point + // s is a scalar + PointAccum rAccum = null; + int[] sAccum = new int[SCALAR_INTS]; + + byte[] rEncoded = new byte[POINT_BYTES]; + int[] sPart = new int[SCALAR_INTS]; + for (byte[] signature : signatures) { + System.arraycopy(signature,0, rEncoded, 0, rEncoded.length); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(rEncoded, 0, false, pA)) + // Failed to decode + return null; + + if (rAccum == null) { + rAccum = new PointAccum(); + pointCopy(pA, rAccum); + + decode32(signature, rEncoded.length, sAccum, 0, SCALAR_INTS); + } else { + pointAdd(pointCopy(pA), rAccum); + + decode32(signature, rEncoded.length, sPart, 0, SCALAR_INTS); + Nat256.addTo(sPart, sAccum); + + // "mod L" on sAccum + if (Nat256.gte(sAccum, L)) + Nat256.subFrom(L, sAccum); + } + } + + byte[] signature = new byte[SIGNATURE_SIZE]; + if (0 == encodePoint(rAccum, signature, 0)) + // Failed to encode + return null; + + for (int i = 0; i < sAccum.length; ++i) { + encode32(sAccum[i], signature, POINT_BYTES + i * 4); + } + + return signature; + } + + public static byte[] signForAggregation(byte[] privateKey, byte[] message) { + // Very similar to BouncyCastle's implementation except we use secure random nonce and different hash + Digest d = new SHA512Digest(); + byte[] h = new byte[d.getDigestSize()]; + + d.reset(); + d.update(privateKey, 0, privateKey.length); + d.doFinal(h, 0); + + byte[] sH = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, sH); + + byte[] publicKey = new byte[SCALAR_BYTES]; + scalarMultBaseEncoded(sH, publicKey, 0); + + byte[] rSeed = new byte[d.getDigestSize()]; + SECURE_RANDOM.nextBytes(rSeed); + + byte[] r = new byte[SCALAR_BYTES]; + pruneScalar(rSeed, 0, r); + + byte[] R = new byte[POINT_BYTES]; + scalarMultBaseEncoded(r, R, 0); + + d.reset(); + d.update(message, 0, message.length); + d.doFinal(h, 0); + 
byte[] k = reduceScalar(h); + + byte[] s = calculateS(r, k, sH); + + byte[] signature = new byte[SIGNATURE_SIZE]; + System.arraycopy(R, 0, signature, 0, POINT_BYTES); + System.arraycopy(s, 0, signature, POINT_BYTES, SCALAR_BYTES); + + return signature; + } + + public static boolean verifyAggregated(byte[] publicKey, byte[] signature, byte[] message) { + byte[] R = Arrays.copyOfRange(signature, 0, POINT_BYTES); + + byte[] s = Arrays.copyOfRange(signature, POINT_BYTES, POINT_BYTES + SCALAR_BYTES); + + if (!checkPointVar(R)) + // R out of bounds + return false; + + if (!checkScalarVar(s)) + // s out of bounds + return false; + + byte[] S = new byte[POINT_BYTES]; + scalarMultBaseEncoded(s, S, 0); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(publicKey, 0, true, pA)) + // Failed to decode + return false; + + Digest d = new SHA512Digest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(message, 0, message.length); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + + int[] nS = new int[SCALAR_INTS]; + decodeScalar(s, 0, nS); + + int[] nA = new int[SCALAR_INTS]; + decodeScalar(k, 0, nA); + + /*PointAccum*/ + PointAccum pR = new PointAccum(); + scalarMultStrausVar(nS, nA, pA, pR); + + byte[] check = new byte[POINT_BYTES]; + if (0 == encodePoint(pR, check, 0)) + // Failed to encode + return false; + + return Arrays.equals(check, R); + } +} diff --git a/src/main/java/org/qortal/data/network/OnlineAccountData.java b/src/main/java/org/qortal/data/network/OnlineAccountData.java index 15792307..28c454b5 100644 --- a/src/main/java/org/qortal/data/network/OnlineAccountData.java +++ b/src/main/java/org/qortal/data/network/OnlineAccountData.java @@ -5,6 +5,7 @@ import java.util.Arrays; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlTransient; import org.qortal.account.PublicKeyAccount; @@ -16,6 +17,9 @@ public class 
OnlineAccountData { protected byte[] signature; protected byte[] publicKey; + @XmlTransient + private int hash; + // Constructors // necessary for JAXB serialization @@ -62,20 +66,23 @@ public class OnlineAccountData { if (otherOnlineAccountData.timestamp != this.timestamp) return false; - // Signature more likely to be unique than public key - if (!Arrays.equals(otherOnlineAccountData.signature, this.signature)) - return false; - if (!Arrays.equals(otherOnlineAccountData.publicKey, this.publicKey)) return false; + // We don't compare signature because it's not our remit to verify and newer aggregate signatures use random nonces + return true; } @Override public int hashCode() { - // Pretty lazy implementation - return (int) this.timestamp; + int h = this.hash; + if (h == 0) { + this.hash = h = Long.hashCode(this.timestamp) + ^ Arrays.hashCode(this.publicKey); + // We don't use signature because newer aggregate signatures use random nonces + } + return h; } } diff --git a/src/main/java/org/qortal/network/Network.java b/src/main/java/org/qortal/network/Network.java index 9648f153..10f02d52 100644 --- a/src/main/java/org/qortal/network/Network.java +++ b/src/main/java/org/qortal/network/Network.java @@ -469,6 +469,8 @@ public class Network { class NetworkProcessor extends ExecuteProduceConsume { + private final Logger LOGGER = LogManager.getLogger(NetworkProcessor.class); + private final AtomicLong nextConnectTaskTimestamp = new AtomicLong(0L); // ms - try first connect once NTP syncs private final AtomicLong nextBroadcastTimestamp = new AtomicLong(0L); // ms - try first broadcast once NTP syncs diff --git a/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java new file mode 100644 index 00000000..66c7c47a --- /dev/null +++ b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java @@ -0,0 +1,112 @@ +package org.qortal.network.message; + +import 
com.google.common.primitives.Longs; +import org.qortal.transform.Transformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.*; + +/** + * For requesting online accounts info from remote peer, given our list of online accounts. + *

+ * Different format to V1 and V2:
+ * <ul>
+ * <li>V1 is: number of entries, then timestamp + pubkey for each entry</li>
+ * <li>V2 is: groups of: number of entries, timestamp, then pubkey for each entry</li>
+ * <li>V3 is: groups of: timestamp, number of entries (one per leading byte), then hash(pubkeys) for each entry</li>
+ * </ul>
+ * End + */ +public class GetOnlineAccountsV3Message extends Message { + + private static final Map> EMPTY_ONLINE_ACCOUNTS = Collections.emptyMap(); + private Map> hashesByTimestampThenByte; + + public GetOnlineAccountsV3Message(Map> hashesByTimestampThenByte) { + super(MessageType.GET_ONLINE_ACCOUNTS_V3); + + // If we don't have ANY online accounts then it's an easier construction... + if (hashesByTimestampThenByte.isEmpty()) { + this.dataBytes = EMPTY_DATA_BYTES; + return; + } + + // We should know exactly how many bytes to allocate now + int byteSize = hashesByTimestampThenByte.size() * (Transformer.TIMESTAMP_LENGTH + Transformer.BYTE_LENGTH); + + byteSize += hashesByTimestampThenByte.values() + .stream() + .mapToInt(map -> map.size() * Transformer.PUBLIC_KEY_LENGTH) + .sum(); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(byteSize); + + // Warning: no double-checking/fetching! We must be ConcurrentMap compatible. + // So no contains() then get() or multiple get()s on the same key/map. 
+ try { + for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) { + bytes.write(Longs.toByteArray(outerMapEntry.getKey())); + + var innerMap = outerMapEntry.getValue(); + + // Number of entries: 1 - 256, where 256 is represented by 0 + bytes.write(innerMap.size() & 0xFF); + + for (byte[] hashBytes : innerMap.values()) { + bytes.write(hashBytes); + } + } + } catch (IOException e) { + throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); + } + + this.dataBytes = bytes.toByteArray(); + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + private GetOnlineAccountsV3Message(int id, Map> hashesByTimestampThenByte) { + super(id, MessageType.GET_ONLINE_ACCOUNTS_V3); + + this.hashesByTimestampThenByte = hashesByTimestampThenByte; + } + + public Map> getHashesByTimestampThenByte() { + return this.hashesByTimestampThenByte; + } + + public static Message fromByteBuffer(int id, ByteBuffer bytes) { + // 'empty' case + if (!bytes.hasRemaining()) { + return new GetOnlineAccountsV3Message(id, EMPTY_ONLINE_ACCOUNTS); + } + + Map> hashesByTimestampThenByte = new HashMap<>(); + + while (bytes.hasRemaining()) { + long timestamp = bytes.getLong(); + + int hashCount = bytes.get(); + if (hashCount <= 0) + // 256 is represented by 0. + // Also converts negative signed value (e.g. 
-1) to proper positive unsigned value (255) + hashCount += 256; + + Map hashesByByte = new HashMap<>(); + + for (int i = 0; i < hashCount; ++i) { + byte[] publicKeyHash = new byte[Transformer.PUBLIC_KEY_LENGTH]; + bytes.get(publicKeyHash); + + hashesByByte.put(publicKeyHash[0], publicKeyHash); + } + + hashesByTimestampThenByte.put(timestamp, hashesByByte); + } + + return new GetOnlineAccountsV3Message(id, hashesByTimestampThenByte); + } + +} diff --git a/src/main/java/org/qortal/network/message/Message.java b/src/main/java/org/qortal/network/message/Message.java index f752b5b9..d8467d90 100644 --- a/src/main/java/org/qortal/network/message/Message.java +++ b/src/main/java/org/qortal/network/message/Message.java @@ -46,6 +46,7 @@ public abstract class Message { private static final int MAX_DATA_SIZE = 10 * 1024 * 1024; // 10MB protected static final byte[] EMPTY_DATA_BYTES = new byte[0]; + private static final ByteBuffer EMPTY_READ_ONLY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_DATA_BYTES).asReadOnlyBuffer(); protected int id; protected final MessageType type; @@ -126,7 +127,7 @@ public abstract class Message { if (dataSize > 0 && dataSize + CHECKSUM_LENGTH > readOnlyBuffer.remaining()) return null; - ByteBuffer dataSlice = null; + ByteBuffer dataSlice = EMPTY_READ_ONLY_BYTE_BUFFER; if (dataSize > 0) { byte[] expectedChecksum = new byte[CHECKSUM_LENGTH]; readOnlyBuffer.get(expectedChecksum); diff --git a/src/main/java/org/qortal/network/message/MessageType.java b/src/main/java/org/qortal/network/message/MessageType.java index c2ae7676..de711dc3 100644 --- a/src/main/java/org/qortal/network/message/MessageType.java +++ b/src/main/java/org/qortal/network/message/MessageType.java @@ -46,6 +46,8 @@ public enum MessageType { GET_ONLINE_ACCOUNTS(81, GetOnlineAccountsMessage::fromByteBuffer), ONLINE_ACCOUNTS_V2(82, OnlineAccountsV2Message::fromByteBuffer), GET_ONLINE_ACCOUNTS_V2(83, GetOnlineAccountsV2Message::fromByteBuffer), + // ONLINE_ACCOUNTS_V3(84, 
OnlineAccountsV3Message::fromByteBuffer), + GET_ONLINE_ACCOUNTS_V3(85, GetOnlineAccountsV3Message::fromByteBuffer), ARBITRARY_DATA(90, ArbitraryDataMessage::fromByteBuffer), GET_ARBITRARY_DATA(91, GetArbitraryDataMessage::fromByteBuffer), diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java index 61f4b76f..6ec30e20 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java @@ -23,7 +23,6 @@ import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.qortal.account.PrivateKeyAccount; import org.qortal.crypto.Crypto; import org.qortal.globalization.Translator; import org.qortal.gui.SysTray; @@ -1003,7 +1002,7 @@ public class HSQLDBRepository implements Repository { if (privateKey == null) return null; - return PrivateKeyAccount.toPublicKey(privateKey); + return Crypto.toPublicKey(privateKey); } public static String ed25519PublicKeyToAddress(byte[] publicKey) { diff --git a/src/main/resources/blockchain.json b/src/main/resources/blockchain.json index d2104e65..62a1147d 100644 --- a/src/main/resources/blockchain.json +++ b/src/main/resources/blockchain.json @@ -60,7 +60,8 @@ "calcChainWeightTimestamp": 1620579600000, "transactionV5Timestamp": 1642176000000, "transactionV6Timestamp": 9999999999999, - "disableReferenceTimestamp": 1655222400000 + "disableReferenceTimestamp": 1655222400000, + "aggregateSignatureTimestamp": 9999999999999 }, "genesisInfo": { "version": 4, diff --git a/src/test/java/org/qortal/test/CryptoTests.java b/src/test/java/org/qortal/test/CryptoTests.java index 6a0133d2..2cc73182 100644 --- a/src/test/java/org/qortal/test/CryptoTests.java +++ b/src/test/java/org/qortal/test/CryptoTests.java @@ -4,7 +4,7 @@ import org.junit.Test; import org.qortal.account.PrivateKeyAccount; import 
org.qortal.block.BlockChain; import org.qortal.crypto.AES; -import org.qortal.crypto.BouncyCastle25519; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.crypto.Crypto; import org.qortal.test.common.Common; import org.qortal.utils.Base58; @@ -123,14 +123,14 @@ public class CryptoTests extends Common { random.nextBytes(ed25519PrivateKey); PrivateKeyAccount account = new PrivateKeyAccount(null, ed25519PrivateKey); - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(account.getPrivateKey()); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(account.getPrivateKey()); X25519PrivateKeyParameters x25519PrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); // Derive X25519 public key from X25519 private key byte[] x25519PublicKeyFromPrivate = x25519PrivateKeyParams.generatePublicKey().getEncoded(); // Derive X25519 public key from Ed25519 public key - byte[] x25519PublicKeyFromEd25519 = BouncyCastle25519.toX25519PublicKey(account.getPublicKey()); + byte[] x25519PublicKeyFromEd25519 = Qortal25519Extras.toX25519PublicKey(account.getPublicKey()); assertEquals(String.format("Public keys do not match, from private key %s", Base58.encode(ed25519PrivateKey)), Base58.encode(x25519PublicKeyFromPrivate), Base58.encode(x25519PublicKeyFromEd25519)); } @@ -162,10 +162,10 @@ public class CryptoTests extends Common { } private static byte[] calcBCSharedSecret(byte[] ed25519PrivateKey, byte[] ed25519PublicKey) { - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ed25519PrivateKey); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ed25519PrivateKey); X25519PrivateKeyParameters privateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); - byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(ed25519PublicKey); + byte[] x25519PublicKey = Qortal25519Extras.toX25519PublicKey(ed25519PublicKey); X25519PublicKeyParameters publicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0); 
byte[] sharedSecret = new byte[32]; @@ -186,10 +186,10 @@ public class CryptoTests extends Common { final String expectedTheirX25519PublicKey = "ANjnZLRSzW9B1aVamiYGKP3XtBooU9tGGDjUiibUfzp2"; final String expectedSharedSecret = "DTMZYG96x8XZuGzDvHFByVLsXedimqtjiXHhXPVe58Ap"; - byte[] ourX25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ourPrivateKey); + byte[] ourX25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ourPrivateKey); assertEquals("X25519 private key incorrect", expectedOurX25519PrivateKey, Base58.encode(ourX25519PrivateKey)); - byte[] theirX25519PublicKey = BouncyCastle25519.toX25519PublicKey(theirPublicKey); + byte[] theirX25519PublicKey = Qortal25519Extras.toX25519PublicKey(theirPublicKey); assertEquals("X25519 public key incorrect", expectedTheirX25519PublicKey, Base58.encode(theirX25519PublicKey)); byte[] sharedSecret = calcBCSharedSecret(ourPrivateKey, theirPublicKey); diff --git a/src/test/java/org/qortal/test/SchnorrTests.java b/src/test/java/org/qortal/test/SchnorrTests.java new file mode 100644 index 00000000..03c92d2f --- /dev/null +++ b/src/test/java/org/qortal/test/SchnorrTests.java @@ -0,0 +1,190 @@ +package org.qortal.test; + +import com.google.common.hash.HashCode; +import com.google.common.primitives.Bytes; +import com.google.common.primitives.Longs; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import org.junit.Test; +import org.qortal.crypto.Qortal25519Extras; +import org.qortal.data.network.OnlineAccountData; +import org.qortal.transform.Transformer; + +import java.math.BigInteger; +import java.security.SecureRandom; +import java.security.Security; +import java.util.*; +import java.util.stream.Collectors; + +import static org.junit.Assert.*; + +public class SchnorrTests extends Qortal25519Extras { + + static { + // This must go before any calls to LogManager/Logger + System.setProperty("java.util.logging.manager", 
"org.apache.logging.log4j.jul.LogManager"); + + Security.insertProviderAt(new BouncyCastleProvider(), 0); + Security.insertProviderAt(new BouncyCastleJsseProvider(), 1); + } + + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + @Test + public void testConversion() { + // Scalar form + byte[] scalarA = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + System.out.printf("a: %s%n", HashCode.fromBytes(scalarA)); + + byte[] pointA = HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes(); + + BigInteger expectedY = new BigInteger("46316835694926478169428394003475163141307993866256225615783033603165251855960"); + + PointAccum pointAccum = Qortal25519Extras.newPointAccum(); + scalarMultBase(scalarA, pointAccum); + + byte[] encoded = new byte[POINT_BYTES]; + if (0 == encodePoint(pointAccum, encoded, 0)) + fail("Point encoding failed"); + + System.out.printf("aG: %s%n", HashCode.fromBytes(encoded)); + assertArrayEquals(pointA, encoded); + + byte[] yBytes = new byte[POINT_BYTES]; + System.arraycopy(encoded,0, yBytes, 0, encoded.length); + Bytes.reverse(yBytes); + + System.out.printf("yBytes: %s%n", HashCode.fromBytes(yBytes)); + BigInteger yBI = new BigInteger(yBytes); + + System.out.printf("aG y: %s%n", yBI); + assertEquals(expectedY, yBI); + } + + @Test + public void testAddition() { + /* + * 1G: b'5866666666666666666666666666666666666666666666666666666666666666' + * 2G: b'c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022' + * 3G: b'd4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712' + */ + + // Scalar form + byte[] s1 = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] s2 = HashCode.fromString("0200000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + + // Point form + byte[] g1 = 
HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes(); + byte[] g2 = HashCode.fromString("c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022".toLowerCase()).asBytes(); + byte[] g3 = HashCode.fromString("d4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712".toLowerCase()).asBytes(); + + PointAccum p1 = Qortal25519Extras.newPointAccum(); + scalarMultBase(s1, p1); + + PointAccum p2 = Qortal25519Extras.newPointAccum(); + scalarMultBase(s2, p2); + + pointAdd(pointCopy(p1), p2); + + byte[] encoded = new byte[POINT_BYTES]; + if (0 == encodePoint(p2, encoded, 0)) + fail("Point encoding failed"); + + System.out.printf("sum: %s%n", HashCode.fromBytes(encoded)); + assertArrayEquals(g3, encoded); + } + + @Test + public void testSimpleSign() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + + byte[] signature = signForAggregation(privateKey, message); + System.out.printf("signature: %s%n", HashCode.fromBytes(signature)); + } + + @Test + public void testSimpleVerify() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + byte[] signature = HashCode.fromString("13e58e88f3df9e06637d2d5bbb814c028e3ba135494530b9d3b120bdb31168d62c70a37ae9cfba816fe6038ee1ce2fb521b95c4a91c7ff0bb1dd2e67733f2b0d".toLowerCase()).asBytes(); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + assertTrue(verifyAggregated(publicKey, signature, message)); + } + + @Test + public void testSimpleSignAndVerify() { + byte[] privateKey = 
HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + + byte[] signature = signForAggregation(privateKey, message); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + assertTrue(verifyAggregated(publicKey, signature, message)); + } + + @Test + public void testSimpleAggregate() { + List onlineAccounts = generateOnlineAccounts(1); + + byte[] aggregatePublicKey = aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey)); + + byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature)); + + OnlineAccountData onlineAccount = onlineAccounts.get(0); + + assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getPublicKey()), HashCode.fromBytes(aggregatePublicKey)), onlineAccount.getPublicKey(), aggregatePublicKey); + assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getSignature()), HashCode.fromBytes(aggregateSignature)), onlineAccount.getSignature(), aggregateSignature); + + // This is the crucial test: + long timestamp = onlineAccount.getTimestamp(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes)); + } + + @Test + public void testMultipleAggregate() { + List onlineAccounts = generateOnlineAccounts(5000); + + byte[] aggregatePublicKey = 
aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey)); + + byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature)); + + OnlineAccountData onlineAccount = onlineAccounts.get(0); + + // This is the crucial test: + long timestamp = onlineAccount.getTimestamp(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes)); + } + + private List generateOnlineAccounts(int numAccounts) { + List onlineAccounts = new ArrayList<>(); + + long timestamp = System.currentTimeMillis(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + + for (int a = 0; a < numAccounts; ++a) { + byte[] privateKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + SECURE_RANDOM.nextBytes(privateKey); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + byte[] signature = signForAggregation(privateKey, timestampBytes); + + onlineAccounts.add(new OnlineAccountData(timestamp, signature, publicKey)); + } + + return onlineAccounts; + } +} diff --git a/src/test/java/org/qortal/test/apps/RewardShareKeys.java b/src/test/java/org/qortal/test/apps/RewardShareKeys.java index e0bfc1cf..5ba1aab4 100644 --- a/src/test/java/org/qortal/test/apps/RewardShareKeys.java +++ b/src/test/java/org/qortal/test/apps/RewardShareKeys.java @@ -6,6 +6,7 @@ import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; import org.qortal.account.PrivateKeyAccount; import org.qortal.account.PublicKeyAccount; +import org.qortal.crypto.Crypto; import 
org.qortal.utils.Base58; public class RewardShareKeys { @@ -28,7 +29,7 @@ public class RewardShareKeys { PublicKeyAccount recipientAccount = new PublicKeyAccount(null, args.length > 1 ? Base58.decode(args[1]) : minterAccount.getPublicKey()); byte[] rewardSharePrivateKey = minterAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey()); - byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey); + byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey); System.out.println(String.format("Minter account: %s", minterAccount.getAddress())); System.out.println(String.format("Minter's public key: %s", Base58.encode(minterAccount.getPublicKey()))); diff --git a/src/test/java/org/qortal/test/common/AccountUtils.java b/src/test/java/org/qortal/test/common/AccountUtils.java index 0e7ef020..bda1ae61 100644 --- a/src/test/java/org/qortal/test/common/AccountUtils.java +++ b/src/test/java/org/qortal/test/common/AccountUtils.java @@ -6,6 +6,7 @@ import java.util.HashMap; import java.util.Map; import org.qortal.account.PrivateKeyAccount; +import org.qortal.crypto.Crypto; import org.qortal.data.transaction.BaseTransactionData; import org.qortal.data.transaction.PaymentTransactionData; import org.qortal.data.transaction.RewardShareTransactionData; @@ -45,7 +46,7 @@ public class AccountUtils { long timestamp = repository.getTransactionRepository().fromSignature(reference).getTimestamp() + 1; byte[] rewardSharePrivateKey = mintingAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey()); - byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey); + byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey); BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, mintingAccount.getPublicKey(), fee, null); TransactionData transactionData = new RewardShareTransactionData(baseTransactionData, recipientAccount.getAddress(), rewardSharePublicKey, 
sharePercent); diff --git a/src/test/java/org/qortal/test/minting/BlockTimestampTests.java b/src/test/java/org/qortal/test/minting/BlockTimestampTests.java new file mode 100644 index 00000000..0f91408f --- /dev/null +++ b/src/test/java/org/qortal/test/minting/BlockTimestampTests.java @@ -0,0 +1,63 @@ +package org.qortal.test.minting; + +import org.junit.Before; +import org.junit.Test; +import org.qortal.block.Block; +import org.qortal.data.block.BlockData; +import org.qortal.repository.DataException; +import org.qortal.repository.Repository; +import org.qortal.repository.RepositoryManager; +import org.qortal.test.common.BlockUtils; +import org.qortal.test.common.Common; +import org.qortal.transform.Transformer; +import org.qortal.utils.NTP; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +public class BlockTimestampTests extends Common { + + private static class BlockTimestampDataPoint { + public byte[] minterPublicKey; + public int minterAccountLevel; + public long blockTimestamp; + } + + private static final Random RANDOM = new Random(); + + @Before + public void beforeTest() throws DataException { + Common.useSettings("test-settings-v2-block-timestamps.json"); + NTP.setFixedOffset(0L); + } + + @Test + public void testTimestamps() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + Block parentBlock = BlockUtils.mintBlock(repository); + BlockData parentBlockData = parentBlock.getBlockData(); + + // Generate lots of test minters + List dataPoints = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + BlockTimestampDataPoint dataPoint = new BlockTimestampDataPoint(); + + dataPoint.minterPublicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + RANDOM.nextBytes(dataPoint.minterPublicKey); + + dataPoint.minterAccountLevel = RANDOM.nextInt(5) + 5; + + dataPoint.blockTimestamp = Block.calcTimestamp(parentBlockData, dataPoint.minterPublicKey, dataPoint.minterAccountLevel); + + 
System.out.printf("[%d] level %d, blockTimestamp %d - parentTimestamp %d = %d%n", + i, + dataPoint.minterAccountLevel, + dataPoint.blockTimestamp, + parentBlockData.getTimestamp(), + dataPoint.blockTimestamp - parentBlockData.getTimestamp() + ); + } + } + } +} diff --git a/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java new file mode 100644 index 00000000..6136c1e1 --- /dev/null +++ b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java @@ -0,0 +1,210 @@ +package org.qortal.test.network; + +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import org.junit.Ignore; +import org.junit.Test; +import org.qortal.controller.OnlineAccountsManager; +import org.qortal.data.network.OnlineAccountData; +import org.qortal.network.message.*; +import org.qortal.transform.Transformer; + +import java.nio.ByteBuffer; +import java.security.Security; +import java.util.*; + +import static org.junit.Assert.*; + +public class OnlineAccountsV3Tests { + + private static final Random RANDOM = new Random(); + static { + // This must go before any calls to LogManager/Logger + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); + + Security.insertProviderAt(new BouncyCastleProvider(), 0); + Security.insertProviderAt(new BouncyCastleJsseProvider(), 1); + } + + @Ignore("For informational use") + @Test + public void compareV2ToV3() throws MessageException { + List onlineAccounts = generateOnlineAccounts(false); + + // How many of each timestamp and leading byte (of public key) + Map> hashesByTimestampThenByte = convertToHashMaps(onlineAccounts); + + byte[] v3DataBytes = new GetOnlineAccountsV3Message(hashesByTimestampThenByte).toBytes(); + int v3ByteSize = v3DataBytes.length; + + byte[] v2DataBytes = new GetOnlineAccountsV2Message(onlineAccounts).toBytes(); + int v2ByteSize = 
v2DataBytes.length; + + int numTimestamps = hashesByTimestampThenByte.size(); + System.out.printf("For %d accounts split across %d timestamp%s: V2 size %d vs V3 size %d%n", + onlineAccounts.size(), + numTimestamps, + numTimestamps != 1 ? "s" : "", + v2ByteSize, + v3ByteSize + ); + + for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) { + long timestamp = outerMapEntry.getKey(); + + var innerMap = outerMapEntry.getValue(); + + System.out.printf("For timestamp %d: %d / 256 slots used.%n", + timestamp, + innerMap.size() + ); + } + } + + private Map> convertToHashMaps(List onlineAccounts) { + // How many of each timestamp and leading byte (of public key) + Map> hashesByTimestampThenByte = new HashMap<>(); + + for (OnlineAccountData onlineAccountData : onlineAccounts) { + Long timestamp = onlineAccountData.getTimestamp(); + Byte leadingByte = onlineAccountData.getPublicKey()[0]; + + hashesByTimestampThenByte + .computeIfAbsent(timestamp, k -> new HashMap<>()) + .compute(leadingByte, (k, v) -> OnlineAccountsManager.xorByteArrayInPlace(v, onlineAccountData.getPublicKey())); + } + + return hashesByTimestampThenByte; + } + + @Test + public void testOnGetOnlineAccountsV3() { + List ourOnlineAccounts = generateOnlineAccounts(false); + List peersOnlineAccounts = generateOnlineAccounts(false); + + Map> ourConvertedHashes = convertToHashMaps(ourOnlineAccounts); + Map> peersConvertedHashes = convertToHashMaps(peersOnlineAccounts); + + List mockReply = new ArrayList<>(); + + // Warning: no double-checking/fetching - we must be ConcurrentMap compatible! + // So no contains()-then-get() or multiple get()s on the same key/map. + for (var ourOuterMapEntry : ourConvertedHashes.entrySet()) { + Long timestamp = ourOuterMapEntry.getKey(); + + var ourInnerMap = ourOuterMapEntry.getValue(); + var peersInnerMap = peersConvertedHashes.get(timestamp); + + if (peersInnerMap == null) { + // Peer doesn't have this timestamp, so if it's valid (i.e. 
not too old) then we'd have to send all of ours + for (Byte leadingByte : ourInnerMap.keySet()) + mockReply.add(timestamp + ":" + leadingByte); + } else { + // We have entries for this timestamp so compare against peer's entries + for (var ourInnerMapEntry : ourInnerMap.entrySet()) { + Byte leadingByte = ourInnerMapEntry.getKey(); + byte[] peersHash = peersInnerMap.get(leadingByte); + + if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) { + // We don't match peer, or peer doesn't have - send all online accounts for this timestamp and leading byte + mockReply.add(timestamp + ":" + leadingByte); + } + } + } + } + + int numOurTimestamps = ourConvertedHashes.size(); + System.out.printf("We have %d accounts split across %d timestamp%s%n", + ourOnlineAccounts.size(), + numOurTimestamps, + numOurTimestamps != 1 ? "s" : "" + ); + + int numPeerTimestamps = peersConvertedHashes.size(); + System.out.printf("Peer sent %d accounts split across %d timestamp%s%n", + peersOnlineAccounts.size(), + numPeerTimestamps, + numPeerTimestamps != 1 ? 
"s" : "" + ); + + System.out.printf("We need to send: %d%n%s%n", mockReply.size(), String.join(", ", mockReply)); + } + + @Test + public void testSerialization() throws MessageException { + List onlineAccountsOut = generateOnlineAccounts(true); + Map> hashesByTimestampThenByteOut = convertToHashMaps(onlineAccountsOut); + + validateSerialization(hashesByTimestampThenByteOut); + } + + @Test + public void testEmptySerialization() throws MessageException { + Map> hashesByTimestampThenByteOut = Collections.emptyMap(); + validateSerialization(hashesByTimestampThenByteOut); + + hashesByTimestampThenByteOut = new HashMap<>(); + validateSerialization(hashesByTimestampThenByteOut); + } + + private void validateSerialization(Map> hashesByTimestampThenByteOut) throws MessageException { + Message messageOut = new GetOnlineAccountsV3Message(hashesByTimestampThenByteOut); + byte[] messageBytes = messageOut.toBytes(); + + ByteBuffer byteBuffer = ByteBuffer.wrap(messageBytes).asReadOnlyBuffer(); + + GetOnlineAccountsV3Message messageIn = (GetOnlineAccountsV3Message) Message.fromByteBuffer(byteBuffer); + + Map> hashesByTimestampThenByteIn = messageIn.getHashesByTimestampThenByte(); + + Set timestampsIn = hashesByTimestampThenByteIn.keySet(); + Set timestampsOut = hashesByTimestampThenByteOut.keySet(); + assertEquals("timestamp count mismatch", timestampsOut.size(), timestampsIn.size()); + assertTrue("timestamps mismatch", timestampsIn.containsAll(timestampsOut)); + + for (Long timestamp : timestampsIn) { + Map hashesByByteIn = hashesByTimestampThenByteIn.get(timestamp); + Map hashesByByteOut = hashesByTimestampThenByteOut.get(timestamp); + assertNotNull("timestamp entry missing", hashesByByteOut); + + Set leadingBytesIn = hashesByByteIn.keySet(); + Set leadingBytesOut = hashesByByteOut.keySet(); + assertEquals("leading byte entry count mismatch", leadingBytesOut.size(), leadingBytesIn.size()); + assertTrue("leading byte entry mismatch", leadingBytesIn.containsAll(leadingBytesOut)); 
+ + for (Byte leadingByte : leadingBytesOut) { + byte[] bytesIn = hashesByByteIn.get(leadingByte); + byte[] bytesOut = hashesByByteOut.get(leadingByte); + + assertTrue("pubkey hash mismatch", Arrays.equals(bytesOut, bytesIn)); + } + } + } + + private List generateOnlineAccounts(boolean withSignatures) { + List onlineAccounts = new ArrayList<>(); + + int numTimestamps = RANDOM.nextInt(2) + 1; // 1 or 2 + + for (int t = 0; t < numTimestamps; ++t) { + long timestamp = 1 << 31 + (t + 1) << 12; + int numAccounts = RANDOM.nextInt(3000); + + for (int a = 0; a < numAccounts; ++a) { + byte[] sig = null; + if (withSignatures) { + sig = new byte[Transformer.SIGNATURE_LENGTH]; + RANDOM.nextBytes(sig); + } + + byte[] pubkey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + RANDOM.nextBytes(pubkey); + + onlineAccounts.add(new OnlineAccountData(timestamp, sig, pubkey)); + } + } + + return onlineAccounts; + } + +} diff --git a/src/test/resources/test-chain-v2-block-timestamps.json b/src/test/resources/test-chain-v2-block-timestamps.json new file mode 100644 index 00000000..072283f0 --- /dev/null +++ b/src/test/resources/test-chain-v2-block-timestamps.json @@ -0,0 +1,85 @@ +{ + "isTestChain": true, + "blockTimestampMargin": 500, + "transactionExpiryPeriod": 86400000, + "maxBlockSize": 2097152, + "maxBytesPerUnitFee": 1024, + "unitFee": "0.1", + "nameRegistrationUnitFees": [ + { "timestamp": 1645372800000, "fee": "5" } + ], + "requireGroupForApproval": false, + "minAccountLevelToRewardShare": 5, + "maxRewardSharesPerMintingAccount": 20, + "founderEffectiveMintingLevel": 10, + "onlineAccountSignaturesMinLifetime": 3600000, + "onlineAccountSignaturesMaxLifetime": 86400000, + "rewardsByHeight": [ + { "height": 1, "reward": 100 }, + { "height": 11, "reward": 10 }, + { "height": 21, "reward": 1 } + ], + "sharesByLevel": [ + { "levels": [ 1, 2 ], "share": 0.05 }, + { "levels": [ 3, 4 ], "share": 0.10 }, + { "levels": [ 5, 6 ], "share": 0.15 }, + { "levels": [ 7, 8 ], "share": 0.20 }, + { 
"levels": [ 9, 10 ], "share": 0.25 } + ], + "qoraHoldersShare": 0.20, + "qoraPerQortReward": 250, + "blocksNeededByLevel": [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 ], + "blockTimingsByHeight": [ + { "height": 1, "target": 60000, "deviation": 30000, "power": 0.2 }, + { "height": 2, "target": 70000, "deviation": 10000, "power": 0.8 } + ], + "ciyamAtSettings": { + "feePerStep": "0.0001", + "maxStepsPerRound": 500, + "stepsPerFunctionCall": 10, + "minutesPerBlock": 1 + }, + "featureTriggers": { + "messageHeight": 0, + "atHeight": 0, + "assetsTimestamp": 0, + "votingTimestamp": 0, + "arbitraryTimestamp": 0, + "powfixTimestamp": 0, + "qortalTimestamp": 0, + "newAssetPricingTimestamp": 0, + "groupApprovalTimestamp": 0, + "atFindNextTransactionFix": 0, + "newBlockSigHeight": 999999, + "shareBinFix": 999999, + "calcChainWeightTimestamp": 0, + "transactionV5Timestamp": 0, + "transactionV6Timestamp": 9999999999999, + "aggregateSignatureTimestamp": 9999999999999 + }, + "genesisInfo": { + "version": 4, + "timestamp": 0, + "transactions": [ + { "type": "ISSUE_ASSET", "assetName": "QORT", "description": "QORT native coin", "data": "", "quantity": 0, "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "assetName": "Legacy-QORA", "description": "Representative legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + { "type": "ISSUE_ASSET", "assetName": "QORT-from-QORA", "description": "QORT gained from holding legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + + { "type": "GENESIS", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "amount": "1000000000" }, + { "type": "GENESIS", "recipient": "QixPbJUwsaHsVEofJdozU9zgVqkK6aYhrK", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "QaUpHNhT3Ygx6avRiKobuLdusppR5biXjL", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "amount": "1000000" }, + + { "type": "CREATE_GROUP", "creatorPublicKey": 
"2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "groupName": "dev-group", "description": "developer group", "isOpen": false, "approvalThreshold": "PCT100", "minimumBlockDelay": 0, "maximumBlockDelay": 1440 }, + + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "TEST", "description": "test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "C6wuddsBV3HzRrXUtezE7P5MoRXp5m3mEDokRDGZB6ry", "assetName": "OTHER", "description": "other test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "GOLD", "description": "gold test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + + { "type": "ACCOUNT_FLAGS", "target": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "andMask": -1, "orMask": 1, "xorMask": 0 }, + { "type": "REWARD_SHARE", "minterPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "rewardSharePublicKey": "7PpfnvLSG7y4HPh8hE7KoqAjLCkv7Ui6xw4mKAkbZtox", "sharePercent": "100" }, + + { "type": "ACCOUNT_LEVEL", "target": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "level": 5 } + ] + } +} diff --git a/src/test/resources/test-chain-v2.json b/src/test/resources/test-chain-v2.json index f897e38d..14cd8b34 100644 --- a/src/test/resources/test-chain-v2.json +++ b/src/test/resources/test-chain-v2.json @@ -55,6 +55,7 @@ "transactionV5Timestamp": 0, "transactionV6Timestamp": 0, "disableReferenceTimestamp": 9999999999999 + "aggregateSignatureTimestamp": 2652560000000 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-settings-v2-block-timestamps.json b/src/test/resources/test-settings-v2-block-timestamps.json new file mode 100644 index 00000000..dbbbebbe --- /dev/null +++ b/src/test/resources/test-settings-v2-block-timestamps.json @@ 
-0,0 +1,19 @@ +{ + "repositoryPath": "testdb", + "bitcoinNet": "TEST3", + "litecoinNet": "TEST3", + "restrictedApi": false, + "blockchainConfig": "src/test/resources/test-chain-v2-block-timestamps.json", + "exportPath": "qortal-backup-test", + "bootstrap": false, + "wipeUnconfirmedOnStart": false, + "testNtpOffset": 0, + "minPeers": 0, + "pruneBlockLimit": 100, + "bootstrapFilenamePrefix": "test-", + "dataPath": "data-test", + "tempDataPath": "data-test/_temp", + "listsPath": "lists-test", + "storagePolicy": "FOLLOWED_OR_VIEWED", + "maxStorageCapacity": 104857600 +}