From 6950c6bf6985984bbe2aafa0fbfcbcde9b29d8e3 Mon Sep 17 00:00:00 2001 From: catbref Date: Sun, 17 Apr 2022 19:22:29 +0100 Subject: [PATCH 01/18] Initial work on reducing network load for transferring blocks. Reduced AT state info from per-AT address + state hash + fees to AT count + total AT fees + hash of all AT states. Modified Block and Controller to support above. Controller needs more work regarding CachedBlockMessages. Note that blocks fetched from archive are in old V1 format. Changed Triple, List> to BlockTransformation to support both V1 and V2 forms. Set min peer version to 3.3.203 in BlockV2Message class. --- src/main/java/org/qortal/block/Block.java | 67 +++++++- .../org/qortal/controller/Controller.java | 12 ++ .../org/qortal/controller/Synchronizer.java | 26 +-- .../qortal/network/message/BlockMessage.java | 7 +- .../network/message/BlockV2Message.java | 87 ++++++++++ .../qortal/network/message/MessageType.java | 1 + .../qortal/repository/BlockArchiveReader.java | 19 ++- .../hsqldb/HSQLDBBlockArchiveRepository.java | 29 ++-- .../transform/block/BlockTransformation.java | 44 +++++ .../transform/block/BlockTransformer.java | 150 ++++++++++++------ .../org/qortal/utils/BlockArchiveUtils.java | 21 ++- .../org/qortal/test/BlockArchiveTests.java | 19 +-- src/test/java/org/qortal/test/BlockTests.java | 5 +- 13 files changed, 371 insertions(+), 116 deletions(-) create mode 100644 src/main/java/org/qortal/network/message/BlockV2Message.java create mode 100644 src/main/java/org/qortal/transform/block/BlockTransformation.java diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index ea5a6b49..5fe005d6 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -3,9 +3,12 @@ package org.qortal.block; import static java.util.Arrays.stream; import static java.util.stream.Collectors.toMap; +import java.io.ByteArrayOutputStream; +import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; +import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; import java.text.NumberFormat; import java.util.*; @@ -118,6 +121,8 @@ public class Block { /** Remote/imported/loaded AT states */ protected List atStates; + /** Remote hash of AT states - in lieu of full AT state data in {@code atStates} */ + protected byte[] atStatesHash; /** Locally-generated AT states */ protected List ourAtStates; /** Locally-generated AT fees */ @@ -255,7 +260,7 @@ public class Block { * Constructs new Block using passed transaction and AT states. *

* This constructor typically used when receiving a serialized block over the network. - * + * * @param repository * @param blockData * @param transactions @@ -281,6 +286,35 @@ public class Block { this.blockData.setTotalFees(totalFees); } + /** + * Constructs new Block using passed transaction and minimal AT state info. + *

+ * This constructor typically used when receiving a serialized block over the network. + * + * @param repository + * @param blockData + * @param transactions + * @param atStatesHash + */ + public Block(Repository repository, BlockData blockData, List transactions, byte[] atStatesHash) { + this(repository, blockData); + + this.transactions = new ArrayList<>(); + + long totalFees = 0; + + // We have to sum fees too + for (TransactionData transactionData : transactions) { + this.transactions.add(Transaction.fromData(repository, transactionData)); + totalFees += transactionData.getFee(); + } + + this.atStatesHash = atStatesHash; + totalFees += this.blockData.getATFees(); + + this.blockData.setTotalFees(totalFees); + } + /** * Constructs new Block with empty transaction list, using passed minter account. * @@ -1194,7 +1228,7 @@ public class Block { */ private ValidationResult areAtsValid() throws DataException { // Locally generated AT states should be valid so no need to re-execute them - if (this.ourAtStates == this.getATStates()) // Note object reference compare + if (this.ourAtStates != null && this.ourAtStates == this.atStates) // Note object reference compare return ValidationResult.OK; // Generate local AT states for comparison @@ -1208,8 +1242,33 @@ public class Block { if (this.ourAtFees != this.blockData.getATFees()) return ValidationResult.AT_STATES_MISMATCH; - // Note: this.atStates fully loaded thanks to this.getATStates() call above - for (int s = 0; s < this.atStates.size(); ++s) { + // If we have a single AT states hash then compare that in preference + if (this.atStatesHash != null) { + int atBytesLength = blockData.getATCount() * BlockTransformer.AT_ENTRY_LENGTH; + ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength); + + try { + for (ATStateData atStateData : this.ourAtStates) { + atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8)); + atHashBytes.write(atStateData.getStateHash()); + atHashBytes.write(Longs.toByteArray(atStateData.getFees())); + } + } catch (IOException e) { + throw new DataException("Couldn't validate AT states hash due to serialization issue?", e); + } + + byte[] ourAtStatesHash = Crypto.digest(atHashBytes.toByteArray()); + if (!Arrays.equals(ourAtStatesHash, this.atStatesHash)) + return ValidationResult.AT_STATES_MISMATCH; + + // Use our AT state data from now on + this.atStates = this.ourAtStates; + return ValidationResult.OK; + } + + // Note: this.atStates fully loaded thanks to this.getATStates() call: + this.getATStates(); + for (int s = 0; s < this.ourAtStates.size(); ++s) { ATStateData ourAtState = this.ourAtStates.get(s); ATStateData theirAtState = this.atStates.get(s); diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index a5ada0c2..0a011db5 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -1362,6 +1362,18 @@ public class Controller extends Thread { Block block = new Block(repository, blockData); + // V2 support + if (peer.getPeersVersion() >= BlockV2Message.MIN_PEER_VERSION) { + Message blockMessage = new BlockV2Message(block); + blockMessage.setId(message.getId()); + if (!peer.sendMessage(blockMessage)) { + peer.disconnect("failed to send block"); + // Don't fall-through to caching because failure to send might be from failure to build message + return; + } + return; + } + CachedBlockMessage blockMessage = new CachedBlockMessage(block); 
blockMessage.setId(message.getId()); diff --git a/src/main/java/org/qortal/controller/Synchronizer.java b/src/main/java/org/qortal/controller/Synchronizer.java index 8f3a34bb..4c1985a1 100644 --- a/src/main/java/org/qortal/controller/Synchronizer.java +++ b/src/main/java/org/qortal/controller/Synchronizer.java @@ -26,14 +26,7 @@ import org.qortal.event.Event; import org.qortal.event.EventBus; import org.qortal.network.Network; import org.qortal.network.Peer; -import org.qortal.network.message.BlockMessage; -import org.qortal.network.message.BlockSummariesMessage; -import org.qortal.network.message.GetBlockMessage; -import org.qortal.network.message.GetBlockSummariesMessage; -import org.qortal.network.message.GetSignaturesV2Message; -import org.qortal.network.message.Message; -import org.qortal.network.message.SignaturesMessage; -import org.qortal.network.message.MessageType; +import org.qortal.network.message.*; import org.qortal.repository.DataException; import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; @@ -1579,12 +1572,23 @@ public class Synchronizer extends Thread { Message getBlockMessage = new GetBlockMessage(signature); Message message = peer.getResponse(getBlockMessage); - if (message == null || message.getType() != MessageType.BLOCK) + if (message == null) return null; - BlockMessage blockMessage = (BlockMessage) message; + switch (message.getType()) { + case BLOCK: { + BlockMessage blockMessage = (BlockMessage) message; + return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates()); + } - return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates()); + case BLOCK_V2: { + BlockV2Message blockMessage = (BlockV2Message) message; + return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStatesHash()); + } + + default: + return null; + } } public void populateBlockSummariesMinterLevels(Repository repository, List blockSummaries) throws DataException { diff --git a/src/main/java/org/qortal/network/message/BlockMessage.java b/src/main/java/org/qortal/network/message/BlockMessage.java index 2dd4db87..0a8a23de 100644 --- a/src/main/java/org/qortal/network/message/BlockMessage.java +++ b/src/main/java/org/qortal/network/message/BlockMessage.java @@ -9,6 +9,7 @@ import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockData; import org.qortal.data.transaction.TransactionData; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.transform.block.BlockTransformer; import org.qortal.utils.Triple; @@ -46,12 +47,12 @@ public class BlockMessage extends Message { try { int height = byteBuffer.getInt(); - Triple, List> blockInfo = BlockTransformer.fromByteBuffer(byteBuffer); + BlockTransformation blockTransformation = BlockTransformer.fromByteBuffer(byteBuffer); - BlockData blockData = blockInfo.getA(); + BlockData blockData = blockTransformation.getBlockData(); blockData.setHeight(height); - return new BlockMessage(id, blockData, blockInfo.getB(), blockInfo.getC()); + return new BlockMessage(id, blockData, blockTransformation.getTransactions(), blockTransformation.getAtStates()); } catch (TransformationException e) { LOGGER.info(String.format("Received garbled BLOCK message: %s", e.getMessage())); throw new MessageException(e.getMessage(), e); diff --git 
a/src/main/java/org/qortal/network/message/BlockV2Message.java b/src/main/java/org/qortal/network/message/BlockV2Message.java new file mode 100644 index 00000000..815892e2 --- /dev/null +++ b/src/main/java/org/qortal/network/message/BlockV2Message.java @@ -0,0 +1,87 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.qortal.block.Block; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; +import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; +import org.qortal.transform.block.BlockTransformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; + +public class BlockV2Message extends Message { + + private static final Logger LOGGER = LogManager.getLogger(BlockV2Message.class); + public static final long MIN_PEER_VERSION = 0x3000300cbL; // 3.3.203 + + private BlockData blockData; + private List transactions; + private byte[] atStatesHash; + + public BlockV2Message(Block block) throws TransformationException { + super(MessageType.BLOCK_V2); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + + try { + bytes.write(Ints.toByteArray(block.getBlockData().getHeight())); + + bytes.write(BlockTransformer.toBytesV2(block)); + } catch (IOException e) { + throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); + } + + this.dataBytes = bytes.toByteArray(); + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + public BlockV2Message(byte[] cachedBytes) { + super(MessageType.BLOCK_V2); + + this.dataBytes = cachedBytes; + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + private BlockV2Message(int id, BlockData blockData, List transactions, byte[] atStatesHash) { + super(id, MessageType.BLOCK_V2); + + this.blockData = blockData; + this.transactions = transactions; + this.atStatesHash = atStatesHash; + } + + public BlockData getBlockData() { + return this.blockData; + } + + public List getTransactions() { + return this.transactions; + } + + public byte[] getAtStatesHash() { + return this.atStatesHash; + } + + public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) throws MessageException { + try { + int height = byteBuffer.getInt(); + + BlockTransformation blockTransformation = BlockTransformer.fromByteBufferV2(byteBuffer); + + BlockData blockData = blockTransformation.getBlockData(); + blockData.setHeight(height); + + return new BlockV2Message(id, blockData, blockTransformation.getTransactions(), blockTransformation.getAtStatesHash()); + } catch (TransformationException e) { + LOGGER.info(String.format("Received garbled BLOCK_V2 message: %s", e.getMessage())); + throw new MessageException(e.getMessage(), e); + } + } + +} diff --git a/src/main/java/org/qortal/network/message/MessageType.java b/src/main/java/org/qortal/network/message/MessageType.java index a2637dfd..c2ae7676 100644 --- a/src/main/java/org/qortal/network/message/MessageType.java +++ b/src/main/java/org/qortal/network/message/MessageType.java @@ -34,6 +34,7 @@ public enum MessageType { BLOCK(50, BlockMessage::fromByteBuffer), GET_BLOCK(51, GetBlockMessage::fromByteBuffer), + BLOCK_V2(52, BlockV2Message::fromByteBuffer), SIGNATURES(60, SignaturesMessage::fromByteBuffer), GET_SIGNATURES_V2(61, GetSignaturesV2Message::fromByteBuffer), diff --git 
a/src/main/java/org/qortal/repository/BlockArchiveReader.java b/src/main/java/org/qortal/repository/BlockArchiveReader.java index 83508152..311d21c7 100644 --- a/src/main/java/org/qortal/repository/BlockArchiveReader.java +++ b/src/main/java/org/qortal/repository/BlockArchiveReader.java @@ -9,6 +9,7 @@ import org.qortal.data.block.BlockData; import org.qortal.data.transaction.TransactionData; import org.qortal.settings.Settings; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.transform.block.BlockTransformer; import org.qortal.utils.Triple; @@ -66,7 +67,7 @@ public class BlockArchiveReader { this.fileListCache = Map.copyOf(map); } - public Triple, List> fetchBlockAtHeight(int height) { + public BlockTransformation fetchBlockAtHeight(int height) { if (this.fileListCache == null) { this.fetchFileList(); } @@ -77,13 +78,13 @@ public class BlockArchiveReader { } ByteBuffer byteBuffer = ByteBuffer.wrap(serializedBytes); - Triple, List> blockInfo = null; + BlockTransformation blockInfo = null; try { blockInfo = BlockTransformer.fromByteBuffer(byteBuffer); - if (blockInfo != null && blockInfo.getA() != null) { + if (blockInfo != null && blockInfo.getBlockData() != null) { // Block height is stored outside of the main serialized bytes, so it // won't be set automatically. - blockInfo.getA().setHeight(height); + blockInfo.getBlockData().setHeight(height); } } catch (TransformationException e) { return null; @@ -91,8 +92,7 @@ public class BlockArchiveReader { return blockInfo; } - public Triple, List> fetchBlockWithSignature( - byte[] signature, Repository repository) { + public BlockTransformation fetchBlockWithSignature(byte[] signature, Repository repository) { if (this.fileListCache == null) { this.fetchFileList(); @@ -105,13 +105,12 @@ public class BlockArchiveReader { return null; } - public List, List>> fetchBlocksFromRange( - int startHeight, int endHeight) { + public List fetchBlocksFromRange(int startHeight, int endHeight) { - List, List>> blockInfoList = new ArrayList<>(); + List blockInfoList = new ArrayList<>(); for (int height = startHeight; height <= endHeight; height++) { - Triple, List> blockInfo = this.fetchBlockAtHeight(height); + BlockTransformation blockInfo = this.fetchBlockAtHeight(height); if (blockInfo == null) { return blockInfoList; } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java index d8738f0d..cc7e1611 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBBlockArchiveRepository.java @@ -1,16 +1,13 @@ package org.qortal.repository.hsqldb; -import org.qortal.api.ApiError; -import org.qortal.api.ApiExceptionFactory; import org.qortal.api.model.BlockSignerSummary; -import org.qortal.block.Block; import org.qortal.data.block.BlockArchiveData; import org.qortal.data.block.BlockData; import org.qortal.data.block.BlockSummaryData; import org.qortal.repository.BlockArchiveReader; import org.qortal.repository.BlockArchiveRepository; import org.qortal.repository.DataException; -import org.qortal.utils.Triple; +import org.qortal.transform.block.BlockTransformation; import java.sql.ResultSet; import java.sql.SQLException; @@ -29,11 +26,11 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository { @Override public BlockData fromSignature(byte[] signature) throws DataException 
{ - Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository); - if (blockInfo != null) { - return (BlockData) blockInfo.getA(); - } - return null; + BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockWithSignature(signature, this.repository); + if (blockInfo == null) + return null; + + return blockInfo.getBlockData(); } @Override @@ -47,11 +44,11 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository { @Override public BlockData fromHeight(int height) throws DataException { - Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height); - if (blockInfo != null) { - return (BlockData) blockInfo.getA(); - } - return null; + BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height); + if (blockInfo == null) + return null; + + return blockInfo.getBlockData(); } @Override @@ -79,9 +76,9 @@ public class HSQLDBBlockArchiveRepository implements BlockArchiveRepository { int height = referenceBlock.getHeight(); if (height > 0) { // Request the block at height + 1 - Triple blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1); + BlockTransformation blockInfo = BlockArchiveReader.getInstance().fetchBlockAtHeight(height + 1); if (blockInfo != null) { - return (BlockData) blockInfo.getA(); + return blockInfo.getBlockData(); } } } diff --git a/src/main/java/org/qortal/transform/block/BlockTransformation.java b/src/main/java/org/qortal/transform/block/BlockTransformation.java new file mode 100644 index 00000000..6aee8cf9 --- /dev/null +++ b/src/main/java/org/qortal/transform/block/BlockTransformation.java @@ -0,0 +1,44 @@ +package org.qortal.transform.block; + +import org.qortal.data.at.ATStateData; +import org.qortal.data.block.BlockData; +import org.qortal.data.transaction.TransactionData; + +import java.util.List; + +public class BlockTransformation { + private final BlockData blockData; + private final List transactions; + private final List atStates; + private final byte[] atStatesHash; + + /*package*/ BlockTransformation(BlockData blockData, List transactions, List atStates) { + this.blockData = blockData; + this.transactions = transactions; + this.atStates = atStates; + this.atStatesHash = null; + } + + /*package*/ BlockTransformation(BlockData blockData, List transactions, byte[] atStatesHash) { + this.blockData = blockData; + this.transactions = transactions; + this.atStates = null; + this.atStatesHash = atStatesHash; + } + + public BlockData getBlockData() { + return blockData; + } + + public List getTransactions() { + return transactions; + } + + public List getAtStates() { + return atStates; + } + + public byte[] getAtStatesHash() { + return atStatesHash; + } +} diff --git a/src/main/java/org/qortal/transform/block/BlockTransformer.java b/src/main/java/org/qortal/transform/block/BlockTransformer.java index cce3e7d7..b61d6900 100644 --- a/src/main/java/org/qortal/transform/block/BlockTransformer.java +++ b/src/main/java/org/qortal/transform/block/BlockTransformer.java @@ -3,12 +3,14 @@ package org.qortal.transform.block; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.qortal.block.Block; import org.qortal.block.BlockChain; +import org.qortal.crypto.Crypto; import org.qortal.data.at.ATStateData; import org.qortal.data.block.BlockData; import 
org.qortal.data.transaction.TransactionData; @@ -20,7 +22,6 @@ import org.qortal.transform.Transformer; import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.Base58; import org.qortal.utils.Serialization; -import org.qortal.utils.Triple; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; @@ -45,14 +46,13 @@ public class BlockTransformer extends Transformer { protected static final int AT_BYTES_LENGTH = INT_LENGTH; protected static final int AT_FEES_LENGTH = AMOUNT_LENGTH; - protected static final int AT_LENGTH = AT_FEES_LENGTH + AT_BYTES_LENGTH; protected static final int ONLINE_ACCOUNTS_COUNT_LENGTH = INT_LENGTH; protected static final int ONLINE_ACCOUNTS_SIZE_LENGTH = INT_LENGTH; protected static final int ONLINE_ACCOUNTS_TIMESTAMP_LENGTH = TIMESTAMP_LENGTH; protected static final int ONLINE_ACCOUNTS_SIGNATURES_COUNT_LENGTH = INT_LENGTH; - protected static final int AT_ENTRY_LENGTH = ADDRESS_LENGTH + SHA256_LENGTH + AMOUNT_LENGTH; + public static final int AT_ENTRY_LENGTH = ADDRESS_LENGTH + SHA256_LENGTH + AMOUNT_LENGTH; /** * Extract block data and transaction data from serialized bytes. @@ -61,7 +61,7 @@ public class BlockTransformer extends Transformer { * @return BlockData and a List of transactions. * @throws TransformationException */ - public static Triple, List> fromBytes(byte[] bytes) throws TransformationException { + public static BlockTransformation fromBytes(byte[] bytes) throws TransformationException { if (bytes == null) return null; @@ -76,28 +76,40 @@ public class BlockTransformer extends Transformer { /** * Extract block data and transaction data from serialized bytes containing a single block. * - * @param bytes + * @param byteBuffer source of serialized block bytes * @return BlockData and a List of transactions. * @throws TransformationException */ - public static Triple, List> fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException { + public static BlockTransformation fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException { + return BlockTransformer.fromByteBuffer(byteBuffer, false); + } + + /** + * Extract block data and transaction data from serialized bytes containing a single block. + * + * @param byteBuffer source of serialized block bytes + * @return BlockData and a List of transactions. + * @throws TransformationException + */ + public static BlockTransformation fromByteBufferV2(ByteBuffer byteBuffer) throws TransformationException { return BlockTransformer.fromByteBuffer(byteBuffer, true); } /** - * Extract block data and transaction data from serialized bytes containing one or more blocks. - * - * @param bytes + * Extract block data and transaction data from serialized bytes containing a single block, in one of two forms. + * + * @param byteBuffer source of serialized block bytes + * @param isV2 set to true if AT state info is represented by a single hash, false if serialized as per-AT address+state hash+fees * @return the next block's BlockData and a List of transactions. 
* @throws TransformationException */ - public static Triple, List> fromByteBuffer(ByteBuffer byteBuffer, boolean finalBlockInBuffer) throws TransformationException { + private static BlockTransformation fromByteBuffer(ByteBuffer byteBuffer, boolean isV2) throws TransformationException { int version = byteBuffer.getInt(); - if (finalBlockInBuffer && byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH) + if (byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH) throw new TransformationException("Byte data too short for Block"); - if (finalBlockInBuffer && byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize()) + if (byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize()) throw new TransformationException("Byte data too long for Block"); long timestamp = byteBuffer.getLong(); @@ -117,42 +129,52 @@ public class BlockTransformer extends Transformer { int atCount = 0; long atFees = 0; - List atStates = new ArrayList<>(); + byte[] atStatesHash = null; + List atStates = null; - int atBytesLength = byteBuffer.getInt(); + if (isV2) { + // Simply: AT count, AT total fees, hash(all AT states) + atCount = byteBuffer.getInt(); + atFees = byteBuffer.getLong(); + atStatesHash = new byte[Transformer.SHA256_LENGTH]; + byteBuffer.get(atStatesHash); + } else { + // V1: AT info byte length, then per-AT entries of AT address + state hash + fees + int atBytesLength = byteBuffer.getInt(); + if (atBytesLength > BlockChain.getInstance().getMaxBlockSize()) + throw new TransformationException("Byte data too long for Block's AT info"); - if (atBytesLength > BlockChain.getInstance().getMaxBlockSize()) - throw new TransformationException("Byte data too long for Block's AT info"); + // Read AT-address, SHA256 hash and fees + if (atBytesLength % AT_ENTRY_LENGTH != 0) + throw new TransformationException("AT byte data not a multiple of AT entry length"); - ByteBuffer atByteBuffer = byteBuffer.slice(); - atByteBuffer.limit(atBytesLength); + ByteBuffer atByteBuffer = byteBuffer.slice(); + atByteBuffer.limit(atBytesLength); - // Read AT-address, SHA256 hash and fees - if (atBytesLength % AT_ENTRY_LENGTH != 0) - throw new TransformationException("AT byte data not a multiple of AT entry length"); + atStates = new ArrayList<>(); + while (atByteBuffer.hasRemaining()) { + byte[] atAddressBytes = new byte[ADDRESS_LENGTH]; + atByteBuffer.get(atAddressBytes); + String atAddress = Base58.encode(atAddressBytes); - while (atByteBuffer.hasRemaining()) { - byte[] atAddressBytes = new byte[ADDRESS_LENGTH]; - atByteBuffer.get(atAddressBytes); - String atAddress = Base58.encode(atAddressBytes); + byte[] stateHash = new byte[SHA256_LENGTH]; + atByteBuffer.get(stateHash); - byte[] stateHash = new byte[SHA256_LENGTH]; - atByteBuffer.get(stateHash); + long fees = atByteBuffer.getLong(); - long fees = atByteBuffer.getLong(); + // Add this AT's fees to our total + atFees += fees; - // Add this AT's fees to our total - atFees += fees; + atStates.add(new ATStateData(atAddress, stateHash, fees)); + } - atStates.add(new ATStateData(atAddress, stateHash, fees)); + // Bump byteBuffer over AT states just read in slice + byteBuffer.position(byteBuffer.position() + atBytesLength); + + // AT count to reflect the number of states we have + atCount = atStates.size(); } - // Bump byteBuffer over AT states just read in slice - byteBuffer.position(byteBuffer.position() + atBytesLength); - - // AT count to reflect the number of states we have - atCount = atStates.size(); - // Add AT fees to totalFees 
totalFees += atFees; @@ -221,16 +243,15 @@ public class BlockTransformer extends Transformer { byteBuffer.get(onlineAccountsSignatures); } - // We should only complain about excess byte data if we aren't expecting more blocks in this ByteBuffer - if (finalBlockInBuffer && byteBuffer.hasRemaining()) - throw new TransformationException("Excess byte data found after parsing Block"); - // We don't have a height! Integer height = null; BlockData blockData = new BlockData(version, reference, transactionCount, totalFees, transactionsSignature, height, timestamp, minterPublicKey, minterSignature, atCount, atFees, encodedOnlineAccounts, onlineAccountsCount, onlineAccountsTimestamp, onlineAccountsSignatures); - return new Triple<>(blockData, transactions, atStates); + if (isV2) + return new BlockTransformation(blockData, transactions, atStatesHash); + else + return new BlockTransformation(blockData, transactions, atStates); } public static int getDataLength(Block block) throws TransformationException { @@ -266,6 +287,14 @@ public class BlockTransformer extends Transformer { } public static byte[] toBytes(Block block) throws TransformationException { + return toBytes(block, false); + } + + public static byte[] toBytesV2(Block block) throws TransformationException { + return toBytes(block, true); + } + + private static byte[] toBytes(Block block, boolean isV2) throws TransformationException { BlockData blockData = block.getBlockData(); try { @@ -279,16 +308,37 @@ public class BlockTransformer extends Transformer { bytes.write(blockData.getMinterSignature()); int atBytesLength = blockData.getATCount() * AT_ENTRY_LENGTH; - bytes.write(Ints.toByteArray(atBytesLength)); + if (isV2) { + ByteArrayOutputStream atHashBytes = new ByteArrayOutputStream(atBytesLength); + long atFees = 0; - for (ATStateData atStateData : block.getATStates()) { - // Skip initial states generated by DEPLOY_AT transactions in the same block - if (atStateData.isInitial()) - continue; + for (ATStateData atStateData : block.getATStates()) { + // Skip initial states generated by DEPLOY_AT transactions in the same block + if (atStateData.isInitial()) + continue; - bytes.write(Base58.decode(atStateData.getATAddress())); - bytes.write(atStateData.getStateHash()); - bytes.write(Longs.toByteArray(atStateData.getFees())); + atHashBytes.write(atStateData.getATAddress().getBytes(StandardCharsets.UTF_8)); + atHashBytes.write(atStateData.getStateHash()); + atHashBytes.write(Longs.toByteArray(atStateData.getFees())); + + atFees += atStateData.getFees(); + } + + bytes.write(Ints.toByteArray(blockData.getATCount())); + bytes.write(Longs.toByteArray(atFees)); + bytes.write(Crypto.digest(atHashBytes.toByteArray())); + } else { + bytes.write(Ints.toByteArray(atBytesLength)); + + for (ATStateData atStateData : block.getATStates()) { + // Skip initial states generated by DEPLOY_AT transactions in the same block + if (atStateData.isInitial()) + continue; + + bytes.write(Base58.decode(atStateData.getATAddress())); + bytes.write(atStateData.getStateHash()); + bytes.write(Longs.toByteArray(atStateData.getFees())); + } } // Transactions diff --git a/src/main/java/org/qortal/utils/BlockArchiveUtils.java b/src/main/java/org/qortal/utils/BlockArchiveUtils.java index 0beff026..84de1a31 100644 --- a/src/main/java/org/qortal/utils/BlockArchiveUtils.java +++ b/src/main/java/org/qortal/utils/BlockArchiveUtils.java @@ -6,6 +6,7 @@ import org.qortal.data.transaction.TransactionData; import org.qortal.repository.BlockArchiveReader; import 
org.qortal.repository.DataException; import org.qortal.repository.Repository; +import org.qortal.transform.block.BlockTransformation; import java.util.List; @@ -33,8 +34,7 @@ public class BlockArchiveUtils { repository.discardChanges(); final int requestedRange = endHeight+1-startHeight; - List, List>> blockInfoList = - BlockArchiveReader.getInstance().fetchBlocksFromRange(startHeight, endHeight); + List blockInfoList = BlockArchiveReader.getInstance().fetchBlocksFromRange(startHeight, endHeight); // Ensure that we have received all of the requested blocks if (blockInfoList == null || blockInfoList.isEmpty()) { @@ -43,27 +43,26 @@ public class BlockArchiveUtils { if (blockInfoList.size() != requestedRange) { throw new IllegalStateException("Non matching block count when importing from archive"); } - Triple, List> firstBlock = blockInfoList.get(0); - if (firstBlock == null || firstBlock.getA().getHeight() != startHeight) { + BlockTransformation firstBlock = blockInfoList.get(0); + if (firstBlock == null || firstBlock.getBlockData().getHeight() != startHeight) { throw new IllegalStateException("Non matching first block when importing from archive"); } if (blockInfoList.size() > 0) { - Triple, List> lastBlock = - blockInfoList.get(blockInfoList.size() - 1); - if (lastBlock == null || lastBlock.getA().getHeight() != endHeight) { + BlockTransformation lastBlock = blockInfoList.get(blockInfoList.size() - 1); + if (lastBlock == null || lastBlock.getBlockData().getHeight() != endHeight) { throw new IllegalStateException("Non matching last block when importing from archive"); } } // Everything seems okay, so go ahead with the import - for (Triple, List> blockInfo : blockInfoList) { + for (BlockTransformation blockInfo : blockInfoList) { try { // Save block - repository.getBlockRepository().save(blockInfo.getA()); + repository.getBlockRepository().save(blockInfo.getBlockData()); // Save AT state data hashes - for (ATStateData atStateData : blockInfo.getC()) { - atStateData.setHeight(blockInfo.getA().getHeight()); + for (ATStateData atStateData : blockInfo.getAtStates()) { + atStateData.setHeight(blockInfo.getBlockData().getHeight()); repository.getATRepository().save(atStateData); } diff --git a/src/test/java/org/qortal/test/BlockArchiveTests.java b/src/test/java/org/qortal/test/BlockArchiveTests.java index e2f2ed1c..32fd0283 100644 --- a/src/test/java/org/qortal/test/BlockArchiveTests.java +++ b/src/test/java/org/qortal/test/BlockArchiveTests.java @@ -20,6 +20,7 @@ import org.qortal.test.common.Common; import org.qortal.transaction.DeployAtTransaction; import org.qortal.transaction.Transaction; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.utils.BlockArchiveUtils; import org.qortal.utils.NTP; import org.qortal.utils.Triple; @@ -123,8 +124,8 @@ public class BlockArchiveTests extends Common { // Read block 2 from the archive BlockArchiveReader reader = BlockArchiveReader.getInstance(); - Triple, List> block2Info = reader.fetchBlockAtHeight(2); - BlockData block2ArchiveData = block2Info.getA(); + BlockTransformation block2Info = reader.fetchBlockAtHeight(2); + BlockData block2ArchiveData = block2Info.getBlockData(); // Read block 2 from the repository BlockData block2RepositoryData = repository.getBlockRepository().fromHeight(2); @@ -137,8 +138,8 @@ public class BlockArchiveTests extends Common { assertEquals(1, block2ArchiveData.getOnlineAccountsCount()); // Read block 900 from the archive - Triple, List> block900Info = 
reader.fetchBlockAtHeight(900); - BlockData block900ArchiveData = block900Info.getA(); + BlockTransformation block900Info = reader.fetchBlockAtHeight(900); + BlockData block900ArchiveData = block900Info.getBlockData(); // Read block 900 from the repository BlockData block900RepositoryData = repository.getBlockRepository().fromHeight(900); @@ -200,10 +201,10 @@ public class BlockArchiveTests extends Common { // Read a block from the archive BlockArchiveReader reader = BlockArchiveReader.getInstance(); - Triple, List> blockInfo = reader.fetchBlockAtHeight(testHeight); - BlockData archivedBlockData = blockInfo.getA(); - ATStateData archivedAtStateData = blockInfo.getC().isEmpty() ? null : blockInfo.getC().get(0); - List archivedTransactions = blockInfo.getB(); + BlockTransformation blockInfo = reader.fetchBlockAtHeight(testHeight); + BlockData archivedBlockData = blockInfo.getBlockData(); + ATStateData archivedAtStateData = blockInfo.getAtStates().isEmpty() ? null : blockInfo.getAtStates().get(0); + List archivedTransactions = blockInfo.getTransactions(); // Read the same block from the repository BlockData repositoryBlockData = repository.getBlockRepository().fromHeight(testHeight); @@ -255,7 +256,7 @@ public class BlockArchiveTests extends Common { // Check block 10 (unarchived) BlockArchiveReader reader = BlockArchiveReader.getInstance(); - Triple, List> blockInfo = reader.fetchBlockAtHeight(10); + BlockTransformation blockInfo = reader.fetchBlockAtHeight(10); assertNull(blockInfo); } diff --git a/src/test/java/org/qortal/test/BlockTests.java b/src/test/java/org/qortal/test/BlockTests.java index d6fdac02..53b216ec 100644 --- a/src/test/java/org/qortal/test/BlockTests.java +++ b/src/test/java/org/qortal/test/BlockTests.java @@ -24,6 +24,7 @@ import org.qortal.test.common.TransactionUtils; import org.qortal.transaction.Transaction; import org.qortal.transaction.Transaction.TransactionType; import org.qortal.transform.TransformationException; +import org.qortal.transform.block.BlockTransformation; import org.qortal.transform.block.BlockTransformer; import org.qortal.transform.transaction.TransactionTransformer; import org.qortal.utils.Base58; @@ -121,10 +122,10 @@ public class BlockTests extends Common { assertEquals(BlockTransformer.getDataLength(block), bytes.length); - Triple, List> blockInfo = BlockTransformer.fromBytes(bytes); + BlockTransformation blockInfo = BlockTransformer.fromBytes(bytes); // Compare transactions - List deserializedTransactions = blockInfo.getB(); + List deserializedTransactions = blockInfo.getTransactions(); assertEquals("Transaction count differs", blockData.getTransactionCount(), deserializedTransactions.size()); for (int i = 0; i < blockData.getTransactionCount(); ++i) { From f2060fe7a1c7da7c29e7ab4a335dd9c1097390ba Mon Sep 17 00:00:00 2001 From: catbref Date: Sat, 23 Apr 2022 16:04:35 +0100 Subject: [PATCH 02/18] Initial work on online-accounts-v3 network messages to drastically reduce network load. Lots of TODOs to action. 
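
As background for this and the follow-up online-accounts-v3 commits, here is a minimal sketch (not part of the patch itself) of the summarisation idea: known online accounts are bucketed by timestamp and by the leading byte of their public key, and each bucket is folded into one fixed-size hash, so a GET_ONLINE_ACCOUNTS_V3 request carries one hash per bucket rather than every public key. The XOR fold mirrors the xorByteArrayInPlace helper in OnlineAccountsV3Tests; the class and method names below are illustrative assumptions, not names used in the patch.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch only - not part of the patch.
public class OnlineAccountsSummarySketch {

    // XOR otherArray into inplaceArray, leaving index 0 untouched so the
    // leading byte (the bucket key) stays stable across folds.
    private static byte[] xorInPlace(byte[] inplaceArray, byte[] otherArray) {
        if (inplaceArray == null)
            return Arrays.copyOf(otherArray, otherArray.length);

        for (int i = 1; i < otherArray.length; i++)
            inplaceArray[i] ^= otherArray[otherArray.length - i - 1];

        return inplaceArray;
    }

    // Build one hash per (timestamp, leading byte of public key) bucket -
    // the same shape the GetOnlineAccountsV3Message constructor consumes.
    public static Map<Long, Map<Byte, byte[]>> summarise(Map<Long, List<byte[]>> publicKeysByTimestamp) {
        Map<Long, Map<Byte, byte[]>> hashesByTimestampThenByte = new HashMap<>();

        for (Map.Entry<Long, List<byte[]>> entry : publicKeysByTimestamp.entrySet())
            for (byte[] publicKey : entry.getValue())
                hashesByTimestampThenByte
                        .computeIfAbsent(entry.getKey(), k -> new HashMap<>())
                        .compute(publicKey[0], (k, v) -> xorInPlace(v, publicKey));

        return hashesByTimestampThenByte;
    }
}

Because the fold is a plain XOR of equal-length public keys, adding or removing an account only touches one bucket, and two nodes can cheaply spot which (timestamp, leading byte) buckets differ and exchange only the accounts behind those buckets.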
--- .../controller/OnlineAccountsManager.java | 20 +- .../message/GetOnlineAccountsV3Message.java | 110 +++++++++ .../org/qortal/network/message/Message.java | 3 +- .../qortal/network/message/MessageType.java | 2 + .../test/network/OnlineAccountsV3Tests.java | 225 ++++++++++++++++++ 5 files changed, 358 insertions(+), 2 deletions(-) create mode 100644 src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java create mode 100644 src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index 70b04e56..58e4f64e 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -67,9 +67,14 @@ public class OnlineAccountsManager extends Thread { Deque> latestBlocksOnlineAccounts = new ArrayDeque<>(MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS); public OnlineAccountsManager() { - + // TODO: make private, add these tasks to scheduled executor: + // send our online accounts every 10s + // expireOnlineAccounts every ONLINE_ACCOUNTS_CHECK_INTERVAL + // broadcastOnlineAccountsQuery every ONLINE_ACCOUNTS_BROADCAST_INTERVAL + // processOnlineAccountsImportQueue every 100ms? } + // TODO: convert to SingletonContainer a-la Network public static synchronized OnlineAccountsManager getInstance() { if (instance == null) { instance = new OnlineAccountsManager(); @@ -78,6 +83,7 @@ public class OnlineAccountsManager extends Thread { return instance; } + // TODO: see constructor for more info public void run() { // Start separate thread to prepare our online accounts @@ -113,6 +119,7 @@ public class OnlineAccountsManager extends Thread { public void shutdown() { isStopping = true; + // TODO: convert interrrupt to executor.shutdownNow(); this.interrupt(); } @@ -151,11 +158,14 @@ public class OnlineAccountsManager extends Thread { // Utilities + // TODO: split this into validateAccount() and addAccount() private void verifyAndAddAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { final Long now = NTP.getTime(); if (now == null) return; + // TODO: don't create otherAccount, instead: + // byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); PublicKeyAccount otherAccount = new PublicKeyAccount(repository, onlineAccountData.getPublicKey()); // Check timestamp is 'recent' here @@ -166,12 +176,14 @@ public class OnlineAccountsManager extends Thread { // Verify byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); + // TODO: use Crypto.verify() static method directly if (!otherAccount.verify(onlineAccountData.getSignature(), data)) { LOGGER.trace(() -> String.format("Rejecting invalid online account %s", otherAccount.getAddress())); return; } // Qortal: check online account is actually reward-share + // TODO: use "rewardSharePublicKey" from above TODO RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(onlineAccountData.getPublicKey()); if (rewardShareData == null) { // Reward-share doesn't even exist - probably not a good sign @@ -186,6 +198,7 @@ public class OnlineAccountsManager extends Thread { return; } + // TODO: change this.onlineAccounts to a ConcurrentMap? Keyed by timestamp? 
synchronized (this.onlineAccounts) { OnlineAccountData existingAccountData = this.onlineAccounts.stream().filter(account -> Arrays.equals(account.getPublicKey(), onlineAccountData.getPublicKey())).findFirst().orElse(null); @@ -193,17 +206,21 @@ public class OnlineAccountsManager extends Thread { if (existingAccountData.getTimestamp() < onlineAccountData.getTimestamp()) { this.onlineAccounts.remove(existingAccountData); + // TODO: change otherAccount.getAddress() to rewardSharePublicKey in Base58? LOGGER.trace(() -> String.format("Updated online account %s with timestamp %d (was %d)", otherAccount.getAddress(), onlineAccountData.getTimestamp(), existingAccountData.getTimestamp())); } else { + // TODO: change otherAccount.getAddress() to rewardSharePublicKey in Base58? LOGGER.trace(() -> String.format("Not updating existing online account %s", otherAccount.getAddress())); return; } } else { + // TODO: change otherAccount.getAddress() to rewardSharePublicKey in Base58? LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp())); } this.onlineAccounts.add(onlineAccountData); + // TODO: if we actually added a new account, then we need to rebuild our hashes-by-timestamp-then-byte for rewardSharePublicKey's leading byte also } } @@ -220,6 +237,7 @@ public class OnlineAccountsManager extends Thread { final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); + // TODO: use new addAccount() method synchronized (this.onlineAccounts) { this.onlineAccounts.clear(); diff --git a/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java new file mode 100644 index 00000000..02fed2a9 --- /dev/null +++ b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java @@ -0,0 +1,110 @@ +package org.qortal.network.message; + +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import org.qortal.transform.Transformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.*; + +/** + * For requesting online accounts info from remote peer, given our list of online accounts. + * + * Different format to V1 and V2: + * V1 is: number of entries, then timestamp + pubkey for each entry + * V2 is: groups of: number of entries, timestamp, then pubkey for each entry + * V3 is: groups of: timestamp, number of entries (one per leading byte), then hash(pubkeys) for each entry + */ +public class GetOnlineAccountsV3Message extends Message { + + private static final Map> EMPTY_ONLINE_ACCOUNTS = Collections.emptyMap(); + private Map> hashesByTimestampThenByte; + + public GetOnlineAccountsV3Message(Map> hashesByTimestampThenByte) { + super(MessageType.GET_ONLINE_ACCOUNTS_V3); + + // If we don't have ANY online accounts then it's an easier construction... 
+ if (hashesByTimestampThenByte.isEmpty()) { + this.dataBytes = EMPTY_DATA_BYTES; + return; + } + + // We should know exactly how many bytes to allocate now + int byteSize = hashesByTimestampThenByte.size() * (Transformer.TIMESTAMP_LENGTH + Transformer.INT_LENGTH) + + Transformer.TIMESTAMP_LENGTH /* trailing zero entry indicates end of entries */; + + byteSize += hashesByTimestampThenByte.values() + .stream() + .mapToInt(map -> map.size() * Transformer.PUBLIC_KEY_LENGTH) + .sum(); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(byteSize); + + // Warning: no double-checking/fetching! We must be ConcurrentMap compatible. + // So no contains() then get() or multiple get()s on the same key/map. + try { + for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) { + bytes.write(Longs.toByteArray(outerMapEntry.getKey())); + + var innerMap = outerMapEntry.getValue(); + + bytes.write(Ints.toByteArray(innerMap.size())); + + for (byte[] hashBytes : innerMap.values()) { + bytes.write(hashBytes); + } + } + + // end of records + bytes.write(Longs.toByteArray(0L)); + } catch (IOException e) { + throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); + } + + this.dataBytes = bytes.toByteArray(); + this.checksumBytes = Message.generateChecksum(this.dataBytes); + } + + private GetOnlineAccountsV3Message(int id, Map> hashesByTimestampThenByte) { + super(id, MessageType.GET_ONLINE_ACCOUNTS_V3); + + this.hashesByTimestampThenByte = hashesByTimestampThenByte; + } + + public Map> getHashesByTimestampThenByte() { + return this.hashesByTimestampThenByte; + } + + public static Message fromByteBuffer(int id, ByteBuffer bytes) { + // 'empty' case + if (!bytes.hasRemaining()) { + return new GetOnlineAccountsV3Message(id, EMPTY_ONLINE_ACCOUNTS); + } + + Map> hashesByTimestampThenByte = new HashMap<>(); + + while (true) { + long timestamp = bytes.getLong(); + if (timestamp == 0) + // Zero timestamp indicates end of records + break; + + int hashCount = bytes.getInt(); + Map hashesByByte = new HashMap<>(); + + for (int i = 0; i < hashCount; ++i) { + byte[] publicKeyHash = new byte[Transformer.PUBLIC_KEY_LENGTH]; + bytes.get(publicKeyHash); + + hashesByByte.put(publicKeyHash[0], publicKeyHash); + } + + hashesByTimestampThenByte.put(timestamp, hashesByByte); + } + + return new GetOnlineAccountsV3Message(id, hashesByTimestampThenByte); + } + +} diff --git a/src/main/java/org/qortal/network/message/Message.java b/src/main/java/org/qortal/network/message/Message.java index f752b5b9..d8467d90 100644 --- a/src/main/java/org/qortal/network/message/Message.java +++ b/src/main/java/org/qortal/network/message/Message.java @@ -46,6 +46,7 @@ public abstract class Message { private static final int MAX_DATA_SIZE = 10 * 1024 * 1024; // 10MB protected static final byte[] EMPTY_DATA_BYTES = new byte[0]; + private static final ByteBuffer EMPTY_READ_ONLY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_DATA_BYTES).asReadOnlyBuffer(); protected int id; protected final MessageType type; @@ -126,7 +127,7 @@ public abstract class Message { if (dataSize > 0 && dataSize + CHECKSUM_LENGTH > readOnlyBuffer.remaining()) return null; - ByteBuffer dataSlice = null; + ByteBuffer dataSlice = EMPTY_READ_ONLY_BYTE_BUFFER; if (dataSize > 0) { byte[] expectedChecksum = new byte[CHECKSUM_LENGTH]; readOnlyBuffer.get(expectedChecksum); diff --git a/src/main/java/org/qortal/network/message/MessageType.java b/src/main/java/org/qortal/network/message/MessageType.java index c2ae7676..de711dc3 100644 --- 
a/src/main/java/org/qortal/network/message/MessageType.java +++ b/src/main/java/org/qortal/network/message/MessageType.java @@ -46,6 +46,8 @@ public enum MessageType { GET_ONLINE_ACCOUNTS(81, GetOnlineAccountsMessage::fromByteBuffer), ONLINE_ACCOUNTS_V2(82, OnlineAccountsV2Message::fromByteBuffer), GET_ONLINE_ACCOUNTS_V2(83, GetOnlineAccountsV2Message::fromByteBuffer), + // ONLINE_ACCOUNTS_V3(84, OnlineAccountsV3Message::fromByteBuffer), + GET_ONLINE_ACCOUNTS_V3(85, GetOnlineAccountsV3Message::fromByteBuffer), ARBITRARY_DATA(90, ArbitraryDataMessage::fromByteBuffer), GET_ARBITRARY_DATA(91, GetArbitraryDataMessage::fromByteBuffer), diff --git a/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java new file mode 100644 index 00000000..3394213b --- /dev/null +++ b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java @@ -0,0 +1,225 @@ +package org.qortal.test.network; + +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import org.junit.Ignore; +import org.junit.Test; +import org.qortal.data.network.OnlineAccountData; +import org.qortal.network.message.*; +import org.qortal.transform.Transformer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.Security; +import java.util.*; + +import static org.junit.Assert.*; + +public class OnlineAccountsV3Tests { + + private static final Random RANDOM = new Random(); + static { + // This must go before any calls to LogManager/Logger + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); + + Security.insertProviderAt(new BouncyCastleProvider(), 0); + Security.insertProviderAt(new BouncyCastleJsseProvider(), 1); + } + + @Ignore("For informational use") + @Test + public void compareV2ToV3() throws MessageException { + List onlineAccounts = generateOnlineAccounts(false); + + // How many of each timestamp and leading byte (of public key) + Map> hashesByTimestampThenByte = convertToHashMaps(onlineAccounts); + + byte[] v3DataBytes = new GetOnlineAccountsV3Message(hashesByTimestampThenByte).toBytes(); + int v3ByteSize = v3DataBytes.length; + + byte[] v2DataBytes = new GetOnlineAccountsV2Message(onlineAccounts).toBytes(); + int v2ByteSize = v2DataBytes.length; + + int numTimestamps = hashesByTimestampThenByte.size(); + System.out.printf("For %d accounts split across %d timestamp%s: V2 size %d vs V3 size %d%n", + onlineAccounts.size(), + numTimestamps, + numTimestamps != 1 ? 
"s" : "", + v2ByteSize, + v3ByteSize + ); + + for (var outerMapEntry : hashesByTimestampThenByte.entrySet()) { + long timestamp = outerMapEntry.getKey(); + + var innerMap = outerMapEntry.getValue(); + + System.out.printf("For timestamp %d: %d / 256 slots used.%n", + timestamp, + innerMap.size() + ); + } + } + + private Map> convertToHashMaps(List onlineAccounts) { + // How many of each timestamp and leading byte (of public key) + Map> hashesByTimestampThenByte = new HashMap<>(); + + for (OnlineAccountData onlineAccountData : onlineAccounts) { + Long timestamp = onlineAccountData.getTimestamp(); + Byte leadingByte = onlineAccountData.getPublicKey()[0]; + + hashesByTimestampThenByte + .computeIfAbsent(timestamp, k -> new HashMap<>()) + .compute(leadingByte, (k, v) -> xorByteArrayInPlace(v, onlineAccountData.getPublicKey())); + } + + return hashesByTimestampThenByte; + } + + // TODO: This needs to be moved - probably to be OnlineAccountsManager + private static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) { + if (inplaceArray == null) + return Arrays.copyOf(otherArray, otherArray.length); + + // Start from index 1 to enforce static leading byte + for (int i = 1; i < otherArray.length; i++) + inplaceArray[i] ^= otherArray[otherArray.length - i - 1]; + + return inplaceArray; + } + + @Test + public void testOnGetOnlineAccountsV3() { + List ourOnlineAccounts = generateOnlineAccounts(false); + List peersOnlineAccounts = generateOnlineAccounts(false); + + Map> ourConvertedHashes = convertToHashMaps(ourOnlineAccounts); + Map> peersConvertedHashes = convertToHashMaps(peersOnlineAccounts); + + List mockReply = new ArrayList<>(); + + // Warning: no double-checking/fetching - we must be ConcurrentMap compatible! + // So no contains()-then-get() or multiple get()s on the same key/map. + for (var ourOuterMapEntry : ourConvertedHashes.entrySet()) { + Long timestamp = ourOuterMapEntry.getKey(); + + var ourInnerMap = ourOuterMapEntry.getValue(); + var peersInnerMap = peersConvertedHashes.get(timestamp); + + if (peersInnerMap == null) { + // Peer doesn't have this timestamp, so if it's valid (i.e. not too old) then we'd have to send all of ours + for (Byte leadingByte : ourInnerMap.keySet()) + mockReply.add(timestamp + ":" + leadingByte); + } else { + // We have entries for this timestamp so compare against peer's entries + for (var ourInnerMapEntry : ourInnerMap.entrySet()) { + Byte leadingByte = ourInnerMapEntry.getKey(); + byte[] peersHash = peersInnerMap.get(leadingByte); + + if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) { + // We don't match peer, or peer doesn't have - send all online accounts for this timestamp and leading byte + mockReply.add(timestamp + ":" + leadingByte); + } + } + } + } + + int numOurTimestamps = ourConvertedHashes.size(); + System.out.printf("We have %d accounts split across %d timestamp%s%n", + ourOnlineAccounts.size(), + numOurTimestamps, + numOurTimestamps != 1 ? "s" : "" + ); + + int numPeerTimestamps = peersConvertedHashes.size(); + System.out.printf("Peer sent %d accounts split across %d timestamp%s%n", + peersOnlineAccounts.size(), + numPeerTimestamps, + numPeerTimestamps != 1 ? 
"s" : "" + ); + + System.out.printf("We need to send: %d%n%s%n", mockReply.size(), String.join(", ", mockReply)); + } + + @Test + public void testSerialization() throws MessageException { + List onlineAccountsOut = generateOnlineAccounts(true); + Map> hashesByTimestampThenByteOut = convertToHashMaps(onlineAccountsOut); + + validateSerialization(hashesByTimestampThenByteOut); + } + + @Test + public void testEmptySerialization() throws MessageException { + Map> hashesByTimestampThenByteOut = Collections.emptyMap(); + validateSerialization(hashesByTimestampThenByteOut); + + hashesByTimestampThenByteOut = new HashMap<>(); + validateSerialization(hashesByTimestampThenByteOut); + } + + private void validateSerialization(Map> hashesByTimestampThenByteOut) throws MessageException { + Message messageOut = new GetOnlineAccountsV3Message(hashesByTimestampThenByteOut); + byte[] messageBytes = messageOut.toBytes(); + + ByteBuffer byteBuffer = ByteBuffer.wrap(messageBytes).asReadOnlyBuffer(); + + GetOnlineAccountsV3Message messageIn = (GetOnlineAccountsV3Message) Message.fromByteBuffer(byteBuffer); + + Map> hashesByTimestampThenByteIn = messageIn.getHashesByTimestampThenByte(); + + Set timestampsIn = hashesByTimestampThenByteIn.keySet(); + Set timestampsOut = hashesByTimestampThenByteOut.keySet(); + assertEquals("timestamp count mismatch", timestampsOut.size(), timestampsIn.size()); + assertTrue("timestamps mismatch", timestampsIn.containsAll(timestampsOut)); + + for (Long timestamp : timestampsIn) { + Map hashesByByteIn = hashesByTimestampThenByteIn.get(timestamp); + Map hashesByByteOut = hashesByTimestampThenByteOut.get(timestamp); + assertNotNull("timestamp entry missing", hashesByByteOut); + + Set leadingBytesIn = hashesByByteIn.keySet(); + Set leadingBytesOut = hashesByByteOut.keySet(); + assertEquals("leading byte entry count mismatch", leadingBytesOut.size(), leadingBytesIn.size()); + assertTrue("leading byte entry mismatch", leadingBytesIn.containsAll(leadingBytesOut)); + + for (Byte leadingByte : leadingBytesOut) { + byte[] bytesIn = hashesByByteIn.get(leadingByte); + byte[] bytesOut = hashesByByteOut.get(leadingByte); + + assertTrue("pubkey hash mismatch", Arrays.equals(bytesOut, bytesIn)); + } + } + } + + private List generateOnlineAccounts(boolean withSignatures) { + List onlineAccounts = new ArrayList<>(); + + int numTimestamps = RANDOM.nextInt(2) + 1; // 1 or 2 + + for (int t = 0; t < numTimestamps; ++t) { + long timestamp = 1 << 31 + (t + 1) << 12; + int numAccounts = RANDOM.nextInt(3000); + + for (int a = 0; a < numAccounts; ++a) { + byte[] sig = null; + if (withSignatures) { + sig = new byte[Transformer.SIGNATURE_LENGTH]; + RANDOM.nextBytes(sig); + } + + byte[] pubkey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + RANDOM.nextBytes(pubkey); + + onlineAccounts.add(new OnlineAccountData(timestamp, sig, pubkey)); + } + } + + return onlineAccounts; + } + +} From fbdc1e1cdb178728c796a2465003bf62578ce23c Mon Sep 17 00:00:00 2001 From: catbref Date: Sat, 30 Apr 2022 17:17:55 +0100 Subject: [PATCH 03/18] OnlineAccountsV3: Adding support for GET_ONLINE_ACCOUNTS_V3 to Controller, which calls OnlineAccountsManager. With OnlineAccountsV3, instead of nodes sending their list of known online accounts (public keys), nodes now send a summary which contains hashes of known online accounts, one per timestamp + leading-byte combo. Thus outgoing messages are much smaller and scale better with more users. 
Remote peers compare the hashes and send back lists of online accounts (for that timestamp + leading-byte combo) where hashes do not match. Massive rewrite of OnlineAccountsManager to maintain online accounts. Now there are three caches: 1. all online accounts, but split into sets by timestamp 2. 'hashes' of all online accounts, one hash per timestamp+leading-byte combination Mainly for efficient use by GetOnlineAccountsV3 message constructor. 3. online accounts for the highest blocks on our chain to speed up block processing Note that highest blocks might be way older than 'current' blocks if we're somewhat behind in syncing. Other OnlineAccountsManager changes: * Use scheduling executor service to manage subtasks * Switch from 'synchronized' to 'concurrent' collections * Generally switch from Lists to Sets - requires improved OnlineAccountData.hashCode() - further work needed * Only send V3 messages to peers with version >= 3.2.203 (for testing) * More info on which online accounts lists are returned depending on use-cases To test, change your peer's version (in pom.xml?) to v3.2.203. --- src/main/java/org/qortal/block/Block.java | 4 +- .../org/qortal/controller/Controller.java | 4 + .../controller/OnlineAccountsManager.java | 747 ++++++++++-------- .../data/network/OnlineAccountData.java | 13 +- .../test/network/OnlineAccountsV3Tests.java | 19 +- 5 files changed, 447 insertions(+), 340 deletions(-) diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 5fe005d6..a0cba9bb 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -1023,8 +1023,8 @@ public class Block { // If this block is much older than current online timestamp, then there's no point checking current online accounts List currentOnlineAccounts = onlineTimestamp < NTP.getTime() - OnlineAccountsManager.ONLINE_TIMESTAMP_MODULUS ? 
null - : OnlineAccountsManager.getInstance().getOnlineAccounts(); - List latestBlocksOnlineAccounts = OnlineAccountsManager.getInstance().getLatestBlocksOnlineAccounts(); + : OnlineAccountsManager.getInstance().getOnlineAccounts(onlineTimestamp); + List latestBlocksOnlineAccounts = OnlineAccountsManager.getInstance().getLatestBlocksOnlineAccounts(onlineTimestamp); // Extract online accounts' timestamp signatures from block data List onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures()); diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java index 0a011db5..d6be1b07 100644 --- a/src/main/java/org/qortal/controller/Controller.java +++ b/src/main/java/org/qortal/controller/Controller.java @@ -1229,6 +1229,10 @@ public class Controller extends Thread { OnlineAccountsManager.getInstance().onNetworkOnlineAccountsV2Message(peer, message); break; + case GET_ONLINE_ACCOUNTS_V3: + OnlineAccountsManager.getInstance().onNetworkGetOnlineAccountsV3Message(peer, message); + break; + case GET_ARBITRARY_DATA: // Not currently supported break; diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index 58e4f64e..4e8b3c77 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -1,12 +1,13 @@ package org.qortal.controller; +import com.google.common.hash.HashCode; import com.google.common.primitives.Longs; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; -import org.qortal.account.PublicKeyAccount; import org.qortal.block.BlockChain; +import org.qortal.crypto.Crypto; import org.qortal.data.account.MintingAccountData; import org.qortal.data.account.RewardShareData; import org.qortal.data.network.OnlineAccountData; @@ -18,212 +19,101 @@ import org.qortal.repository.Repository; import org.qortal.repository.RepositoryManager; import org.qortal.utils.Base58; import org.qortal.utils.NTP; +import org.qortal.utils.NamedThreadFactory; import java.util.*; +import java.util.concurrent.*; import java.util.stream.Collectors; -public class OnlineAccountsManager extends Thread { - - private class OurOnlineAccountsThread extends Thread { - - public void run() { - try { - while (!isStopping) { - Thread.sleep(10000L); - - // Refresh our online accounts signatures? - sendOurOnlineAccountsInfo(); - - } - } catch (InterruptedException e) { - // Fall through to exit thread - } - } - } - +public class OnlineAccountsManager { private static final Logger LOGGER = LogManager.getLogger(OnlineAccountsManager.class); - private static OnlineAccountsManager instance; + // 'Current' as in 'now' + + /** + * How long online accounts signatures last before they expire. + */ + public static final long ONLINE_TIMESTAMP_MODULUS = 5 * 60 * 1000L; + + /** + * How many 'current' timestamp-sets of online accounts we cache. + */ + private static final int MAX_CACHED_TIMESTAMP_SETS = 2; + + /** + * How many timestamp-sets of online accounts we cache for 'latest blocks'. 
+ */ + private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 3; + + private static final long ONLINE_ACCOUNTS_QUEUE_INTERVAL = 100L; //ms + private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms + private static final long ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL = 60 * 1000L; // ms + private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 10 * 1000L; // ms + + private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; // v3.2.0 + private static final long ONLINE_ACCOUNTS_V3_PEER_VERSION = 0x03000200cbL; // v3.2.203 + + private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts")); private volatile boolean isStopping = false; - // To do with online accounts list - private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms - private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 1 * 60 * 1000L; // ms - public static final long ONLINE_TIMESTAMP_MODULUS = 5 * 60 * 1000L; - private static final long LAST_SEEN_EXPIRY_PERIOD = (ONLINE_TIMESTAMP_MODULUS * 2) + (1 * 60 * 1000L); - /** How many (latest) blocks' worth of online accounts we cache */ - private static final int MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS = 2; - private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; + private final Set onlineAccountsImportQueue = ConcurrentHashMap.newKeySet(); - private long onlineAccountsTasksTimestamp = Controller.startTime + ONLINE_ACCOUNTS_TASKS_INTERVAL; // ms + /** + * Cache of 'current' online accounts, keyed by timestamp + */ + private final Map> currentOnlineAccounts = new ConcurrentHashMap<>(); + /** + * Cache of hash-summary of 'current' online accounts, keyed by timestamp, then leading byte of public key. + *
+ * Inner map is also sorted using {@code Byte::compareUnsigned} as a comparator. + * This is critical for proper function of GET_ONLINE_ACCOUNTS_V3 protocol. + */ + private final Map> currentOnlineAccountsHashes = new ConcurrentHashMap<>(); - private final List onlineAccountsImportQueue = Collections.synchronizedList(new ArrayList<>()); + /** + * Cache of online accounts for latest blocks - not necessarily 'current' / now. + * Probably only accessed / modified by a single Synchronizer thread. + */ + private final Map> latestBlocksOnlineAccounts = new ConcurrentHashMap<>(); - - /** Cache of current 'online accounts' */ - List onlineAccounts = new ArrayList<>(); - /** Cache of latest blocks' online accounts */ - Deque> latestBlocksOnlineAccounts = new ArrayDeque<>(MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS); - - public OnlineAccountsManager() { - // TODO: make private, add these tasks to scheduled executor: - // send our online accounts every 10s - // expireOnlineAccounts every ONLINE_ACCOUNTS_CHECK_INTERVAL - // broadcastOnlineAccountsQuery every ONLINE_ACCOUNTS_BROADCAST_INTERVAL - // processOnlineAccountsImportQueue every 100ms? + public static long toOnlineAccountTimestamp(long timestamp) { + return (timestamp / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS; } - // TODO: convert to SingletonContainer a-la Network - public static synchronized OnlineAccountsManager getInstance() { - if (instance == null) { - instance = new OnlineAccountsManager(); - } - - return instance; + private OnlineAccountsManager() { } - // TODO: see constructor for more info - public void run() { + private static class SingletonContainer { + private static final OnlineAccountsManager INSTANCE = new OnlineAccountsManager(); + } - // Start separate thread to prepare our online accounts - // This could be converted to a thread pool later if more concurrency is needed - OurOnlineAccountsThread ourOnlineAccountsThread = new OurOnlineAccountsThread(); - ourOnlineAccountsThread.start(); + public static OnlineAccountsManager getInstance() { + return SingletonContainer.INSTANCE; + } - try { - while (!Controller.isStopping()) { - Thread.sleep(100L); + public void start() { + // Expire old online accounts signatures + executor.scheduleAtFixedRate(this::expireOldOnlineAccounts, ONLINE_ACCOUNTS_TASKS_INTERVAL, ONLINE_ACCOUNTS_TASKS_INTERVAL, TimeUnit.MILLISECONDS); - final Long now = NTP.getTime(); - if (now == null) { - continue; - } + // Send our online accounts + executor.scheduleAtFixedRate(this::sendOurOnlineAccountsInfo, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); - // Perform tasks to do with managing online accounts list - if (now >= onlineAccountsTasksTimestamp) { - onlineAccountsTasksTimestamp = now + ONLINE_ACCOUNTS_TASKS_INTERVAL; - performOnlineAccountsTasks(); - } + // Request online accounts from peers (legacy) + executor.scheduleAtFixedRate(this::requestLegacyRemoteOnlineAccounts, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); + // Request online accounts from peers (V3+) + executor.scheduleAtFixedRate(this::requestRemoteOnlineAccounts, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, ONLINE_ACCOUNTS_BROADCAST_INTERVAL, TimeUnit.MILLISECONDS); - // Process queued online account verifications - this.processOnlineAccountsImportQueue(); - - } - } catch (InterruptedException e) { - // Fall through to exit thread - } - - ourOnlineAccountsThread.interrupt(); + // Process import queue + 
executor.scheduleWithFixedDelay(this::processOnlineAccountsImportQueue, ONLINE_ACCOUNTS_QUEUE_INTERVAL, ONLINE_ACCOUNTS_QUEUE_INTERVAL, TimeUnit.MILLISECONDS); } public void shutdown() { isStopping = true; - // TODO: convert interrrupt to executor.shutdownNow(); - this.interrupt(); - } - - - // Online accounts import queue - - private void processOnlineAccountsImportQueue() { - if (this.onlineAccountsImportQueue.isEmpty()) { - // Nothing to do - return; - } - - LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size()); - - try (final Repository repository = RepositoryManager.getRepository()) { - - List onlineAccountDataCopy = new ArrayList<>(this.onlineAccountsImportQueue); - for (OnlineAccountData onlineAccountData : onlineAccountDataCopy) { - if (isStopping) { - return; - } - - this.verifyAndAddAccount(repository, onlineAccountData); - - // Remove from queue - onlineAccountsImportQueue.remove(onlineAccountData); - } - - LOGGER.debug("Finished processing online accounts import queue"); - - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while verifying online accounts"), e); - } - } - - - // Utilities - - // TODO: split this into validateAccount() and addAccount() - private void verifyAndAddAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { - final Long now = NTP.getTime(); - if (now == null) - return; - - // TODO: don't create otherAccount, instead: - // byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); - PublicKeyAccount otherAccount = new PublicKeyAccount(repository, onlineAccountData.getPublicKey()); - - // Check timestamp is 'recent' here - if (Math.abs(onlineAccountData.getTimestamp() - now) > ONLINE_TIMESTAMP_MODULUS * 2) { - LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp())); - return; - } - - // Verify - byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); - // TODO: use Crypto.verify() static method directly - if (!otherAccount.verify(onlineAccountData.getSignature(), data)) { - LOGGER.trace(() -> String.format("Rejecting invalid online account %s", otherAccount.getAddress())); - return; - } - - // Qortal: check online account is actually reward-share - // TODO: use "rewardSharePublicKey" from above TODO - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(onlineAccountData.getPublicKey()); - if (rewardShareData == null) { - // Reward-share doesn't even exist - probably not a good sign - LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(onlineAccountData.getPublicKey()))); - return; - } - - Account mintingAccount = new Account(repository, rewardShareData.getMinter()); - if (!mintingAccount.canMint()) { - // Minting-account component of reward-share can no longer mint - disregard - LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress())); - return; - } - - // TODO: change this.onlineAccounts to a ConcurrentMap? Keyed by timestamp? 
- synchronized (this.onlineAccounts) { - OnlineAccountData existingAccountData = this.onlineAccounts.stream().filter(account -> Arrays.equals(account.getPublicKey(), onlineAccountData.getPublicKey())).findFirst().orElse(null); - - if (existingAccountData != null) { - if (existingAccountData.getTimestamp() < onlineAccountData.getTimestamp()) { - this.onlineAccounts.remove(existingAccountData); - - // TODO: change otherAccount.getAddress() to rewardSharePublicKey in Base58? - LOGGER.trace(() -> String.format("Updated online account %s with timestamp %d (was %d)", otherAccount.getAddress(), onlineAccountData.getTimestamp(), existingAccountData.getTimestamp())); - } else { - // TODO: change otherAccount.getAddress() to rewardSharePublicKey in Base58? - LOGGER.trace(() -> String.format("Not updating existing online account %s", otherAccount.getAddress())); - - return; - } - } else { - // TODO: change otherAccount.getAddress() to rewardSharePublicKey in Base58? - LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp())); - } - - this.onlineAccounts.add(onlineAccountData); - // TODO: if we actually added a new account, then we need to rebuild our hashes-by-timestamp-then-byte for rewardSharePublicKey's leading byte also - } + executor.shutdownNow(); } + // Testing support public void ensureTestingAccountsOnline(PrivateKeyAccount... onlineAccounts) { if (!BlockChain.getInstance().isTestChain()) { LOGGER.warn("Ignoring attempt to ensure test account is online for non-test chain!"); @@ -237,61 +127,222 @@ public class OnlineAccountsManager extends Thread { final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); - // TODO: use new addAccount() method - synchronized (this.onlineAccounts) { - this.onlineAccounts.clear(); + Set replacementAccounts = new HashSet<>(); + for (PrivateKeyAccount onlineAccount : onlineAccounts) { + // Check mintingAccount is actually reward-share? - for (PrivateKeyAccount onlineAccount : onlineAccounts) { - // Check mintingAccount is actually reward-share? 
+ byte[] signature = onlineAccount.sign(timestampBytes); + byte[] publicKey = onlineAccount.getPublicKey(); - byte[] signature = onlineAccount.sign(timestampBytes); - byte[] publicKey = onlineAccount.getPublicKey(); + OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); + replacementAccounts.add(ourOnlineAccountData); + } - OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); - this.onlineAccounts.add(ourOnlineAccountData); + this.currentOnlineAccounts.clear(); + addAccounts(replacementAccounts); + } + + // Online accounts import queue + + private void processOnlineAccountsImportQueue() { + if (this.onlineAccountsImportQueue.isEmpty()) + // Nothing to do + return; + + LOGGER.debug("Processing online accounts import queue (size: {})", this.onlineAccountsImportQueue.size()); + + Set onlineAccountsToAdd = new HashSet<>(); + try (final Repository repository = RepositoryManager.getRepository()) { + for (OnlineAccountData onlineAccountData : this.onlineAccountsImportQueue) { + if (isStopping) + return; + + boolean isValid = this.validateAccount(repository, onlineAccountData); + if (isValid) + onlineAccountsToAdd.add(onlineAccountData); + + // Remove from queue + onlineAccountsImportQueue.remove(onlineAccountData); + } + + LOGGER.debug("Finished validating online accounts import queue"); + } catch (DataException e) { + LOGGER.error("Repository issue while verifying online accounts", e); + } + + LOGGER.debug("Adding {} validated online accounts from import queue", onlineAccountsToAdd.size()); + addAccounts(onlineAccountsToAdd); + } + + // Utilities + + public static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) { + if (inplaceArray == null) + return Arrays.copyOf(otherArray, otherArray.length); + + // Start from index 1 to enforce static leading byte + for (int i = 1; i < otherArray.length; i++) + // inplaceArray[i] ^= otherArray[otherArray.length - i - 1]; + inplaceArray[i] ^= otherArray[i]; + + return inplaceArray; + } + + private boolean validateAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { + final Long now = NTP.getTime(); + if (now == null) + return false; + + byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); + long onlineAccountTimestamp = onlineAccountData.getTimestamp(); + + // Check timestamp is 'recent' here + if (Math.abs(onlineAccountTimestamp - now) > ONLINE_TIMESTAMP_MODULUS * 2) { + LOGGER.trace(() -> String.format("Rejecting online account %s with out of range timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + return false; + } + + // Verify + byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); + if (!Crypto.verify(rewardSharePublicKey, onlineAccountData.getSignature(), data)) { + LOGGER.trace(() -> String.format("Rejecting invalid online account %s", Base58.encode(rewardSharePublicKey))); + return false; + } + + // Qortal: check online account is actually reward-share + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(rewardSharePublicKey); + if (rewardShareData == null) { + // Reward-share doesn't even exist - probably not a good sign + LOGGER.trace(() -> String.format("Rejecting unknown online reward-share public key %s", Base58.encode(rewardSharePublicKey))); + return false; + } + + Account mintingAccount = new Account(repository, rewardShareData.getMinter()); + if (!mintingAccount.canMint()) { + // 
Minting-account component of reward-share can no longer mint - disregard + LOGGER.trace(() -> String.format("Rejecting online reward-share with non-minting account %s", mintingAccount.getAddress())); + return false; + } + + return true; + } + + private void addAccounts(Set onlineAccountsToAdd) { + // For keeping track of which hashes to rebuild + Map> hashesToRebuild = new HashMap<>(); + + for (OnlineAccountData onlineAccountData : onlineAccountsToAdd) { + boolean isNewEntry = this.addAccount(onlineAccountData); + + if (isNewEntry) + hashesToRebuild.computeIfAbsent(onlineAccountData.getTimestamp(), k -> new HashSet<>()).add(onlineAccountData.getPublicKey()[0]); + } + + for (var entry : hashesToRebuild.entrySet()) { + Long timestamp = entry.getKey(); + + LOGGER.debug(String.format("Rehashing for timestamp %d and leading bytes %s", + timestamp, + entry.getValue().stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", ")) + ) + ); + + for (Byte leadingByte : entry.getValue()) { + byte[] pubkeyHash = currentOnlineAccounts.get(timestamp).stream() + .map(OnlineAccountData::getPublicKey) + .filter(publicKey -> leadingByte == publicKey[0]) + .reduce(null, OnlineAccountsManager::xorByteArrayInPlace); + + currentOnlineAccountsHashes.computeIfAbsent(timestamp, k -> new ConcurrentSkipListMap<>(Byte::compareUnsigned)).put(leadingByte, pubkeyHash); + + LOGGER.trace(() -> String.format("Rebuilt hash %s for timestamp %d and leading byte %02x using %d public keys", + HashCode.fromBytes(pubkeyHash), + timestamp, + leadingByte, + currentOnlineAccounts.get(timestamp).stream() + .map(OnlineAccountData::getPublicKey) + .filter(publicKey -> leadingByte == publicKey[0]) + .count() + )); } } } - private void performOnlineAccountsTasks() { + private boolean addAccount(OnlineAccountData onlineAccountData) { + byte[] rewardSharePublicKey = onlineAccountData.getPublicKey(); + long onlineAccountTimestamp = onlineAccountData.getTimestamp(); + + Set onlineAccounts = this.currentOnlineAccounts.computeIfAbsent(onlineAccountTimestamp, k -> ConcurrentHashMap.newKeySet()); + boolean isNewEntry = onlineAccounts.add(onlineAccountData); + + if (isNewEntry) + LOGGER.trace(() -> String.format("Added online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + else + LOGGER.trace(() -> String.format("Not updating existing online account %s with timestamp %d", Base58.encode(rewardSharePublicKey), onlineAccountTimestamp)); + + return isNewEntry; + } + + /** + * Expire old entries. + */ + private void expireOldOnlineAccounts() { final Long now = NTP.getTime(); if (now == null) return; - // Expire old entries - final long cutoffThreshold = now - LAST_SEEN_EXPIRY_PERIOD; - synchronized (this.onlineAccounts) { - Iterator iterator = this.onlineAccounts.iterator(); - while (iterator.hasNext()) { - OnlineAccountData onlineAccountData = iterator.next(); - - if (onlineAccountData.getTimestamp() < cutoffThreshold) { - iterator.remove(); - - LOGGER.trace(() -> { - PublicKeyAccount otherAccount = new PublicKeyAccount(null, onlineAccountData.getPublicKey()); - return String.format("Removed expired online account %s with timestamp %d", otherAccount.getAddress(), onlineAccountData.getTimestamp()); - }); - } - } - } - - // Request data from other peers? 
- if ((this.onlineAccountsTasksTimestamp % ONLINE_ACCOUNTS_BROADCAST_INTERVAL) < ONLINE_ACCOUNTS_TASKS_INTERVAL) { - List safeOnlineAccounts; - synchronized (this.onlineAccounts) { - safeOnlineAccounts = new ArrayList<>(this.onlineAccounts); - } - - Message messageV1 = new GetOnlineAccountsMessage(safeOnlineAccounts); - Message messageV2 = new GetOnlineAccountsV2Message(safeOnlineAccounts); - - Network.getInstance().broadcast(peer -> - peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1 - ); - } + final long cutoffThreshold = now - MAX_CACHED_TIMESTAMP_SETS * ONLINE_TIMESTAMP_MODULUS; + this.currentOnlineAccounts.keySet().removeIf(timestamp -> timestamp < cutoffThreshold); + this.currentOnlineAccountsHashes.keySet().removeIf(timestamp -> timestamp < cutoffThreshold); } + /** + * Request data from other peers. (Pre-V3) + */ + private void requestLegacyRemoteOnlineAccounts() { + final Long now = NTP.getTime(); + if (now == null) + return; + + // Don't bother if we're not up to date + if (!Controller.getInstance().isUpToDate()) + return; + + List mergedOnlineAccounts = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + + Message messageV2 = new GetOnlineAccountsV2Message(mergedOnlineAccounts); + + Network.getInstance().broadcast(peer -> + peer.getPeersVersion() < ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV2 + : null + ); + } + + /** + * Request data from other peers. V3+ + */ + private void requestRemoteOnlineAccounts() { + final Long now = NTP.getTime(); + if (now == null) + return; + + // Don't bother if we're not up to date + if (!Controller.getInstance().isUpToDate()) + return; + + Message messageV3 = new GetOnlineAccountsV3Message(currentOnlineAccountsHashes); + + Network.getInstance().broadcast(peer -> + peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV3 + : null + ); + } + + /** + * Send online accounts that are minting on this node. 
+ */ private void sendOurOnlineAccountsInfo() { final Long now = NTP.getTime(); if (now == null) { @@ -302,13 +353,12 @@ public class OnlineAccountsManager extends Thread { try (final Repository repository = RepositoryManager.getRepository()) { mintingAccounts = repository.getAccountRepository().getMintingAccounts(); - // We have no accounts, but don't reset timestamp + // We have no accounts to send if (mintingAccounts.isEmpty()) return; - // Only reward-share accounts allowed + // Only active reward-shares allowed Iterator iterator = mintingAccounts.iterator(); - int i = 0; while (iterator.hasNext()) { MintingAccountData mintingAccountData = iterator.next(); @@ -325,11 +375,6 @@ public class OnlineAccountsManager extends Thread { iterator.remove(); continue; } - - if (++i > 1+1) { - iterator.remove(); - continue; - } } } catch (DataException e) { LOGGER.warn(String.format("Repository issue trying to fetch minting accounts: %s", e.getMessage())); @@ -343,7 +388,6 @@ public class OnlineAccountsManager extends Thread { byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); List ourOnlineAccounts = new ArrayList<>(); - MINTING_ACCOUNTS: for (MintingAccountData mintingAccountData : mintingAccounts) { PrivateKeyAccount mintingAccount = new PrivateKeyAccount(null, mintingAccountData.getPrivateKey()); @@ -352,28 +396,13 @@ public class OnlineAccountsManager extends Thread { // Our account is online OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); - synchronized (this.onlineAccounts) { - Iterator iterator = this.onlineAccounts.iterator(); - while (iterator.hasNext()) { - OnlineAccountData existingOnlineAccountData = iterator.next(); - if (Arrays.equals(existingOnlineAccountData.getPublicKey(), ourOnlineAccountData.getPublicKey())) { - // If our online account is already present, with same timestamp, then move on to next mintingAccount - if (existingOnlineAccountData.getTimestamp() == onlineAccountsTimestamp) - continue MINTING_ACCOUNTS; - - // If our online account is already present, but with older timestamp, then remove it - iterator.remove(); - break; - } - } - - this.onlineAccounts.add(ourOnlineAccountData); + boolean isNewEntry = addAccount(ourOnlineAccountData); + if (isNewEntry) { + LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", Base58.encode(mintingAccount.getPublicKey()), onlineAccountsTimestamp)); + ourOnlineAccounts.add(ourOnlineAccountData); + hasInfoChanged = true; } - - LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", mintingAccount.getAddress(), onlineAccountsTimestamp)); - ourOnlineAccounts.add(ourOnlineAccountData); - hasInfoChanged = true; } if (!hasInfoChanged) @@ -381,52 +410,81 @@ public class OnlineAccountsManager extends Thread { Message messageV1 = new OnlineAccountsMessage(ourOnlineAccounts); Message messageV2 = new OnlineAccountsV2Message(ourOnlineAccounts); + Message messageV3 = new OnlineAccountsV2Message(ourOnlineAccounts); // TODO: V3 message Network.getInstance().broadcast(peer -> - peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1 + peer.getPeersVersion() >= ONLINE_ACCOUNTS_V3_PEER_VERSION + ? messageV3 + : peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION + ? messageV2 + : messageV1 ); - LOGGER.trace(() -> String.format("Broadcasted %d online account%s with timestamp %d", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? 
"s" : ""), onlineAccountsTimestamp)); + LOGGER.debug("Broadcasted {} online account{} with timestamp {}", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp); } - public static long toOnlineAccountTimestamp(long timestamp) { - return (timestamp / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS; + /** + * Returns list of online accounts matching given timestamp. + */ + // Block::mint() - only wants online accounts with timestamp that matches block's timestamp so they can be added to new block + // Block::areOnlineAccountsValid() - only wants online accounts with timestamp that matches block's timestamp to avoid re-verifying sigs + public List getOnlineAccounts(long onlineTimestamp) { + return new ArrayList<>(Set.copyOf(this.currentOnlineAccounts.getOrDefault(onlineTimestamp, Collections.emptySet()))); } - /** Returns list of online accounts with timestamp recent enough to be considered currently online. */ + /** + * Returns list of online accounts with timestamp recent enough to be considered currently online. + */ + // API: calls this to return list of online accounts - probably expects ALL timestamps - but going to get 'current' from now on + // BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not public List getOnlineAccounts() { - final long onlineTimestamp = toOnlineAccountTimestamp(NTP.getTime()); + final Long now = NTP.getTime(); + if (now == null) + return Collections.emptyList(); - synchronized (this.onlineAccounts) { - return this.onlineAccounts.stream().filter(account -> account.getTimestamp() == onlineTimestamp).collect(Collectors.toList()); - } + final long onlineTimestamp = toOnlineAccountTimestamp(now); + + return getOnlineAccounts(onlineTimestamp); } + /** + * Returns cached, unmodifiable list of latest block's online accounts. + */ + // TODO: this needs tidying up - do we change method to only return latest timestamp's set? + // Block::areOnlineAccountsValid() - only wants online accounts with timestamp that matches latest / previous block's timestamp to avoid re-verifying sigs + public List getLatestBlocksOnlineAccounts(long blockOnlineTimestamp) { + Set onlineAccounts = this.latestBlocksOnlineAccounts.getOrDefault(blockOnlineTimestamp, Collections.emptySet()); - /** Returns cached, unmodifiable list of latest block's online accounts. */ - public List getLatestBlocksOnlineAccounts() { - synchronized (this.latestBlocksOnlineAccounts) { - return this.latestBlocksOnlineAccounts.peekFirst(); - } + return List.copyOf(onlineAccounts); } - /** Caches list of latest block's online accounts. Typically called by Block.process() */ + /** + * Caches list of latest block's online accounts. Typically called by Block.process() + */ + // TODO: is this simply a bulk add, like the import queue but blocking? Used by Synchronizer but could be for blocks that are quite historic? + // Block::process() - basically for adding latest block's online accounts to cache to avoid re-verifying when processing another block in the future public void pushLatestBlocksOnlineAccounts(List latestBlocksOnlineAccounts) { - synchronized (this.latestBlocksOnlineAccounts) { - if (this.latestBlocksOnlineAccounts.size() == MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) - this.latestBlocksOnlineAccounts.pollLast(); + if (latestBlocksOnlineAccounts == null || latestBlocksOnlineAccounts.isEmpty()) + return; - this.latestBlocksOnlineAccounts.addFirst(latestBlocksOnlineAccounts == null - ? 
Collections.emptyList() - : Collections.unmodifiableList(latestBlocksOnlineAccounts)); - } + long timestamp = latestBlocksOnlineAccounts.get(0).getTimestamp(); + + this.latestBlocksOnlineAccounts.computeIfAbsent(timestamp, k -> ConcurrentHashMap.newKeySet()).addAll(latestBlocksOnlineAccounts); + + if (this.latestBlocksOnlineAccounts.size() > MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) + this.latestBlocksOnlineAccounts.keySet().stream() + .sorted() + .findFirst() + .ifPresent(this.latestBlocksOnlineAccounts::remove); } - /** Reverts list of latest block's online accounts. Typically called by Block.orphan() */ + /** + * Reverts list of latest block's online accounts. Typically called by Block.orphan() + */ + // TODO: see above + // Block::orphan() - for removing latest block's online accounts from cache public void popLatestBlocksOnlineAccounts() { - synchronized (this.latestBlocksOnlineAccounts) { - this.latestBlocksOnlineAccounts.pollFirst(); - } + // NO-OP } @@ -438,45 +496,48 @@ public class OnlineAccountsManager extends Thread { List excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts(); // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts - List accountsToSend; - synchronized (this.onlineAccounts) { - accountsToSend = new ArrayList<>(this.onlineAccounts); - } + List accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + int prefilterSize = accountsToSend.size(); Iterator iterator = accountsToSend.iterator(); - - SEND_ITERATOR: while (iterator.hasNext()) { OnlineAccountData onlineAccountData = iterator.next(); - for (int i = 0; i < excludeAccounts.size(); ++i) { - OnlineAccountData excludeAccountData = excludeAccounts.get(i); - + for (OnlineAccountData excludeAccountData : excludeAccounts) { if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) { iterator.remove(); - continue SEND_ITERATOR; + break; } } } + if (accountsToSend.isEmpty()) + return; + Message onlineAccountsMessage = new OnlineAccountsMessage(accountsToSend); peer.sendMessage(onlineAccountsMessage); - LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer)); + LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer); } public void onNetworkOnlineAccountsMessage(Peer peer, Message message) { OnlineAccountsMessage onlineAccountsMessage = (OnlineAccountsMessage) message; List peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts(); - LOGGER.trace(() -> String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer)); + LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer); - try (final Repository repository = RepositoryManager.getRepository()) { - for (OnlineAccountData onlineAccountData : peersOnlineAccounts) - this.verifyAndAddAccount(repository, onlineAccountData); - } catch (DataException e) { - LOGGER.error(String.format("Repository issue while verifying online accounts from peer %s", peer), e); + int importCount = 0; + + // Add any online accounts to the queue that aren't already present + for (OnlineAccountData onlineAccountData : peersOnlineAccounts) { + boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData); + + if (isNewEntry) + importCount++; } + + if (importCount > 0) + LOGGER.debug("Added {} 
online accounts to queue", importCount); } public void onNetworkGetOnlineAccountsV2Message(Peer peer, Message message) { @@ -485,58 +546,106 @@ public class OnlineAccountsManager extends Thread { List excludeAccounts = getOnlineAccountsMessage.getOnlineAccounts(); // Send online accounts info, excluding entries with matching timestamp & public key from excludeAccounts - List accountsToSend; - synchronized (this.onlineAccounts) { - accountsToSend = new ArrayList<>(this.onlineAccounts); - } + List accountsToSend = Set.copyOf(this.currentOnlineAccounts.values()).stream().flatMap(Set::stream).collect(Collectors.toList()); + int prefilterSize = accountsToSend.size(); Iterator iterator = accountsToSend.iterator(); - - SEND_ITERATOR: while (iterator.hasNext()) { OnlineAccountData onlineAccountData = iterator.next(); - for (int i = 0; i < excludeAccounts.size(); ++i) { - OnlineAccountData excludeAccountData = excludeAccounts.get(i); - + for (OnlineAccountData excludeAccountData : excludeAccounts) { if (onlineAccountData.getTimestamp() == excludeAccountData.getTimestamp() && Arrays.equals(onlineAccountData.getPublicKey(), excludeAccountData.getPublicKey())) { iterator.remove(); - continue SEND_ITERATOR; + break; } } } + if (accountsToSend.isEmpty()) + return; + Message onlineAccountsMessage = new OnlineAccountsV2Message(accountsToSend); peer.sendMessage(onlineAccountsMessage); - LOGGER.trace(() -> String.format("Sent %d of our %d online accounts to %s", accountsToSend.size(), this.onlineAccounts.size(), peer)); + LOGGER.debug("Sent {} of our {} online accounts to {}", accountsToSend.size(), prefilterSize, peer); } public void onNetworkOnlineAccountsV2Message(Peer peer, Message message) { OnlineAccountsV2Message onlineAccountsMessage = (OnlineAccountsV2Message) message; List peersOnlineAccounts = onlineAccountsMessage.getOnlineAccounts(); - LOGGER.debug(String.format("Received %d online accounts from %s", peersOnlineAccounts.size(), peer)); + LOGGER.debug("Received {} online accounts from {}", peersOnlineAccounts.size(), peer); int importCount = 0; // Add any online accounts to the queue that aren't already present for (OnlineAccountData onlineAccountData : peersOnlineAccounts) { + boolean isNewEntry = onlineAccountsImportQueue.add(onlineAccountData); - // Do we already know about this online account data? - if (onlineAccounts.contains(onlineAccountData)) { - continue; - } - - // Is it already in the import queue? - if (onlineAccountsImportQueue.contains(onlineAccountData)) { - continue; - } - - onlineAccountsImportQueue.add(onlineAccountData); - importCount++; + if (isNewEntry) + importCount++; } - LOGGER.debug(String.format("Added %d online accounts to queue", importCount)); + if (importCount > 0) + LOGGER.debug("Added {} online accounts to queue", importCount); + } + + public void onNetworkGetOnlineAccountsV3Message(Peer peer, Message message) { + GetOnlineAccountsV3Message getOnlineAccountsMessage = (GetOnlineAccountsV3Message) message; + + Map> peersHashes = getOnlineAccountsMessage.getHashesByTimestampThenByte(); + List outgoingOnlineAccounts = new ArrayList<>(); + + // Warning: no double-checking/fetching - we must be ConcurrentMap compatible! + // So no contains()-then-get() or multiple get()s on the same key/map. + // We also use getOrDefault() with emptySet() on currentOnlineAccounts in case corresponding timestamp entry isn't there. 
+ for (var ourOuterMapEntry : currentOnlineAccountsHashes.entrySet()) { + Long timestamp = ourOuterMapEntry.getKey(); + + var ourInnerMap = ourOuterMapEntry.getValue(); + var peersInnerMap = peersHashes.get(timestamp); + + if (peersInnerMap == null) { + // Peer doesn't have this timestamp, so if it's valid (i.e. not too old) then we'd have to send all of ours + Set timestampsOnlineAccounts = this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()); + outgoingOnlineAccounts.addAll(timestampsOnlineAccounts); + + LOGGER.debug(() -> String.format("Going to send all %d online accounts for timestamp %d", timestampsOnlineAccounts.size(), timestamp)); + } else { + // Quick cache of which leading bytes to send so we only have to filter once + Set outgoingLeadingBytes = new HashSet<>(); + + // We have entries for this timestamp so compare against peer's entries + for (var ourInnerMapEntry : ourInnerMap.entrySet()) { + Byte leadingByte = ourInnerMapEntry.getKey(); + byte[] peersHash = peersInnerMap.get(leadingByte); + + if (!Arrays.equals(ourInnerMapEntry.getValue(), peersHash)) { + // For this leading byte: hashes don't match or peer doesn't have entry + // Send all online accounts for this timestamp and leading byte + outgoingLeadingBytes.add(leadingByte); + } + } + + int beforeAddSize = outgoingOnlineAccounts.size(); + + this.currentOnlineAccounts.getOrDefault(timestamp, Collections.emptySet()).stream() + .filter(account -> outgoingLeadingBytes.contains(account.getPublicKey()[0])) + .forEach(outgoingOnlineAccounts::add); + + if (outgoingLeadingBytes.size() > beforeAddSize) + LOGGER.debug(String.format("Going to send %d online accounts for timestamp %d and leading bytes %s", + outgoingOnlineAccounts.size() - beforeAddSize, + timestamp, + outgoingLeadingBytes.stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", ")) + ) + ); + } + } + + Message onlineAccountsMessage = new OnlineAccountsV2Message(outgoingOnlineAccounts); // TODO: V3 message + peer.sendMessage(onlineAccountsMessage); + + LOGGER.debug("Sent {} online accounts to {}", outgoingOnlineAccounts.size(), peer); } } diff --git a/src/main/java/org/qortal/data/network/OnlineAccountData.java b/src/main/java/org/qortal/data/network/OnlineAccountData.java index 15792307..99c181ba 100644 --- a/src/main/java/org/qortal/data/network/OnlineAccountData.java +++ b/src/main/java/org/qortal/data/network/OnlineAccountData.java @@ -5,6 +5,7 @@ import java.util.Arrays; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlTransient; import org.qortal.account.PublicKeyAccount; @@ -16,6 +17,9 @@ public class OnlineAccountData { protected byte[] signature; protected byte[] publicKey; + @XmlTransient + private int hash; + // Constructors // necessary for JAXB serialization @@ -74,8 +78,13 @@ public class OnlineAccountData { @Override public int hashCode() { - // Pretty lazy implementation - return (int) this.timestamp; + int h = this.hash; + if (h == 0) { + this.hash = h = Long.hashCode(this.timestamp) + ^ Arrays.hashCode(this.publicKey) + ^ Arrays.hashCode(this.signature); + } + return h; } } diff --git a/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java index 3394213b..6136c1e1 100644 --- a/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java +++ 
b/src/test/java/org/qortal/test/network/OnlineAccountsV3Tests.java @@ -1,17 +1,14 @@ package org.qortal.test.network; -import com.google.common.primitives.Ints; -import com.google.common.primitives.Longs; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; import org.junit.Ignore; import org.junit.Test; +import org.qortal.controller.OnlineAccountsManager; import org.qortal.data.network.OnlineAccountData; import org.qortal.network.message.*; import org.qortal.transform.Transformer; -import java.io.ByteArrayOutputStream; -import java.io.IOException; import java.nio.ByteBuffer; import java.security.Security; import java.util.*; @@ -74,24 +71,12 @@ public class OnlineAccountsV3Tests { hashesByTimestampThenByte .computeIfAbsent(timestamp, k -> new HashMap<>()) - .compute(leadingByte, (k, v) -> xorByteArrayInPlace(v, onlineAccountData.getPublicKey())); + .compute(leadingByte, (k, v) -> OnlineAccountsManager.xorByteArrayInPlace(v, onlineAccountData.getPublicKey())); } return hashesByTimestampThenByte; } - // TODO: This needs to be moved - probably to be OnlineAccountsManager - private static byte[] xorByteArrayInPlace(byte[] inplaceArray, byte[] otherArray) { - if (inplaceArray == null) - return Arrays.copyOf(otherArray, otherArray.length); - - // Start from index 1 to enforce static leading byte - for (int i = 1; i < otherArray.length; i++) - inplaceArray[i] ^= otherArray[otherArray.length - i - 1]; - - return inplaceArray; - } - @Test public void testOnGetOnlineAccountsV3() { List ourOnlineAccounts = generateOnlineAccounts(false); From 712c4463f79234051f00566ba575c7b3646982fc Mon Sep 17 00:00:00 2001 From: catbref Date: Sun, 1 May 2022 14:41:22 +0100 Subject: [PATCH 04/18] OnlineAccountsV3: Move online account cache code from Block into OnlineAccountsManager, simplifying Block code and removing duplicated caches from Block also. This tidies up those remaining set-based getters in OnlineAccountsManager. No need for currentOnlineAccountsHashes's inner Map to be sorted so addAccounts() creates new ConcurentHashMap insteaad of ConcurrentSkipListMap. Changed GetOnlineAccountsV3Message to use a single byte for count of hashes as it can only be 1 to 256. 256 is represented by 0. Comments tidy-up. Change v3 broadcast interval from 10s to 15s. --- src/main/java/org/qortal/block/Block.java | 48 +++------- .../controller/OnlineAccountsManager.java | 92 ++++++++++--------- .../message/GetOnlineAccountsV3Message.java | 36 ++++---- 3 files changed, 83 insertions(+), 93 deletions(-) diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index a0cba9bb..9938db42 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -221,11 +221,10 @@ public class Block { return accountAmount; } } + /** Always use getExpandedAccounts() to access this, as it's lazy-instantiated. */ private List cachedExpandedAccounts = null; - /** Opportunistic cache of this block's valid online accounts. Only created by call to isValid(). */ - private List cachedValidOnlineAccounts = null; /** Opportunistic cache of this block's valid online reward-shares. Only created by call to isValid(). 
*/ private List cachedOnlineRewardShares = null; @@ -1020,42 +1019,31 @@ public class Block { long onlineTimestamp = this.blockData.getOnlineAccountsTimestamp(); byte[] onlineTimestampBytes = Longs.toByteArray(onlineTimestamp); - // If this block is much older than current online timestamp, then there's no point checking current online accounts - List currentOnlineAccounts = onlineTimestamp < NTP.getTime() - OnlineAccountsManager.ONLINE_TIMESTAMP_MODULUS - ? null - : OnlineAccountsManager.getInstance().getOnlineAccounts(onlineTimestamp); - List latestBlocksOnlineAccounts = OnlineAccountsManager.getInstance().getLatestBlocksOnlineAccounts(onlineTimestamp); - // Extract online accounts' timestamp signatures from block data List onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures()); - // We'll build up a list of online accounts to hand over to Controller if block is added to chain - // and this will become latestBlocksOnlineAccounts (above) to reduce CPU load when we process next block... - List ourOnlineAccounts = new ArrayList<>(); - + // Convert + Set onlineAccounts = new HashSet<>(); for (int i = 0; i < onlineAccountsSignatures.size(); ++i) { byte[] signature = onlineAccountsSignatures.get(i); byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey(); OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey); - ourOnlineAccounts.add(onlineAccountData); - - // If signature is still current then no need to perform Ed25519 verify - if (currentOnlineAccounts != null && currentOnlineAccounts.remove(onlineAccountData)) - // remove() returned true, so online account still current - // and one less entry in currentOnlineAccounts to check next time - continue; - - // If signature was okay in latest block then no need to perform Ed25519 verify - if (latestBlocksOnlineAccounts != null && latestBlocksOnlineAccounts.contains(onlineAccountData)) - continue; - - if (!Crypto.verify(publicKey, signature, onlineTimestampBytes)) - return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + onlineAccounts.add(onlineAccountData); } + // Remove those already validated & cached by online accounts manager - no need to re-validate them + OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp); + + // Validate the rest + for (OnlineAccountData onlineAccount : onlineAccounts) + if (!Crypto.verify(onlineAccount.getPublicKey(), onlineAccount.getSignature(), onlineTimestampBytes)) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + + // We've validated these, so allow online accounts manager to cache + OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp); + // All online accounts valid, so save our list of online accounts for potential later use - this.cachedValidOnlineAccounts = ourOnlineAccounts; this.cachedOnlineRewardShares = onlineRewardShares; return ValidationResult.OK; @@ -1426,9 +1414,6 @@ public class Block { postBlockTidy(); - // Give Controller our cached, valid online accounts data (if any) to help reduce CPU load for next block - OnlineAccountsManager.getInstance().pushLatestBlocksOnlineAccounts(this.cachedValidOnlineAccounts); - // Log some debugging info relating to the block weight calculation this.logDebugInfo(); } @@ -1644,9 +1629,6 @@ public class Block { this.blockData.setHeight(null); postBlockTidy(); - - // Remove any cached, valid online accounts data from Controller - 
OnlineAccountsManager.getInstance().popLatestBlocksOnlineAccounts(); } protected void orphanTransactionsFromBlock() throws DataException { diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index 4e8b3c77..53fdef8b 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -6,6 +6,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.qortal.account.Account; import org.qortal.account.PrivateKeyAccount; +import org.qortal.block.Block; import org.qortal.block.BlockChain; import org.qortal.crypto.Crypto; import org.qortal.data.account.MintingAccountData; @@ -49,7 +50,7 @@ public class OnlineAccountsManager { private static final long ONLINE_ACCOUNTS_QUEUE_INTERVAL = 100L; //ms private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms private static final long ONLINE_ACCOUNTS_LEGACY_BROADCAST_INTERVAL = 60 * 1000L; // ms - private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 10 * 1000L; // ms + private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 15 * 1000L; // ms private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; // v3.2.0 private static final long ONLINE_ACCOUNTS_V3_PEER_VERSION = 0x03000200cbL; // v3.2.203 @@ -65,17 +66,14 @@ public class OnlineAccountsManager { private final Map> currentOnlineAccounts = new ConcurrentHashMap<>(); /** * Cache of hash-summary of 'current' online accounts, keyed by timestamp, then leading byte of public key. - *
- * Inner map is also sorted using {@code Byte::compareUnsigned} as a comparator. - * This is critical for proper function of GET_ONLINE_ACCOUNTS_V3 protocol. */ private final Map> currentOnlineAccountsHashes = new ConcurrentHashMap<>(); /** * Cache of online accounts for latest blocks - not necessarily 'current' / now. - * Probably only accessed / modified by a single Synchronizer thread. + * Probably only accessed / modified by a single Synchronizer thread. */ - private final Map> latestBlocksOnlineAccounts = new ConcurrentHashMap<>(); + private final SortedMap> latestBlocksOnlineAccounts = new ConcurrentSkipListMap<>(); public static long toOnlineAccountTimestamp(long timestamp) { return (timestamp / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS; @@ -157,7 +155,7 @@ public class OnlineAccountsManager { if (isStopping) return; - boolean isValid = this.validateAccount(repository, onlineAccountData); + boolean isValid = this.isValidCurrentAccount(repository, onlineAccountData); if (isValid) onlineAccountsToAdd.add(onlineAccountData); @@ -170,7 +168,7 @@ public class OnlineAccountsManager { LOGGER.error("Repository issue while verifying online accounts", e); } - LOGGER.debug("Adding {} validated online accounts from import queue", onlineAccountsToAdd.size()); + LOGGER.debug("Merging {} validated online accounts from import queue", onlineAccountsToAdd.size()); addAccounts(onlineAccountsToAdd); } @@ -182,13 +180,12 @@ public class OnlineAccountsManager { // Start from index 1 to enforce static leading byte for (int i = 1; i < otherArray.length; i++) - // inplaceArray[i] ^= otherArray[otherArray.length - i - 1]; inplaceArray[i] ^= otherArray[i]; return inplaceArray; } - private boolean validateAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { + private static boolean isValidCurrentAccount(Repository repository, OnlineAccountData onlineAccountData) throws DataException { final Long now = NTP.getTime(); if (now == null) return false; @@ -202,7 +199,7 @@ public class OnlineAccountsManager { return false; } - // Verify + // Verify signature byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); if (!Crypto.verify(rewardSharePublicKey, onlineAccountData.getSignature(), data)) { LOGGER.trace(() -> String.format("Rejecting invalid online account %s", Base58.encode(rewardSharePublicKey))); @@ -241,7 +238,7 @@ public class OnlineAccountsManager { for (var entry : hashesToRebuild.entrySet()) { Long timestamp = entry.getKey(); - LOGGER.debug(String.format("Rehashing for timestamp %d and leading bytes %s", + LOGGER.debug(() -> String.format("Rehashing for timestamp %d and leading bytes %s", timestamp, entry.getValue().stream().sorted(Byte::compareUnsigned).map(leadingByte -> String.format("%02x", leadingByte)).collect(Collectors.joining(", ")) ) @@ -253,7 +250,7 @@ public class OnlineAccountsManager { .filter(publicKey -> leadingByte == publicKey[0]) .reduce(null, OnlineAccountsManager::xorByteArrayInPlace); - currentOnlineAccountsHashes.computeIfAbsent(timestamp, k -> new ConcurrentSkipListMap<>(Byte::compareUnsigned)).put(leadingByte, pubkeyHash); + currentOnlineAccountsHashes.computeIfAbsent(timestamp, k -> new ConcurrentHashMap<>()).put(leadingByte, pubkeyHash); LOGGER.trace(() -> String.format("Rebuilt hash %s for timestamp %d and leading byte %02x using %d public keys", HashCode.fromBytes(pubkeyHash), @@ -447,44 +444,53 @@ public class OnlineAccountsManager { return getOnlineAccounts(onlineTimestamp); } - /** - * Returns cached, 
unmodifiable list of latest block's online accounts. - */ - // TODO: this needs tidying up - do we change method to only return latest timestamp's set? - // Block::areOnlineAccountsValid() - only wants online accounts with timestamp that matches latest / previous block's timestamp to avoid re-verifying sigs - public List getLatestBlocksOnlineAccounts(long blockOnlineTimestamp) { - Set onlineAccounts = this.latestBlocksOnlineAccounts.getOrDefault(blockOnlineTimestamp, Collections.emptySet()); + // Block processing - return List.copyOf(onlineAccounts); + /** + * Removes previously validated entries from block's online accounts. + *
+ * Checks both 'current' and block caches. + *
+ * Typically called by {@link Block#areOnlineAccountsValid()} + */ + public void removeKnown(Set blocksOnlineAccounts, Long timestamp) { + Set onlineAccounts = this.currentOnlineAccounts.get(timestamp); + + // If not 'current' timestamp - try block cache instead + if (onlineAccounts == null) + onlineAccounts = this.latestBlocksOnlineAccounts.get(timestamp); + + if (onlineAccounts != null) + blocksOnlineAccounts.removeAll(onlineAccounts); } /** - * Caches list of latest block's online accounts. Typically called by Block.process() + * Adds block's online accounts to one of OnlineAccountManager's caches. + *
+ * It is assumed that the online accounts have been verified. + *
+ * Typically called by {@link Block#areOnlineAccountsValid()} */ - // TODO: is this simply a bulk add, like the import queue but blocking? Used by Synchronizer but could be for blocks that are quite historic? - // Block::process() - basically for adding latest block's online accounts to cache to avoid re-verifying when processing another block in the future - public void pushLatestBlocksOnlineAccounts(List latestBlocksOnlineAccounts) { - if (latestBlocksOnlineAccounts == null || latestBlocksOnlineAccounts.isEmpty()) + public void addBlocksOnlineAccounts(Set blocksOnlineAccounts, Long timestamp) { + // We want to add to 'current' in preference if possible + if (this.currentOnlineAccounts.containsKey(timestamp)) { + addAccounts(blocksOnlineAccounts); return; + } - long timestamp = latestBlocksOnlineAccounts.get(0).getTimestamp(); + // Add to block cache instead + this.latestBlocksOnlineAccounts.computeIfAbsent(timestamp, k -> ConcurrentHashMap.newKeySet()) + .addAll(blocksOnlineAccounts); - this.latestBlocksOnlineAccounts.computeIfAbsent(timestamp, k -> ConcurrentHashMap.newKeySet()).addAll(latestBlocksOnlineAccounts); - - if (this.latestBlocksOnlineAccounts.size() > MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) - this.latestBlocksOnlineAccounts.keySet().stream() - .sorted() - .findFirst() - .ifPresent(this.latestBlocksOnlineAccounts::remove); - } - - /** - * Reverts list of latest block's online accounts. Typically called by Block.orphan() - */ - // TODO: see above - // Block::orphan() - for removing latest block's online accounts from cache - public void popLatestBlocksOnlineAccounts() { - // NO-OP + // If block cache has grown too large then we need to trim. + if (this.latestBlocksOnlineAccounts.size() > MAX_BLOCKS_CACHED_ONLINE_ACCOUNTS) { + // However, be careful to trim the opposite end to the entry we just added! + Long firstKey = this.latestBlocksOnlineAccounts.firstKey(); + if (!firstKey.equals(timestamp)) + this.latestBlocksOnlineAccounts.remove(firstKey); + else + this.latestBlocksOnlineAccounts.remove(this.latestBlocksOnlineAccounts.lastKey()); + } } diff --git a/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java index 02fed2a9..66c7c47a 100644 --- a/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java +++ b/src/main/java/org/qortal/network/message/GetOnlineAccountsV3Message.java @@ -1,6 +1,5 @@ package org.qortal.network.message; -import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import org.qortal.transform.Transformer; @@ -11,11 +10,15 @@ import java.util.*; /** * For requesting online accounts info from remote peer, given our list of online accounts. - * - * Different format to V1 and V2: - * V1 is: number of entries, then timestamp + pubkey for each entry - * V2 is: groups of: number of entries, timestamp, then pubkey for each entry - * V3 is: groups of: timestamp, number of entries (one per leading byte), then hash(pubkeys) for each entry + *
+ * Different format to V1 and V2:
+ * <ul>
+ * <li>V1 is: number of entries, then timestamp + pubkey for each entry</li>
+ * <li>V2 is: groups of: number of entries, timestamp, then pubkey for each entry</li>
+ * <li>V3 is: groups of: timestamp, number of entries (one per leading byte), then hash(pubkeys) for each entry</li>
+ * </ul>
+ * End */ public class GetOnlineAccountsV3Message extends Message { @@ -32,8 +35,7 @@ public class GetOnlineAccountsV3Message extends Message { } // We should know exactly how many bytes to allocate now - int byteSize = hashesByTimestampThenByte.size() * (Transformer.TIMESTAMP_LENGTH + Transformer.INT_LENGTH) - + Transformer.TIMESTAMP_LENGTH /* trailing zero entry indicates end of entries */; + int byteSize = hashesByTimestampThenByte.size() * (Transformer.TIMESTAMP_LENGTH + Transformer.BYTE_LENGTH); byteSize += hashesByTimestampThenByte.values() .stream() @@ -50,15 +52,13 @@ public class GetOnlineAccountsV3Message extends Message { var innerMap = outerMapEntry.getValue(); - bytes.write(Ints.toByteArray(innerMap.size())); + // Number of entries: 1 - 256, where 256 is represented by 0 + bytes.write(innerMap.size() & 0xFF); for (byte[] hashBytes : innerMap.values()) { bytes.write(hashBytes); } } - - // end of records - bytes.write(Longs.toByteArray(0L)); } catch (IOException e) { throw new AssertionError("IOException shouldn't occur with ByteArrayOutputStream"); } @@ -85,13 +85,15 @@ public class GetOnlineAccountsV3Message extends Message { Map> hashesByTimestampThenByte = new HashMap<>(); - while (true) { + while (bytes.hasRemaining()) { long timestamp = bytes.getLong(); - if (timestamp == 0) - // Zero timestamp indicates end of records - break; - int hashCount = bytes.getInt(); + int hashCount = bytes.get(); + if (hashCount <= 0) + // 256 is represented by 0. + // Also converts negative signed value (e.g. -1) to proper positive unsigned value (255) + hashCount += 256; + Map hashesByByte = new HashMap<>(); for (int i = 0; i < hashCount; ++i) { From ae92a6eed4d0dbd1ea3fbe90e59b544abaf86568 Mon Sep 17 00:00:00 2001 From: catbref Date: Mon, 2 May 2022 15:38:15 +0100 Subject: [PATCH 05/18] OnlineAccountsV3: slightly rework Block.mint() so it doesn't need to filter so many online accounts Slight optimization to BlockMinter by adding OnlineAccountsManager.hasOnlineAccounts():boolean instead of returning actual data, only to call isEmpty()! 
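For illustration only (not part of the diff that follows): the rework derives a single online-accounts timestamp from the candidate block's timestamp and fetches just that bucket's accounts, instead of fetching everything and filtering per account. A minimal sketch of the bucketing, assuming a 5-minute ONLINE_TIMESTAMP_MODULUS as used by OnlineAccountsManager; the wrapper class and sample value below are hypothetical:

    public class OnlineAccountTimestampSketch {
        // Assumed value for illustration; the real constant lives in OnlineAccountsManager.
        private static final long ONLINE_TIMESTAMP_MODULUS = 5 * 60 * 1000L;

        // Same quantisation as toOnlineAccountTimestamp(): round down to the start of the bucket.
        static long toOnlineAccountTimestamp(long timestamp) {
            return (timestamp / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS;
        }

        public static void main(String[] args) {
            long blockTimestamp = 1_650_000_123_456L; // hypothetical block timestamp
            long bucket = toOnlineAccountTimestamp(blockTimestamp);
            // Every timestamp inside the same 5-minute window maps to the same bucket,
            // so Block.mint() can request online accounts for exactly one bucket.
            System.out.println(blockTimestamp + " -> " + bucket);
        }
    }

Because all accounts for a window collapse onto one bucket timestamp, mint() no longer needs the per-account timestamp filter removed in the hunks below.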
--- src/main/java/org/qortal/block/Block.java | 35 +++++++------------ .../org/qortal/controller/BlockMinter.java | 4 +-- .../controller/OnlineAccountsManager.java | 15 +++++++- 3 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 9938db42..991c7601 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -346,18 +346,22 @@ public class Block { int version = parentBlock.getNextBlockVersion(); byte[] reference = parentBlockData.getSignature(); - // Fetch our list of online accounts - List onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(); - if (onlineAccounts.isEmpty()) { - LOGGER.error("No online accounts - not even our own?"); + // Qortal: minter is always a reward-share, so find actual minter and get their effective minting level + int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey()); + if (minterLevel == 0) { + LOGGER.error("Minter effective level returned zero?"); return null; } - // Find newest online accounts timestamp - long onlineAccountsTimestamp = 0; - for (OnlineAccountData onlineAccountData : onlineAccounts) { - if (onlineAccountData.getTimestamp() > onlineAccountsTimestamp) - onlineAccountsTimestamp = onlineAccountData.getTimestamp(); + long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel); + + long onlineAccountsTimestamp = OnlineAccountsManager.toOnlineAccountTimestamp(timestamp); + + // Fetch our list of online accounts + List onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(onlineAccountsTimestamp); + if (onlineAccounts.isEmpty()) { + LOGGER.error("No online accounts - not even our own?"); + return null; } // Load sorted list of reward share public keys into memory, so that the indexes can be obtained. 
@@ -368,10 +372,6 @@ public class Block { // Map using index into sorted list of reward-shares as key Map indexedOnlineAccounts = new HashMap<>(); for (OnlineAccountData onlineAccountData : onlineAccounts) { - // Disregard online accounts with different timestamps - if (onlineAccountData.getTimestamp() != onlineAccountsTimestamp) - continue; - Integer accountIndex = getRewardShareIndex(onlineAccountData.getPublicKey(), allRewardSharePublicKeys); if (accountIndex == null) // Online account (reward-share) with current timestamp but reward-share cancelled @@ -399,15 +399,6 @@ public class Block { byte[] minterSignature = minter.sign(BlockTransformer.getBytesForMinterSignature(parentBlockData, minter.getPublicKey(), encodedOnlineAccounts)); - // Qortal: minter is always a reward-share, so find actual minter and get their effective minting level - int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, minter.getPublicKey()); - if (minterLevel == 0) { - LOGGER.error("Minter effective level returned zero?"); - return null; - } - - long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel); - int transactionCount = 0; byte[] transactionsSignature = null; int height = parentBlockData.getHeight() + 1; diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java index 9966d6a9..76b57c44 100644 --- a/src/main/java/org/qortal/controller/BlockMinter.java +++ b/src/main/java/org/qortal/controller/BlockMinter.java @@ -114,8 +114,8 @@ public class BlockMinter extends Thread { if (minLatestBlockTimestamp == null) continue; - // No online accounts? (e.g. during startup) - if (OnlineAccountsManager.getInstance().getOnlineAccounts().isEmpty()) + // No online accounts for current timestamp? (e.g. during startup) + if (!OnlineAccountsManager.getInstance().hasOnlineAccounts()) continue; List mintingAccountsData = repository.getAccountRepository().getMintingAccounts(); diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index 53fdef8b..f9e479cf 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -420,6 +420,20 @@ public class OnlineAccountsManager { LOGGER.debug("Broadcasted {} online account{} with timestamp {}", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp); } + /** + * Returns whether online accounts manager has any online accounts with timestamp recent enough to be considered currently online. + */ + // BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not + public boolean hasOnlineAccounts() { + final Long now = NTP.getTime(); + if (now == null) + return false; + + final long onlineTimestamp = toOnlineAccountTimestamp(now); + + return this.currentOnlineAccounts.containsKey(onlineTimestamp); + } + /** * Returns list of online accounts matching given timestamp. */ @@ -433,7 +447,6 @@ public class OnlineAccountsManager { * Returns list of online accounts with timestamp recent enough to be considered currently online. 
*/ // API: calls this to return list of online accounts - probably expects ALL timestamps - but going to get 'current' from now on - // BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not public List getOnlineAccounts() { final Long now = NTP.getTime(); if (now == null) From c032b92d0da458f3fbd7d405761812afac1144ca Mon Sep 17 00:00:00 2001 From: catbref Date: Sat, 14 May 2022 17:44:29 +0100 Subject: [PATCH 06/18] Logging fix: size() was called on wrong collection, leading to confusing logging output --- src/main/java/org/qortal/controller/OnlineAccountsManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index f9e479cf..d5b80f5e 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -652,7 +652,7 @@ public class OnlineAccountsManager { .filter(account -> outgoingLeadingBytes.contains(account.getPublicKey()[0])) .forEach(outgoingOnlineAccounts::add); - if (outgoingLeadingBytes.size() > beforeAddSize) + if (outgoingOnlineAccounts.size() > beforeAddSize) LOGGER.debug(String.format("Going to send %d online accounts for timestamp %d and leading bytes %s", outgoingOnlineAccounts.size() - beforeAddSize, timestamp, From d9b330b46a8b7b98afcec97a3e21cf6d0dc06c1c Mon Sep 17 00:00:00 2001 From: catbref Date: Tue, 17 May 2022 21:59:55 +0100 Subject: [PATCH 07/18] OnlineAccountData no longer uses signature in equals() or hashCode() because newer aggregate signatures use random nonces and OAD class doesn't care about / verify sigs --- .../org/qortal/data/network/OnlineAccountData.java | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/main/java/org/qortal/data/network/OnlineAccountData.java b/src/main/java/org/qortal/data/network/OnlineAccountData.java index 99c181ba..28c454b5 100644 --- a/src/main/java/org/qortal/data/network/OnlineAccountData.java +++ b/src/main/java/org/qortal/data/network/OnlineAccountData.java @@ -66,13 +66,11 @@ public class OnlineAccountData { if (otherOnlineAccountData.timestamp != this.timestamp) return false; - // Signature more likely to be unique than public key - if (!Arrays.equals(otherOnlineAccountData.signature, this.signature)) - return false; - if (!Arrays.equals(otherOnlineAccountData.publicKey, this.publicKey)) return false; + // We don't compare signature because it's not our remit to verify and newer aggregate signatures use random nonces + return true; } @@ -81,8 +79,8 @@ public class OnlineAccountData { int h = this.hash; if (h == 0) { this.hash = h = Long.hashCode(this.timestamp) - ^ Arrays.hashCode(this.publicKey) - ^ Arrays.hashCode(this.signature); + ^ Arrays.hashCode(this.publicKey); + // We don't use signature because newer aggregate signatures use random nonces } return h; } From 829ab1eb37852d52c0ca0899203253f85710b9ce Mon Sep 17 00:00:00 2001 From: catbref Date: Sat, 4 Jun 2022 12:28:24 +0100 Subject: [PATCH 08/18] Cherry-pick minor fixes from another branch to resolve "No online accounts - not even our own?" 
issues --- src/main/java/org/qortal/block/Block.java | 2 +- .../controller/OnlineAccountsManager.java | 71 +++++++++---------- 2 files changed, 35 insertions(+), 38 deletions(-) diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 991c7601..41700714 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -355,7 +355,7 @@ public class Block { long timestamp = calcTimestamp(parentBlockData, minter.getPublicKey(), minterLevel); - long onlineAccountsTimestamp = OnlineAccountsManager.toOnlineAccountTimestamp(timestamp); + long onlineAccountsTimestamp = OnlineAccountsManager.getCurrentOnlineAccountTimestamp(); // Fetch our list of online accounts List onlineAccounts = OnlineAccountsManager.getInstance().getOnlineAccounts(onlineAccountsTimestamp); diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index d5b80f5e..bd4880c4 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -53,7 +53,7 @@ public class OnlineAccountsManager { private static final long ONLINE_ACCOUNTS_BROADCAST_INTERVAL = 15 * 1000L; // ms private static final long ONLINE_ACCOUNTS_V2_PEER_VERSION = 0x0300020000L; // v3.2.0 - private static final long ONLINE_ACCOUNTS_V3_PEER_VERSION = 0x03000200cbL; // v3.2.203 + private static final long ONLINE_ACCOUNTS_V3_PEER_VERSION = 0x03000300cbL; // v3.3.203 private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("OnlineAccounts")); private volatile boolean isStopping = false; @@ -75,8 +75,12 @@ public class OnlineAccountsManager { */ private final SortedMap> latestBlocksOnlineAccounts = new ConcurrentSkipListMap<>(); - public static long toOnlineAccountTimestamp(long timestamp) { - return (timestamp / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS; + public static Long getCurrentOnlineAccountTimestamp() { + Long now = NTP.getTime(); + if (now == null) + return null; + + return (now / ONLINE_TIMESTAMP_MODULUS) * ONLINE_TIMESTAMP_MODULUS; } private OnlineAccountsManager() { @@ -118,11 +122,10 @@ public class OnlineAccountsManager { return; } - final Long now = NTP.getTime(); - if (now == null) + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) return; - final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); Set replacementAccounts = new HashSet<>(); @@ -162,14 +165,14 @@ public class OnlineAccountsManager { // Remove from queue onlineAccountsImportQueue.remove(onlineAccountData); } - - LOGGER.debug("Finished validating online accounts import queue"); } catch (DataException e) { LOGGER.error("Repository issue while verifying online accounts", e); } - LOGGER.debug("Merging {} validated online accounts from import queue", onlineAccountsToAdd.size()); - addAccounts(onlineAccountsToAdd); + if (!onlineAccountsToAdd.isEmpty()) { + LOGGER.debug("Merging {} validated online accounts from import queue", onlineAccountsToAdd.size()); + addAccounts(onlineAccountsToAdd); + } } // Utilities @@ -224,7 +227,8 @@ public class OnlineAccountsManager { return true; } - private void addAccounts(Set onlineAccountsToAdd) { + /** Adds accounts, maybe rebuilds hashes, returns whether any new accounts were added / hashes rebuilt. 
*/ + private boolean addAccounts(Collection onlineAccountsToAdd) { // For keeping track of which hashes to rebuild Map> hashesToRebuild = new HashMap<>(); @@ -235,6 +239,9 @@ public class OnlineAccountsManager { hashesToRebuild.computeIfAbsent(onlineAccountData.getTimestamp(), k -> new HashSet<>()).add(onlineAccountData.getPublicKey()[0]); } + if (hashesToRebuild.isEmpty()) + return false; + for (var entry : hashesToRebuild.entrySet()) { Long timestamp = entry.getKey(); @@ -263,6 +270,8 @@ public class OnlineAccountsManager { )); } } + + return true; } private boolean addAccount(OnlineAccountData onlineAccountData) { @@ -341,10 +350,10 @@ public class OnlineAccountsManager { * Send online accounts that are minting on this node. */ private void sendOurOnlineAccountsInfo() { - final Long now = NTP.getTime(); - if (now == null) { + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) return; - } List mintingAccounts; try (final Repository repository = RepositoryManager.getRepository()) { @@ -378,10 +387,6 @@ public class OnlineAccountsManager { return; } - // 'current' timestamp - final long onlineAccountsTimestamp = toOnlineAccountTimestamp(now); - boolean hasInfoChanged = false; - byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); List ourOnlineAccounts = new ArrayList<>(); @@ -393,15 +398,10 @@ public class OnlineAccountsManager { // Our account is online OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); - - boolean isNewEntry = addAccount(ourOnlineAccountData); - if (isNewEntry) { - LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", Base58.encode(mintingAccount.getPublicKey()), onlineAccountsTimestamp)); - ourOnlineAccounts.add(ourOnlineAccountData); - hasInfoChanged = true; - } + ourOnlineAccounts.add(ourOnlineAccountData); } + boolean hasInfoChanged = addAccounts(ourOnlineAccounts); if (!hasInfoChanged) return; @@ -425,20 +425,18 @@ public class OnlineAccountsManager { */ // BlockMinter: only calls this to check whether returned list is empty or not, to determine whether minting is even possible or not public boolean hasOnlineAccounts() { - final Long now = NTP.getTime(); - if (now == null) + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) return false; - final long onlineTimestamp = toOnlineAccountTimestamp(now); - - return this.currentOnlineAccounts.containsKey(onlineTimestamp); + return this.currentOnlineAccounts.containsKey(onlineAccountsTimestamp); } /** * Returns list of online accounts matching given timestamp. 
*/ - // Block::mint() - only wants online accounts with timestamp that matches block's timestamp so they can be added to new block - // Block::areOnlineAccountsValid() - only wants online accounts with timestamp that matches block's timestamp to avoid re-verifying sigs + // Block::mint() - only wants online accounts with (online) timestamp that matches block's (online) timestamp so they can be added to new block public List getOnlineAccounts(long onlineTimestamp) { return new ArrayList<>(Set.copyOf(this.currentOnlineAccounts.getOrDefault(onlineTimestamp, Collections.emptySet()))); } @@ -448,13 +446,12 @@ public class OnlineAccountsManager { */ // API: calls this to return list of online accounts - probably expects ALL timestamps - but going to get 'current' from now on public List getOnlineAccounts() { - final Long now = NTP.getTime(); - if (now == null) + // 'current' timestamp + final Long onlineAccountsTimestamp = getCurrentOnlineAccountTimestamp(); + if (onlineAccountsTimestamp == null) return Collections.emptyList(); - final long onlineTimestamp = toOnlineAccountTimestamp(now); - - return getOnlineAccounts(onlineTimestamp); + return getOnlineAccounts(onlineAccountsTimestamp); } // Block processing From c5e5316f2e28966e717239720557dab8ef72111d Mon Sep 17 00:00:00 2001 From: catbref Date: Sun, 8 May 2022 09:29:13 +0100 Subject: [PATCH 09/18] Schnorr public key and signature aggregation for 'online accounts'. Aggregated signature should reduce block payload significantly, as well as associated network, memory & CPU loads. org.qortal.crypto.BouncyCastle25519 renamed to Qortal25519Extras. Our class provides additional features such as DH-based shared secret, aggregating public keys & signatures and sign/verify for aggregate use. BouncyCastle's Ed25519 class copied in as BouncyCastleEd25519, but with 'private' modifiers changed to 'protected', to allow extension by our Qortal25519Extras class, and to avoid lots of messy reflection-based calls. --- .../org/qortal/crypto/BouncyCastle25519.java | 99 -- .../qortal/crypto/BouncyCastleEd25519.java | 1427 +++++++++++++++++ src/main/java/org/qortal/crypto/Crypto.java | 4 +- .../org/qortal/crypto/Qortal25519Extras.java | 234 +++ .../java/org/qortal/test/CryptoTests.java | 14 +- .../java/org/qortal/test/SchnorrTests.java | 190 +++ 6 files changed, 1860 insertions(+), 108 deletions(-) delete mode 100644 src/main/java/org/qortal/crypto/BouncyCastle25519.java create mode 100644 src/main/java/org/qortal/crypto/BouncyCastleEd25519.java create mode 100644 src/main/java/org/qortal/crypto/Qortal25519Extras.java create mode 100644 src/test/java/org/qortal/test/SchnorrTests.java diff --git a/src/main/java/org/qortal/crypto/BouncyCastle25519.java b/src/main/java/org/qortal/crypto/BouncyCastle25519.java deleted file mode 100644 index 1a2e0de9..00000000 --- a/src/main/java/org/qortal/crypto/BouncyCastle25519.java +++ /dev/null @@ -1,99 +0,0 @@ -package org.qortal.crypto; - -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.Arrays; - -import org.bouncycastle.crypto.Digest; -import org.bouncycastle.math.ec.rfc7748.X25519; -import org.bouncycastle.math.ec.rfc7748.X25519Field; -import org.bouncycastle.math.ec.rfc8032.Ed25519; - -/** Additions to BouncyCastle providing Ed25519 to X25519 key conversion. 
*/ -public class BouncyCastle25519 { - - private static final Class pointAffineClass; - private static final Constructor pointAffineCtor; - private static final Method decodePointVarMethod; - private static final Field yField; - - static { - try { - Class ed25519Class = Ed25519.class; - pointAffineClass = Arrays.stream(ed25519Class.getDeclaredClasses()).filter(clazz -> clazz.getSimpleName().equals("PointAffine")).findFirst().get(); - if (pointAffineClass == null) - throw new ClassNotFoundException("Can't locate PointExt inner class inside Ed25519"); - - decodePointVarMethod = ed25519Class.getDeclaredMethod("decodePointVar", byte[].class, int.class, boolean.class, pointAffineClass); - decodePointVarMethod.setAccessible(true); - - pointAffineCtor = pointAffineClass.getDeclaredConstructors()[0]; - pointAffineCtor.setAccessible(true); - - yField = pointAffineClass.getDeclaredField("y"); - yField.setAccessible(true); - } catch (NoSuchMethodException | SecurityException | IllegalArgumentException | NoSuchFieldException | ClassNotFoundException e) { - throw new RuntimeException("Can't initialize BouncyCastle25519 shim", e); - } - } - - private static int[] obtainYFromPublicKey(byte[] ed25519PublicKey) { - try { - Object pA = pointAffineCtor.newInstance(); - - Boolean result = (Boolean) decodePointVarMethod.invoke(null, ed25519PublicKey, 0, true, pA); - if (result == null || !result) - return null; - - return (int[]) yField.get(pA); - } catch (SecurityException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - throw new RuntimeException("Can't reflect into BouncyCastle", e); - } - } - - public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) { - int[] one = new int[X25519Field.SIZE]; - X25519Field.one(one); - - int[] y = obtainYFromPublicKey(ed25519PublicKey); - - int[] oneMinusY = new int[X25519Field.SIZE]; - X25519Field.sub(one, y, oneMinusY); - - int[] onePlusY = new int[X25519Field.SIZE]; - X25519Field.add(one, y, onePlusY); - - int[] oneMinusYInverted = new int[X25519Field.SIZE]; - X25519Field.inv(oneMinusY, oneMinusYInverted); - - int[] u = new int[X25519Field.SIZE]; - X25519Field.mul(onePlusY, oneMinusYInverted, u); - - X25519Field.normalize(u); - - byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE]; - X25519Field.encode(u, x25519PublicKey, 0); - - return x25519PublicKey; - } - - public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) { - Digest d = Ed25519.createPrehash(); - byte[] h = new byte[d.getDigestSize()]; - - d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length); - d.doFinal(h, 0); - - byte[] s = new byte[X25519.SCALAR_SIZE]; - - System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE); - s[0] &= 0xF8; - s[X25519.SCALAR_SIZE - 1] &= 0x7F; - s[X25519.SCALAR_SIZE - 1] |= 0x40; - - return s; - } - -} diff --git a/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java b/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java new file mode 100644 index 00000000..ebcf0f97 --- /dev/null +++ b/src/main/java/org/qortal/crypto/BouncyCastleEd25519.java @@ -0,0 +1,1427 @@ +package org.qortal.crypto; + +import java.security.SecureRandom; + +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA512Digest; +import org.bouncycastle.math.ec.rfc7748.X25519; +import org.bouncycastle.math.ec.rfc7748.X25519Field; +import org.bouncycastle.math.raw.Interleave; +import org.bouncycastle.math.raw.Nat; +import org.bouncycastle.math.raw.Nat256; +import org.bouncycastle.util.Arrays; + +/** + * 
Duplicate of {@link org.bouncycastle.math.ec.rfc8032.Ed25519}, + * but with {@code private} modifiers replaced with {@code protected}, + * to allow for extension by {@link org.qortal.crypto.Qortal25519Extras}. + */ +public abstract class BouncyCastleEd25519 +{ + // -x^2 + y^2 == 1 + 0x52036CEE2B6FFE738CC740797779E89800700A4D4141D8AB75EB4DCA135978A3 * x^2 * y^2 + + public static final class Algorithm + { + public static final int Ed25519 = 0; + public static final int Ed25519ctx = 1; + public static final int Ed25519ph = 2; + } + + protected static class F extends X25519Field {}; + + protected static final long M08L = 0x000000FFL; + protected static final long M28L = 0x0FFFFFFFL; + protected static final long M32L = 0xFFFFFFFFL; + + protected static final int POINT_BYTES = 32; + protected static final int SCALAR_INTS = 8; + protected static final int SCALAR_BYTES = SCALAR_INTS * 4; + + public static final int PREHASH_SIZE = 64; + public static final int PUBLIC_KEY_SIZE = POINT_BYTES; + public static final int SECRET_KEY_SIZE = 32; + public static final int SIGNATURE_SIZE = POINT_BYTES + SCALAR_BYTES; + + // "SigEd25519 no Ed25519 collisions" + protected static final byte[] DOM2_PREFIX = new byte[]{ 0x53, 0x69, 0x67, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x20, 0x6e, 0x6f, 0x20, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, 0x20, 0x63, 0x6f, 0x6c, 0x6c, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x73 }; + + protected static final int[] P = new int[]{ 0xFFFFFFED, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0x7FFFFFFF }; + protected static final int[] L = new int[]{ 0x5CF5D3ED, 0x5812631A, 0xA2F79CD6, 0x14DEF9DE, 0x00000000, 0x00000000, + 0x00000000, 0x10000000 }; + + protected static final int L0 = 0xFCF5D3ED; // L0:26/-- + protected static final int L1 = 0x012631A6; // L1:24/22 + protected static final int L2 = 0x079CD658; // L2:27/-- + protected static final int L3 = 0xFF9DEA2F; // L3:23/-- + protected static final int L4 = 0x000014DF; // L4:12/11 + + protected static final int[] B_x = new int[]{ 0x0325D51A, 0x018B5823, 0x007B2C95, 0x0304A92D, 0x00D2598E, 0x01D6DC5C, + 0x01388C7F, 0x013FEC0A, 0x029E6B72, 0x0042D26D }; + protected static final int[] B_y = new int[]{ 0x02666658, 0x01999999, 0x00666666, 0x03333333, 0x00CCCCCC, 0x02666666, + 0x01999999, 0x00666666, 0x03333333, 0x00CCCCCC, }; + protected static final int[] C_d = new int[]{ 0x035978A3, 0x02D37284, 0x018AB75E, 0x026A0A0E, 0x0000E014, 0x0379E898, + 0x01D01E5D, 0x01E738CC, 0x03715B7F, 0x00A406D9 }; + protected static final int[] C_d2 = new int[]{ 0x02B2F159, 0x01A6E509, 0x01156EBD, 0x00D4141D, 0x0001C029, 0x02F3D130, + 0x03A03CBB, 0x01CE7198, 0x02E2B6FF, 0x00480DB3 }; + protected static final int[] C_d4 = new int[]{ 0x0165E2B2, 0x034DCA13, 0x002ADD7A, 0x01A8283B, 0x00038052, 0x01E7A260, + 0x03407977, 0x019CE331, 0x01C56DFF, 0x00901B67 }; + + protected static final int WNAF_WIDTH_BASE = 7; + + protected static final int PRECOMP_BLOCKS = 8; + protected static final int PRECOMP_TEETH = 4; + protected static final int PRECOMP_SPACING = 8; + protected static final int PRECOMP_POINTS = 1 << (PRECOMP_TEETH - 1); + protected static final int PRECOMP_MASK = PRECOMP_POINTS - 1; + + protected static final Object precompLock = new Object(); + // TODO[ed25519] Convert to PointPrecomp + protected static PointExt[] precompBaseTable = null; + protected static int[] precompBase = null; + + protected static class PointAccum + { + int[] x = F.create(); + int[] y = F.create(); + int[] z = F.create(); + int[] u = F.create(); + int[] 
v = F.create(); + } + + protected static class PointAffine + { + int[] x = F.create(); + int[] y = F.create(); + } + + protected static class PointExt + { + int[] x = F.create(); + int[] y = F.create(); + int[] z = F.create(); + int[] t = F.create(); + } + + protected static class PointPrecomp + { + int[] ypx_h = F.create(); + int[] ymx_h = F.create(); + int[] xyd = F.create(); + } + + protected static byte[] calculateS(byte[] r, byte[] k, byte[] s) + { + int[] t = new int[SCALAR_INTS * 2]; decodeScalar(r, 0, t); + int[] u = new int[SCALAR_INTS]; decodeScalar(k, 0, u); + int[] v = new int[SCALAR_INTS]; decodeScalar(s, 0, v); + + Nat256.mulAddTo(u, v, t); + + byte[] result = new byte[SCALAR_BYTES * 2]; + for (int i = 0; i < t.length; ++i) + { + encode32(t[i], result, i * 4); + } + return reduceScalar(result); + } + + protected static boolean checkContextVar(byte[] ctx , byte phflag) + { + return ctx == null && phflag == 0x00 + || ctx != null && ctx.length < 256; + } + + protected static int checkPoint(int[] x, int[] y) + { + int[] t = F.create(); + int[] u = F.create(); + int[] v = F.create(); + + F.sqr(x, u); + F.sqr(y, v); + F.mul(u, v, t); + F.sub(v, u, v); + F.mul(t, C_d, t); + F.addOne(t); + F.sub(t, v, t); + F.normalize(t); + + return F.isZero(t); + } + + protected static int checkPoint(int[] x, int[] y, int[] z) + { + int[] t = F.create(); + int[] u = F.create(); + int[] v = F.create(); + int[] w = F.create(); + + F.sqr(x, u); + F.sqr(y, v); + F.sqr(z, w); + F.mul(u, v, t); + F.sub(v, u, v); + F.mul(v, w, v); + F.sqr(w, w); + F.mul(t, C_d, t); + F.add(t, w, t); + F.sub(t, v, t); + F.normalize(t); + + return F.isZero(t); + } + + protected static boolean checkPointVar(byte[] p) + { + int[] t = new int[8]; + decode32(p, 0, t, 0, 8); + t[7] &= 0x7FFFFFFF; + return !Nat256.gte(t, P); + } + + protected static boolean checkScalarVar(byte[] s) + { + int[] n = new int[SCALAR_INTS]; + decodeScalar(s, 0, n); + return !Nat256.gte(n, L); + } + + protected static Digest createDigest() + { + return new SHA512Digest(); + } + + public static Digest createPrehash() + { + return createDigest(); + } + + protected static int decode24(byte[] bs, int off) + { + int n = bs[ off] & 0xFF; + n |= (bs[++off] & 0xFF) << 8; + n |= (bs[++off] & 0xFF) << 16; + return n; + } + + protected static int decode32(byte[] bs, int off) + { + int n = bs[off] & 0xFF; + n |= (bs[++off] & 0xFF) << 8; + n |= (bs[++off] & 0xFF) << 16; + n |= bs[++off] << 24; + return n; + } + + protected static void decode32(byte[] bs, int bsOff, int[] n, int nOff, int nLen) + { + for (int i = 0; i < nLen; ++i) + { + n[nOff + i] = decode32(bs, bsOff + i * 4); + } + } + + protected static boolean decodePointVar(byte[] p, int pOff, boolean negate, PointAffine r) + { + byte[] py = Arrays.copyOfRange(p, pOff, pOff + POINT_BYTES); + if (!checkPointVar(py)) + { + return false; + } + + int x_0 = (py[POINT_BYTES - 1] & 0x80) >>> 7; + py[POINT_BYTES - 1] &= 0x7F; + + F.decode(py, 0, r.y); + + int[] u = F.create(); + int[] v = F.create(); + + F.sqr(r.y, u); + F.mul(C_d, u, v); + F.subOne(u); + F.addOne(v); + + if (!F.sqrtRatioVar(u, v, r.x)) + { + return false; + } + + F.normalize(r.x); + if (x_0 == 1 && F.isZeroVar(r.x)) + { + return false; + } + + if (negate ^ (x_0 != (r.x[0] & 1))) + { + F.negate(r.x, r.x); + } + + return true; + } + + protected static void decodeScalar(byte[] k, int kOff, int[] n) + { + decode32(k, kOff, n, 0, SCALAR_INTS); + } + + protected static void dom2(Digest d, byte phflag, byte[] ctx) + { + if (ctx != null) + { + int n = 
DOM2_PREFIX.length; + byte[] t = new byte[n + 2 + ctx.length]; + System.arraycopy(DOM2_PREFIX, 0, t, 0, n); + t[n] = phflag; + t[n + 1] = (byte)ctx.length; + System.arraycopy(ctx, 0, t, n + 2, ctx.length); + + d.update(t, 0, t.length); + } + } + + protected static void encode24(int n, byte[] bs, int off) + { + bs[ off] = (byte)(n ); + bs[++off] = (byte)(n >>> 8); + bs[++off] = (byte)(n >>> 16); + } + + protected static void encode32(int n, byte[] bs, int off) + { + bs[ off] = (byte)(n ); + bs[++off] = (byte)(n >>> 8); + bs[++off] = (byte)(n >>> 16); + bs[++off] = (byte)(n >>> 24); + } + + protected static void encode56(long n, byte[] bs, int off) + { + encode32((int)n, bs, off); + encode24((int)(n >>> 32), bs, off + 4); + } + + protected static int encodePoint(PointAccum p, byte[] r, int rOff) + { + int[] x = F.create(); + int[] y = F.create(); + + F.inv(p.z, y); + F.mul(p.x, y, x); + F.mul(p.y, y, y); + F.normalize(x); + F.normalize(y); + + int result = checkPoint(x, y); + + F.encode(y, r, rOff); + r[rOff + POINT_BYTES - 1] |= ((x[0] & 1) << 7); + + return result; + } + + public static void generatePrivateKey(SecureRandom random, byte[] k) + { + random.nextBytes(k); + } + + public static void generatePublicKey(byte[] sk, int skOff, byte[] pk, int pkOff) + { + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + scalarMultBaseEncoded(s, pk, pkOff); + } + + protected static int getWindow4(int[] x, int n) + { + int w = n >>> 3, b = (n & 7) << 2; + return (x[w] >>> b) & 15; + } + + protected static byte[] getWnafVar(int[] n, int width) + { +// assert n[SCALAR_INTS - 1] >>> 28 == 0; + + int[] t = new int[SCALAR_INTS * 2]; + { + int tPos = t.length, c = 0; + int i = SCALAR_INTS; + while (--i >= 0) + { + int next = n[i]; + t[--tPos] = (next >>> 16) | (c << 16); + t[--tPos] = c = next; + } + } + + byte[] ws = new byte[253]; + + final int pow2 = 1 << width; + final int mask = pow2 - 1; + final int sign = pow2 >>> 1; + + int j = 0, carry = 0; + for (int i = 0; i < t.length; ++i, j -= 16) + { + int word = t[i]; + while (j < 16) + { + int word16 = word >>> j; + int bit = word16 & 1; + + if (bit == carry) + { + ++j; + continue; + } + + int digit = (word16 & mask) + carry; + carry = digit & sign; + digit -= (carry << 1); + carry >>>= (width - 1); + + ws[(i << 4) + j] = (byte)digit; + + j += width; + } + } + +// assert carry == 0; + + return ws; + } + + protected static void implSign(Digest d, byte[] h, byte[] s, byte[] pk, int pkOff, byte[] ctx, byte phflag, byte[] m, + int mOff, int mLen, byte[] sig, int sigOff) + { + dom2(d, phflag, ctx); + d.update(h, SCALAR_BYTES, SCALAR_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] r = reduceScalar(h); + byte[] R = new byte[POINT_BYTES]; + scalarMultBaseEncoded(r, R, 0); + + dom2(d, phflag, ctx); + d.update(R, 0, POINT_BYTES); + d.update(pk, pkOff, POINT_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + byte[] S = calculateS(r, k, s); + + System.arraycopy(R, 0, sig, sigOff, POINT_BYTES); + System.arraycopy(S, 0, sig, sigOff + POINT_BYTES, SCALAR_BYTES); + } + + protected static void implSign(byte[] sk, int skOff, byte[] ctx, byte phflag, byte[] m, int mOff, int mLen, + byte[] sig, int sigOff) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + 
d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + byte[] pk = new byte[POINT_BYTES]; + scalarMultBaseEncoded(s, pk, 0); + + implSign(d, h, s, pk, 0, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + protected static void implSign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte phflag, + byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(sk, skOff, SECRET_KEY_SIZE); + d.doFinal(h, 0); + + byte[] s = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, s); + + implSign(d, h, s, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + protected static boolean implVerify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte phflag, byte[] m, + int mOff, int mLen) + { + if (!checkContextVar(ctx, phflag)) + { + throw new IllegalArgumentException("ctx"); + } + + byte[] R = Arrays.copyOfRange(sig, sigOff, sigOff + POINT_BYTES); + byte[] S = Arrays.copyOfRange(sig, sigOff + POINT_BYTES, sigOff + SIGNATURE_SIZE); + + if (!checkPointVar(R)) + { + return false; + } + if (!checkScalarVar(S)) + { + return false; + } + + PointAffine pA = new PointAffine(); + if (!decodePointVar(pk, pkOff, true, pA)) + { + return false; + } + + Digest d = createDigest(); + byte[] h = new byte[d.getDigestSize()]; + + dom2(d, phflag, ctx); + d.update(R, 0, POINT_BYTES); + d.update(pk, pkOff, POINT_BYTES); + d.update(m, mOff, mLen); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + + int[] nS = new int[SCALAR_INTS]; + decodeScalar(S, 0, nS); + + int[] nA = new int[SCALAR_INTS]; + decodeScalar(k, 0, nA); + + PointAccum pR = new PointAccum(); + scalarMultStrausVar(nS, nA, pA, pR); + + byte[] check = new byte[POINT_BYTES]; + return 0 != encodePoint(pR, check, 0) && Arrays.areEqual(check, R); + } + + protected static void pointAdd(PointExt p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.apm(r.y, r.x, b, a); + F.apm(p.y, p.x, d, c); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(r.u, r.v, c); + F.mul(c, p.t, c); + F.mul(c, C_d2, c); + F.mul(r.z, p.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointAdd(PointExt p, PointExt r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = F.create(); + int[] f = F.create(); + int[] g = F.create(); + int[] h = F.create(); + + F.apm(p.y, p.x, b, a); + F.apm(r.y, r.x, d, c); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(p.t, r.t, c); + F.mul(c, C_d2, c); + F.mul(p.z, r.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + F.mul(e, h, r.t); + } + + protected static void pointAddVar(boolean negate, PointExt p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + int[] nc, nd, nf, ng; + if (negate) + { + nc = d; nd = c; nf = g; ng = f; + } + else + { + nc = c; nd = d; nf = f; ng = g; + } + + F.apm(r.y, r.x, b, a); + F.apm(p.y, p.x, nd, nc); + F.mul(a, c, a); + 
F.mul(b, d, b); + F.mul(r.u, r.v, c); + F.mul(c, p.t, c); + F.mul(c, C_d2, c); + F.mul(r.z, p.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, ng, nf); + F.carry(ng); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointAddVar(boolean negate, PointExt p, PointExt q, PointExt r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] d = F.create(); + int[] e = F.create(); + int[] f = F.create(); + int[] g = F.create(); + int[] h = F.create(); + + int[] nc, nd, nf, ng; + if (negate) + { + nc = d; nd = c; nf = g; ng = f; + } + else + { + nc = c; nd = d; nf = f; ng = g; + } + + F.apm(p.y, p.x, b, a); + F.apm(q.y, q.x, nd, nc); + F.mul(a, c, a); + F.mul(b, d, b); + F.mul(p.t, q.t, c); + F.mul(c, C_d2, c); + F.mul(p.z, q.z, d); + F.add(d, d, d); + F.apm(b, a, h, e); + F.apm(d, c, ng, nf); + F.carry(ng); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + F.mul(e, h, r.t); + } + + protected static void pointAddPrecomp(PointPrecomp p, PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.apm(r.y, r.x, b, a); + F.mul(a, p.ymx_h, a); + F.mul(b, p.ypx_h, b); + F.mul(r.u, r.v, c); + F.mul(c, p.xyd, c); + F.apm(b, a, h, e); + F.apm(r.z, c, g, f); + F.carry(g); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static PointExt pointCopy(PointAccum p) + { + PointExt r = new PointExt(); + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + F.copy(p.z, 0, r.z, 0); + F.mul(p.u, p.v, r.t); + return r; + } + + protected static PointExt pointCopy(PointAffine p) + { + PointExt r = new PointExt(); + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + pointExtendXY(r); + return r; + } + + protected static PointExt pointCopy(PointExt p) + { + PointExt r = new PointExt(); + pointCopy(p, r); + return r; + } + + protected static void pointCopy(PointAffine p, PointAccum r) + { + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + pointExtendXY(r); + } + + protected static void pointCopy(PointExt p, PointExt r) + { + F.copy(p.x, 0, r.x, 0); + F.copy(p.y, 0, r.y, 0); + F.copy(p.z, 0, r.z, 0); + F.copy(p.t, 0, r.t, 0); + } + + protected static void pointDouble(PointAccum r) + { + int[] a = F.create(); + int[] b = F.create(); + int[] c = F.create(); + int[] e = r.u; + int[] f = F.create(); + int[] g = F.create(); + int[] h = r.v; + + F.sqr(r.x, a); + F.sqr(r.y, b); + F.sqr(r.z, c); + F.add(c, c, c); + F.apm(a, b, h, g); + F.add(r.x, r.y, e); + F.sqr(e, e); + F.sub(h, e, e); + F.add(c, g, f); + F.carry(f); + F.mul(e, f, r.x); + F.mul(g, h, r.y); + F.mul(f, g, r.z); + } + + protected static void pointExtendXY(PointAccum p) + { + F.one(p.z); + F.copy(p.x, 0, p.u, 0); + F.copy(p.y, 0, p.v, 0); + } + + protected static void pointExtendXY(PointExt p) + { + F.one(p.z); + F.mul(p.x, p.y, p.t); + } + + protected static void pointLookup(int block, int index, PointPrecomp p) + { +// assert 0 <= block && block < PRECOMP_BLOCKS; +// assert 0 <= index && index < PRECOMP_POINTS; + + int off = block * PRECOMP_POINTS * 3 * F.SIZE; + + for (int i = 0; i < PRECOMP_POINTS; ++i) + { + int cond = ((i ^ index) - 1) >> 31; + F.cmov(cond, precompBase, off, p.ypx_h, 0); off += F.SIZE; + F.cmov(cond, precompBase, off, p.ymx_h, 0); off += F.SIZE; + F.cmov(cond, precompBase, off, p.xyd, 0); off += F.SIZE; + } + } + + protected static void pointLookup(int[] x, int n, int[] table, PointExt r) + { + // TODO This 
method is currently hardcoded to 4-bit windows and 8 precomputed points + + int w = getWindow4(x, n); + + int sign = (w >>> (4 - 1)) ^ 1; + int abs = (w ^ -sign) & 7; + +// assert sign == 0 || sign == 1; +// assert 0 <= abs && abs < 8; + + for (int i = 0, off = 0; i < 8; ++i) + { + int cond = ((i ^ abs) - 1) >> 31; + F.cmov(cond, table, off, r.x, 0); off += F.SIZE; + F.cmov(cond, table, off, r.y, 0); off += F.SIZE; + F.cmov(cond, table, off, r.z, 0); off += F.SIZE; + F.cmov(cond, table, off, r.t, 0); off += F.SIZE; + } + + F.cnegate(sign, r.x); + F.cnegate(sign, r.t); + } + + protected static void pointLookup(int[] table, int index, PointExt r) + { + int off = F.SIZE * 4 * index; + + F.copy(table, off, r.x, 0); off += F.SIZE; + F.copy(table, off, r.y, 0); off += F.SIZE; + F.copy(table, off, r.z, 0); off += F.SIZE; + F.copy(table, off, r.t, 0); + } + + protected static int[] pointPrecompute(PointAffine p, int count) + { +// assert count > 0; + + PointExt q = pointCopy(p); + PointExt d = pointCopy(q); + pointAdd(q, d); + + int[] table = F.createTable(count * 4); + int off = 0; + + int i = 0; + for (;;) + { + F.copy(q.x, 0, table, off); off += F.SIZE; + F.copy(q.y, 0, table, off); off += F.SIZE; + F.copy(q.z, 0, table, off); off += F.SIZE; + F.copy(q.t, 0, table, off); off += F.SIZE; + + if (++i == count) + { + break; + } + + pointAdd(d, q); + } + + return table; + } + + protected static PointExt[] pointPrecomputeVar(PointExt p, int count) + { +// assert count > 0; + + PointExt d = new PointExt(); + pointAddVar(false, p, p, d); + + PointExt[] table = new PointExt[count]; + table[0] = pointCopy(p); + for (int i = 1; i < count; ++i) + { + pointAddVar(false, table[i - 1], d, table[i] = new PointExt()); + } + return table; + } + + protected static void pointSetNeutral(PointAccum p) + { + F.zero(p.x); + F.one(p.y); + F.one(p.z); + F.zero(p.u); + F.one(p.v); + } + + protected static void pointSetNeutral(PointExt p) + { + F.zero(p.x); + F.one(p.y); + F.one(p.z); + F.zero(p.t); + } + + public static void precompute() + { + synchronized (precompLock) + { + if (precompBase != null) + { + return; + } + + // Precomputed table for the base point in verification ladder + { + PointExt b = new PointExt(); + F.copy(B_x, 0, b.x, 0); + F.copy(B_y, 0, b.y, 0); + pointExtendXY(b); + + precompBaseTable = pointPrecomputeVar(b, 1 << (WNAF_WIDTH_BASE - 2)); + } + + PointAccum p = new PointAccum(); + F.copy(B_x, 0, p.x, 0); + F.copy(B_y, 0, p.y, 0); + pointExtendXY(p); + + precompBase = F.createTable(PRECOMP_BLOCKS * PRECOMP_POINTS * 3); + + int off = 0; + for (int b = 0; b < PRECOMP_BLOCKS; ++b) + { + PointExt[] ds = new PointExt[PRECOMP_TEETH]; + + PointExt sum = new PointExt(); + pointSetNeutral(sum); + + for (int t = 0; t < PRECOMP_TEETH; ++t) + { + PointExt q = pointCopy(p); + pointAddVar(true, sum, q, sum); + pointDouble(p); + + ds[t] = pointCopy(p); + + if (b + t != PRECOMP_BLOCKS + PRECOMP_TEETH - 2) + { + for (int s = 1; s < PRECOMP_SPACING; ++s) + { + pointDouble(p); + } + } + } + + PointExt[] points = new PointExt[PRECOMP_POINTS]; + int k = 0; + points[k++] = sum; + + for (int t = 0; t < (PRECOMP_TEETH - 1); ++t) + { + int size = 1 << t; + for (int j = 0; j < size; ++j, ++k) + { + pointAddVar(false, points[k - size], ds[t], points[k] = new PointExt()); + } + } + +// assert k == PRECOMP_POINTS; + + int[] cs = F.createTable(PRECOMP_POINTS); + + // TODO[ed25519] A single batch inversion across all blocks? 
+ { + int[] u = F.create(); + F.copy(points[0].z, 0, u, 0); + F.copy(u, 0, cs, 0); + + int i = 0; + while (++i < PRECOMP_POINTS) + { + F.mul(u, points[i].z, u); + F.copy(u, 0, cs, i * F.SIZE); + } + + F.add(u, u, u); + F.invVar(u, u); + --i; + + int[] t = F.create(); + + while (i > 0) + { + int j = i--; + F.copy(cs, i * F.SIZE, t, 0); + F.mul(t, u, t); + F.copy(t, 0, cs, j * F.SIZE); + F.mul(u, points[j].z, u); + } + + F.copy(u, 0, cs, 0); + } + + for (int i = 0; i < PRECOMP_POINTS; ++i) + { + PointExt q = points[i]; + + int[] x = F.create(); + int[] y = F.create(); + +// F.add(q.z, q.z, x); +// F.invVar(x, y); + F.copy(cs, i * F.SIZE, y, 0); + + F.mul(q.x, y, x); + F.mul(q.y, y, y); + + PointPrecomp r = new PointPrecomp(); + F.apm(y, x, r.ypx_h, r.ymx_h); + F.mul(x, y, r.xyd); + F.mul(r.xyd, C_d4, r.xyd); + + F.normalize(r.ypx_h); + F.normalize(r.ymx_h); +// F.normalize(r.xyd); + + F.copy(r.ypx_h, 0, precompBase, off); off += F.SIZE; + F.copy(r.ymx_h, 0, precompBase, off); off += F.SIZE; + F.copy(r.xyd, 0, precompBase, off); off += F.SIZE; + } + } + +// assert off == precompBase.length; + } + } + + protected static void pruneScalar(byte[] n, int nOff, byte[] r) + { + System.arraycopy(n, nOff, r, 0, SCALAR_BYTES); + + r[0] &= 0xF8; + r[SCALAR_BYTES - 1] &= 0x7F; + r[SCALAR_BYTES - 1] |= 0x40; + } + + protected static byte[] reduceScalar(byte[] n) + { + long x00 = decode32(n, 0) & M32L; // x00:32/-- + long x01 = (decode24(n, 4) << 4) & M32L; // x01:28/-- + long x02 = decode32(n, 7) & M32L; // x02:32/-- + long x03 = (decode24(n, 11) << 4) & M32L; // x03:28/-- + long x04 = decode32(n, 14) & M32L; // x04:32/-- + long x05 = (decode24(n, 18) << 4) & M32L; // x05:28/-- + long x06 = decode32(n, 21) & M32L; // x06:32/-- + long x07 = (decode24(n, 25) << 4) & M32L; // x07:28/-- + long x08 = decode32(n, 28) & M32L; // x08:32/-- + long x09 = (decode24(n, 32) << 4) & M32L; // x09:28/-- + long x10 = decode32(n, 35) & M32L; // x10:32/-- + long x11 = (decode24(n, 39) << 4) & M32L; // x11:28/-- + long x12 = decode32(n, 42) & M32L; // x12:32/-- + long x13 = (decode24(n, 46) << 4) & M32L; // x13:28/-- + long x14 = decode32(n, 49) & M32L; // x14:32/-- + long x15 = (decode24(n, 53) << 4) & M32L; // x15:28/-- + long x16 = decode32(n, 56) & M32L; // x16:32/-- + long x17 = (decode24(n, 60) << 4) & M32L; // x17:28/-- + long x18 = n[63] & M08L; // x18:08/-- + long t; + +// x18 += (x17 >> 28); x17 &= M28L; + x09 -= x18 * L0; // x09:34/28 + x10 -= x18 * L1; // x10:33/30 + x11 -= x18 * L2; // x11:35/28 + x12 -= x18 * L3; // x12:32/31 + x13 -= x18 * L4; // x13:28/21 + + x17 += (x16 >> 28); x16 &= M28L; // x17:28/--, x16:28/-- + x08 -= x17 * L0; // x08:54/32 + x09 -= x17 * L1; // x09:52/51 + x10 -= x17 * L2; // x10:55/34 + x11 -= x17 * L3; // x11:51/36 + x12 -= x17 * L4; // x12:41/-- + +// x16 += (x15 >> 28); x15 &= M28L; + x07 -= x16 * L0; // x07:54/28 + x08 -= x16 * L1; // x08:54/53 + x09 -= x16 * L2; // x09:55/53 + x10 -= x16 * L3; // x10:55/52 + x11 -= x16 * L4; // x11:51/41 + + x15 += (x14 >> 28); x14 &= M28L; // x15:28/--, x14:28/-- + x06 -= x15 * L0; // x06:54/32 + x07 -= x15 * L1; // x07:54/53 + x08 -= x15 * L2; // x08:56/-- + x09 -= x15 * L3; // x09:55/54 + x10 -= x15 * L4; // x10:55/53 + +// x14 += (x13 >> 28); x13 &= M28L; + x05 -= x14 * L0; // x05:54/28 + x06 -= x14 * L1; // x06:54/53 + x07 -= x14 * L2; // x07:56/-- + x08 -= x14 * L3; // x08:56/51 + x09 -= x14 * L4; // x09:56/-- + + x13 += (x12 >> 28); x12 &= M28L; // x13:28/22, x12:28/-- + x04 -= x13 * L0; // x04:54/49 + x05 -= x13 * L1; // x05:54/53 + x06 
-= x13 * L2; // x06:56/-- + x07 -= x13 * L3; // x07:56/52 + x08 -= x13 * L4; // x08:56/52 + + x12 += (x11 >> 28); x11 &= M28L; // x12:28/24, x11:28/-- + x03 -= x12 * L0; // x03:54/49 + x04 -= x12 * L1; // x04:54/51 + x05 -= x12 * L2; // x05:56/-- + x06 -= x12 * L3; // x06:56/52 + x07 -= x12 * L4; // x07:56/53 + + x11 += (x10 >> 28); x10 &= M28L; // x11:29/--, x10:28/-- + x02 -= x11 * L0; // x02:55/32 + x03 -= x11 * L1; // x03:55/-- + x04 -= x11 * L2; // x04:56/55 + x05 -= x11 * L3; // x05:56/52 + x06 -= x11 * L4; // x06:56/53 + + x10 += (x09 >> 28); x09 &= M28L; // x10:29/--, x09:28/-- + x01 -= x10 * L0; // x01:55/28 + x02 -= x10 * L1; // x02:55/54 + x03 -= x10 * L2; // x03:56/55 + x04 -= x10 * L3; // x04:57/-- + x05 -= x10 * L4; // x05:56/53 + + x08 += (x07 >> 28); x07 &= M28L; // x08:56/53, x07:28/-- + x09 += (x08 >> 28); x08 &= M28L; // x09:29/25, x08:28/-- + + t = x08 >>> 27; + x09 += t; // x09:29/26 + + x00 -= x09 * L0; // x00:55/53 + x01 -= x09 * L1; // x01:55/54 + x02 -= x09 * L2; // x02:57/-- + x03 -= x09 * L3; // x03:57/-- + x04 -= x09 * L4; // x04:57/42 + + x01 += (x00 >> 28); x00 &= M28L; + x02 += (x01 >> 28); x01 &= M28L; + x03 += (x02 >> 28); x02 &= M28L; + x04 += (x03 >> 28); x03 &= M28L; + x05 += (x04 >> 28); x04 &= M28L; + x06 += (x05 >> 28); x05 &= M28L; + x07 += (x06 >> 28); x06 &= M28L; + x08 += (x07 >> 28); x07 &= M28L; + x09 = (x08 >> 28); x08 &= M28L; + + x09 -= t; + +// assert x09 == 0L || x09 == -1L; + + x00 += x09 & L0; + x01 += x09 & L1; + x02 += x09 & L2; + x03 += x09 & L3; + x04 += x09 & L4; + + x01 += (x00 >> 28); x00 &= M28L; + x02 += (x01 >> 28); x01 &= M28L; + x03 += (x02 >> 28); x02 &= M28L; + x04 += (x03 >> 28); x03 &= M28L; + x05 += (x04 >> 28); x04 &= M28L; + x06 += (x05 >> 28); x05 &= M28L; + x07 += (x06 >> 28); x06 &= M28L; + x08 += (x07 >> 28); x07 &= M28L; + + byte[] r = new byte[SCALAR_BYTES]; + encode56(x00 | (x01 << 28), r, 0); + encode56(x02 | (x03 << 28), r, 7); + encode56(x04 | (x05 << 28), r, 14); + encode56(x06 | (x07 << 28), r, 21); + encode32((int)x08, r, 28); + return r; + } + + protected static void scalarMult(byte[] k, PointAffine p, PointAccum r) + { + int[] n = new int[SCALAR_INTS]; + decodeScalar(k, 0, n); + +// assert 0 == (n[0] & 7); +// assert 1 == n[SCALAR_INTS - 1] >>> 30; + + Nat.shiftDownBits(SCALAR_INTS, n, 3, 1); + + // Recode the scalar into signed-digit form + { + //int c1 = + Nat.cadd(SCALAR_INTS, ~n[0] & 1, n, L, n); //assert c1 == 0; + //int c2 = + Nat.shiftDownBit(SCALAR_INTS, n, 0); //assert c2 == (1 << 31); + } + +// assert 1 == n[SCALAR_INTS - 1] >>> 28; + + int[] table = pointPrecompute(p, 8); + PointExt q = new PointExt(); + + // Replace first 4 doublings (2^4 * P) with 1 addition (P + 15 * P) + pointCopy(p, r); + pointLookup(table, 7, q); + pointAdd(q, r); + + int w = 62; + for (;;) + { + pointLookup(n, w, table, q); + pointAdd(q, r); + + pointDouble(r); + pointDouble(r); + pointDouble(r); + + if (--w < 0) + { + break; + } + + pointDouble(r); + } + } + + protected static void scalarMultBase(byte[] k, PointAccum r) + { + precompute(); + + int[] n = new int[SCALAR_INTS]; + decodeScalar(k, 0, n); + + // Recode the scalar into signed-digit form, then group comb bits in each block + { + //int c1 = + Nat.cadd(SCALAR_INTS, ~n[0] & 1, n, L, n); //assert c1 == 0; + //int c2 = + Nat.shiftDownBit(SCALAR_INTS, n, 1); //assert c2 == (1 << 31); + + for (int i = 0; i < SCALAR_INTS; ++i) + { + n[i] = Interleave.shuffle2(n[i]); + } + } + + PointPrecomp p = new PointPrecomp(); + + pointSetNeutral(r); + + int cOff = (PRECOMP_SPACING 
- 1) * PRECOMP_TEETH; + for (;;) + { + for (int b = 0; b < PRECOMP_BLOCKS; ++b) + { + int w = n[b] >>> cOff; + int sign = (w >>> (PRECOMP_TEETH - 1)) & 1; + int abs = (w ^ -sign) & PRECOMP_MASK; + +// assert sign == 0 || sign == 1; +// assert 0 <= abs && abs < PRECOMP_POINTS; + + pointLookup(b, abs, p); + + F.cswap(sign, p.ypx_h, p.ymx_h); + F.cnegate(sign, p.xyd); + + pointAddPrecomp(p, r); + } + + if ((cOff -= PRECOMP_TEETH) < 0) + { + break; + } + + pointDouble(r); + } + } + + protected static void scalarMultBaseEncoded(byte[] k, byte[] r, int rOff) + { + PointAccum p = new PointAccum(); + scalarMultBase(k, p); + if (0 == encodePoint(p, r, rOff)) + { + throw new IllegalStateException(); + } + } + + /** + * NOTE: Only for use by X25519 + */ + public static void scalarMultBaseYZ(X25519.Friend friend, byte[] k, int kOff, int[] y, int[] z) + { + if (null == friend) + { + throw new NullPointerException("This method is only for use by X25519"); + } + + byte[] n = new byte[SCALAR_BYTES]; + pruneScalar(k, kOff, n); + + PointAccum p = new PointAccum(); + scalarMultBase(n, p); + if (0 == checkPoint(p.x, p.y, p.z)) + { + throw new IllegalStateException(); + } + F.copy(p.y, 0, y, 0); + F.copy(p.z, 0, z, 0); + } + + protected static void scalarMultStrausVar(int[] nb, int[] np, PointAffine p, PointAccum r) + { + precompute(); + + final int width = 5; + + byte[] ws_b = getWnafVar(nb, WNAF_WIDTH_BASE); + byte[] ws_p = getWnafVar(np, width); + + PointExt[] tp = pointPrecomputeVar(pointCopy(p), 1 << (width - 2)); + + pointSetNeutral(r); + + for (int bit = 252;;) + { + int wb = ws_b[bit]; + if (wb != 0) + { + int sign = wb >> 31; + int index = (wb ^ sign) >>> 1; + + pointAddVar((sign != 0), precompBaseTable[index], r); + } + + int wp = ws_p[bit]; + if (wp != 0) + { + int sign = wp >> 31; + int index = (wp ^ sign) >>> 1; + + pointAddVar((sign != 0), tp[index], r); + } + + if (--bit < 0) + { + break; + } + + pointDouble(r); + } + } + + public static void sign(byte[] sk, int skOff, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte[] ctx = null; + byte phflag = 0x00; + + implSign(sk, skOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte[] ctx = null; + byte phflag = 0x00; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] ctx, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte phflag = 0x00; + + implSign(sk, skOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void sign(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte[] m, int mOff, int mLen, byte[] sig, int sigOff) + { + byte phflag = 0x00; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, mOff, mLen, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] ctx, byte[] ph, int phOff, byte[] sig, int sigOff) + { + byte phflag = 0x01; + + implSign(sk, skOff, ctx, phflag, ph, phOff, PREHASH_SIZE, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, byte[] ph, int phOff, byte[] sig, int sigOff) + { + byte phflag = 0x01; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, ph, phOff, PREHASH_SIZE, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] ctx, Digest ph, byte[] sig, int sigOff) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw 
new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + implSign(sk, skOff, ctx, phflag, m, 0, m.length, sig, sigOff); + } + + public static void signPrehash(byte[] sk, int skOff, byte[] pk, int pkOff, byte[] ctx, Digest ph, byte[] sig, int sigOff) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + implSign(sk, skOff, pk, pkOff, ctx, phflag, m, 0, m.length, sig, sigOff); + } + + public static boolean verify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] m, int mOff, int mLen) + { + byte[] ctx = null; + byte phflag = 0x00; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, mOff, mLen); + } + + public static boolean verify(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte[] m, int mOff, int mLen) + { + byte phflag = 0x00; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, mOff, mLen); + } + + public static boolean verifyPrehash(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, byte[] ph, int phOff) + { + byte phflag = 0x01; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, ph, phOff, PREHASH_SIZE); + } + + public static boolean verifyPrehash(byte[] sig, int sigOff, byte[] pk, int pkOff, byte[] ctx, Digest ph) + { + byte[] m = new byte[PREHASH_SIZE]; + if (PREHASH_SIZE != ph.doFinal(m, 0)) + { + throw new IllegalArgumentException("ph"); + } + + byte phflag = 0x01; + + return implVerify(sig, sigOff, pk, pkOff, ctx, phflag, m, 0, m.length); + } +} diff --git a/src/main/java/org/qortal/crypto/Crypto.java b/src/main/java/org/qortal/crypto/Crypto.java index 5d91781c..8ee0b2b2 100644 --- a/src/main/java/org/qortal/crypto/Crypto.java +++ b/src/main/java/org/qortal/crypto/Crypto.java @@ -270,10 +270,10 @@ public abstract class Crypto { } public static byte[] getSharedSecret(byte[] privateKey, byte[] publicKey) { - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(privateKey); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(privateKey); X25519PrivateKeyParameters xPrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); - byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(publicKey); + byte[] x25519PublicKey = Qortal25519Extras.toX25519PublicKey(publicKey); X25519PublicKeyParameters xPublicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0); byte[] sharedSecret = new byte[SHARED_SECRET_LENGTH]; diff --git a/src/main/java/org/qortal/crypto/Qortal25519Extras.java b/src/main/java/org/qortal/crypto/Qortal25519Extras.java new file mode 100644 index 00000000..42cca93e --- /dev/null +++ b/src/main/java/org/qortal/crypto/Qortal25519Extras.java @@ -0,0 +1,234 @@ +package org.qortal.crypto; + +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA512Digest; +import org.bouncycastle.math.ec.rfc7748.X25519; +import org.bouncycastle.math.ec.rfc7748.X25519Field; +import org.bouncycastle.math.ec.rfc8032.Ed25519; +import org.bouncycastle.math.raw.Nat256; + +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.Collection; + +/** + * Additions to BouncyCastle providing: + *

+ * <ul>
+ * <li>Ed25519 to X25519 key conversion</li>
+ * <li>Aggregate public keys</li>
+ * <li>Aggregate signatures</li>
+ * </ul>
+ */ +public abstract class Qortal25519Extras extends BouncyCastleEd25519 { + + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + public static byte[] toX25519PublicKey(byte[] ed25519PublicKey) { + int[] one = new int[X25519Field.SIZE]; + X25519Field.one(one); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(ed25519PublicKey, 0, true, pA)) + return null; + + int[] y = pA.y; + + int[] oneMinusY = new int[X25519Field.SIZE]; + X25519Field.sub(one, y, oneMinusY); + + int[] onePlusY = new int[X25519Field.SIZE]; + X25519Field.add(one, y, onePlusY); + + int[] oneMinusYInverted = new int[X25519Field.SIZE]; + X25519Field.inv(oneMinusY, oneMinusYInverted); + + int[] u = new int[X25519Field.SIZE]; + X25519Field.mul(onePlusY, oneMinusYInverted, u); + + X25519Field.normalize(u); + + byte[] x25519PublicKey = new byte[X25519.SCALAR_SIZE]; + X25519Field.encode(u, x25519PublicKey, 0); + + return x25519PublicKey; + } + + public static byte[] toX25519PrivateKey(byte[] ed25519PrivateKey) { + Digest d = Ed25519.createPrehash(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(ed25519PrivateKey, 0, ed25519PrivateKey.length); + d.doFinal(h, 0); + + byte[] s = new byte[X25519.SCALAR_SIZE]; + + System.arraycopy(h, 0, s, 0, X25519.SCALAR_SIZE); + s[0] &= 0xF8; + s[X25519.SCALAR_SIZE - 1] &= 0x7F; + s[X25519.SCALAR_SIZE - 1] |= 0x40; + + return s; + } + + // Mostly for test support + public static PointAccum newPointAccum() { + return new PointAccum(); + } + + public static byte[] aggregatePublicKeys(Collection publicKeys) { + PointAccum rAccum = null; + + for (byte[] publicKey : publicKeys) { + PointAffine pA = new PointAffine(); + if (!decodePointVar(publicKey, 0, false, pA)) + // Failed to decode + return null; + + if (rAccum == null) { + rAccum = new PointAccum(); + pointCopy(pA, rAccum); + } else { + pointAdd(pointCopy(pA), rAccum); + } + } + + byte[] publicKey = new byte[SCALAR_BYTES]; + if (0 == encodePoint(rAccum, publicKey, 0)) + // Failed to encode + return null; + + return publicKey; + } + + public static byte[] aggregateSignatures(Collection signatures) { + // Signatures are (R, s) + // R is a point + // s is a scalar + PointAccum rAccum = null; + int[] sAccum = new int[SCALAR_INTS]; + + byte[] rEncoded = new byte[POINT_BYTES]; + int[] sPart = new int[SCALAR_INTS]; + for (byte[] signature : signatures) { + System.arraycopy(signature,0, rEncoded, 0, rEncoded.length); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(rEncoded, 0, false, pA)) + // Failed to decode + return null; + + if (rAccum == null) { + rAccum = new PointAccum(); + pointCopy(pA, rAccum); + + decode32(signature, rEncoded.length, sAccum, 0, SCALAR_INTS); + } else { + pointAdd(pointCopy(pA), rAccum); + + decode32(signature, rEncoded.length, sPart, 0, SCALAR_INTS); + Nat256.addTo(sPart, sAccum); + + // "mod L" on sAccum + if (Nat256.gte(sAccum, L)) + Nat256.subFrom(L, sAccum); + } + } + + byte[] signature = new byte[SIGNATURE_SIZE]; + if (0 == encodePoint(rAccum, signature, 0)) + // Failed to encode + return null; + + for (int i = 0; i < sAccum.length; ++i) { + encode32(sAccum[i], signature, POINT_BYTES + i * 4); + } + + return signature; + } + + public static byte[] signForAggregation(byte[] privateKey, byte[] message) { + // Very similar to BouncyCastle's implementation except we use secure random nonce and different hash + Digest d = new SHA512Digest(); + byte[] h = new byte[d.getDigestSize()]; + + d.reset(); + d.update(privateKey, 0, privateKey.length); + d.doFinal(h, 0); + + byte[] 
sH = new byte[SCALAR_BYTES]; + pruneScalar(h, 0, sH); + + byte[] publicKey = new byte[SCALAR_BYTES]; + scalarMultBaseEncoded(sH, publicKey, 0); + + byte[] rSeed = new byte[d.getDigestSize()]; + SECURE_RANDOM.nextBytes(rSeed); + + byte[] r = new byte[SCALAR_BYTES]; + pruneScalar(rSeed, 0, r); + + byte[] R = new byte[POINT_BYTES]; + scalarMultBaseEncoded(r, R, 0); + + d.reset(); + d.update(message, 0, message.length); + d.doFinal(h, 0); + byte[] k = reduceScalar(h); + + byte[] s = calculateS(r, k, sH); + + byte[] signature = new byte[SIGNATURE_SIZE]; + System.arraycopy(R, 0, signature, 0, POINT_BYTES); + System.arraycopy(s, 0, signature, POINT_BYTES, SCALAR_BYTES); + + return signature; + } + + public static boolean verifyAggregated(byte[] publicKey, byte[] signature, byte[] message) { + byte[] R = Arrays.copyOfRange(signature, 0, POINT_BYTES); + + byte[] s = Arrays.copyOfRange(signature, POINT_BYTES, POINT_BYTES + SCALAR_BYTES); + + if (!checkPointVar(R)) + // R out of bounds + return false; + + if (!checkScalarVar(s)) + // s out of bounds + return false; + + byte[] S = new byte[POINT_BYTES]; + scalarMultBaseEncoded(s, S, 0); + + PointAffine pA = new PointAffine(); + if (!decodePointVar(publicKey, 0, true, pA)) + // Failed to decode + return false; + + Digest d = new SHA512Digest(); + byte[] h = new byte[d.getDigestSize()]; + + d.update(message, 0, message.length); + d.doFinal(h, 0); + + byte[] k = reduceScalar(h); + + int[] nS = new int[SCALAR_INTS]; + decodeScalar(s, 0, nS); + + int[] nA = new int[SCALAR_INTS]; + decodeScalar(k, 0, nA); + + /*PointAccum*/ + PointAccum pR = new PointAccum(); + scalarMultStrausVar(nS, nA, pA, pR); + + byte[] check = new byte[POINT_BYTES]; + if (0 == encodePoint(pR, check, 0)) + // Failed to encode + return false; + + return Arrays.equals(check, R); + } +} diff --git a/src/test/java/org/qortal/test/CryptoTests.java b/src/test/java/org/qortal/test/CryptoTests.java index 6a0133d2..2cc73182 100644 --- a/src/test/java/org/qortal/test/CryptoTests.java +++ b/src/test/java/org/qortal/test/CryptoTests.java @@ -4,7 +4,7 @@ import org.junit.Test; import org.qortal.account.PrivateKeyAccount; import org.qortal.block.BlockChain; import org.qortal.crypto.AES; -import org.qortal.crypto.BouncyCastle25519; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.crypto.Crypto; import org.qortal.test.common.Common; import org.qortal.utils.Base58; @@ -123,14 +123,14 @@ public class CryptoTests extends Common { random.nextBytes(ed25519PrivateKey); PrivateKeyAccount account = new PrivateKeyAccount(null, ed25519PrivateKey); - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(account.getPrivateKey()); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(account.getPrivateKey()); X25519PrivateKeyParameters x25519PrivateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); // Derive X25519 public key from X25519 private key byte[] x25519PublicKeyFromPrivate = x25519PrivateKeyParams.generatePublicKey().getEncoded(); // Derive X25519 public key from Ed25519 public key - byte[] x25519PublicKeyFromEd25519 = BouncyCastle25519.toX25519PublicKey(account.getPublicKey()); + byte[] x25519PublicKeyFromEd25519 = Qortal25519Extras.toX25519PublicKey(account.getPublicKey()); assertEquals(String.format("Public keys do not match, from private key %s", Base58.encode(ed25519PrivateKey)), Base58.encode(x25519PublicKeyFromPrivate), Base58.encode(x25519PublicKeyFromEd25519)); } @@ -162,10 +162,10 @@ public class CryptoTests extends Common { } private static 
byte[] calcBCSharedSecret(byte[] ed25519PrivateKey, byte[] ed25519PublicKey) { - byte[] x25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ed25519PrivateKey); + byte[] x25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ed25519PrivateKey); X25519PrivateKeyParameters privateKeyParams = new X25519PrivateKeyParameters(x25519PrivateKey, 0); - byte[] x25519PublicKey = BouncyCastle25519.toX25519PublicKey(ed25519PublicKey); + byte[] x25519PublicKey = Qortal25519Extras.toX25519PublicKey(ed25519PublicKey); X25519PublicKeyParameters publicKeyParams = new X25519PublicKeyParameters(x25519PublicKey, 0); byte[] sharedSecret = new byte[32]; @@ -186,10 +186,10 @@ public class CryptoTests extends Common { final String expectedTheirX25519PublicKey = "ANjnZLRSzW9B1aVamiYGKP3XtBooU9tGGDjUiibUfzp2"; final String expectedSharedSecret = "DTMZYG96x8XZuGzDvHFByVLsXedimqtjiXHhXPVe58Ap"; - byte[] ourX25519PrivateKey = BouncyCastle25519.toX25519PrivateKey(ourPrivateKey); + byte[] ourX25519PrivateKey = Qortal25519Extras.toX25519PrivateKey(ourPrivateKey); assertEquals("X25519 private key incorrect", expectedOurX25519PrivateKey, Base58.encode(ourX25519PrivateKey)); - byte[] theirX25519PublicKey = BouncyCastle25519.toX25519PublicKey(theirPublicKey); + byte[] theirX25519PublicKey = Qortal25519Extras.toX25519PublicKey(theirPublicKey); assertEquals("X25519 public key incorrect", expectedTheirX25519PublicKey, Base58.encode(theirX25519PublicKey)); byte[] sharedSecret = calcBCSharedSecret(ourPrivateKey, theirPublicKey); diff --git a/src/test/java/org/qortal/test/SchnorrTests.java b/src/test/java/org/qortal/test/SchnorrTests.java new file mode 100644 index 00000000..03c92d2f --- /dev/null +++ b/src/test/java/org/qortal/test/SchnorrTests.java @@ -0,0 +1,190 @@ +package org.qortal.test; + +import com.google.common.hash.HashCode; +import com.google.common.primitives.Bytes; +import com.google.common.primitives.Longs; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; +import org.junit.Test; +import org.qortal.crypto.Qortal25519Extras; +import org.qortal.data.network.OnlineAccountData; +import org.qortal.transform.Transformer; + +import java.math.BigInteger; +import java.security.SecureRandom; +import java.security.Security; +import java.util.*; +import java.util.stream.Collectors; + +import static org.junit.Assert.*; + +public class SchnorrTests extends Qortal25519Extras { + + static { + // This must go before any calls to LogManager/Logger + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); + + Security.insertProviderAt(new BouncyCastleProvider(), 0); + Security.insertProviderAt(new BouncyCastleJsseProvider(), 1); + } + + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + @Test + public void testConversion() { + // Scalar form + byte[] scalarA = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + System.out.printf("a: %s%n", HashCode.fromBytes(scalarA)); + + byte[] pointA = HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes(); + + BigInteger expectedY = new BigInteger("46316835694926478169428394003475163141307993866256225615783033603165251855960"); + + PointAccum pointAccum = Qortal25519Extras.newPointAccum(); + scalarMultBase(scalarA, pointAccum); + + byte[] encoded = new byte[POINT_BYTES]; + if (0 == encodePoint(pointAccum, encoded, 0)) + fail("Point encoding 
failed"); + + System.out.printf("aG: %s%n", HashCode.fromBytes(encoded)); + assertArrayEquals(pointA, encoded); + + byte[] yBytes = new byte[POINT_BYTES]; + System.arraycopy(encoded,0, yBytes, 0, encoded.length); + Bytes.reverse(yBytes); + + System.out.printf("yBytes: %s%n", HashCode.fromBytes(yBytes)); + BigInteger yBI = new BigInteger(yBytes); + + System.out.printf("aG y: %s%n", yBI); + assertEquals(expectedY, yBI); + } + + @Test + public void testAddition() { + /* + * 1G: b'5866666666666666666666666666666666666666666666666666666666666666' + * 2G: b'c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022' + * 3G: b'd4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712' + */ + + // Scalar form + byte[] s1 = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] s2 = HashCode.fromString("0200000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + + // Point form + byte[] g1 = HashCode.fromString("5866666666666666666666666666666666666666666666666666666666666666".toLowerCase()).asBytes(); + byte[] g2 = HashCode.fromString("c9a3f86aae465f0e56513864510f3997561fa2c9e85ea21dc2292309f3cd6022".toLowerCase()).asBytes(); + byte[] g3 = HashCode.fromString("d4b4f5784868c3020403246717ec169ff79e26608ea126a1ab69ee77d1b16712".toLowerCase()).asBytes(); + + PointAccum p1 = Qortal25519Extras.newPointAccum(); + scalarMultBase(s1, p1); + + PointAccum p2 = Qortal25519Extras.newPointAccum(); + scalarMultBase(s2, p2); + + pointAdd(pointCopy(p1), p2); + + byte[] encoded = new byte[POINT_BYTES]; + if (0 == encodePoint(p2, encoded, 0)) + fail("Point encoding failed"); + + System.out.printf("sum: %s%n", HashCode.fromBytes(encoded)); + assertArrayEquals(g3, encoded); + } + + @Test + public void testSimpleSign() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + + byte[] signature = signForAggregation(privateKey, message); + System.out.printf("signature: %s%n", HashCode.fromBytes(signature)); + } + + @Test + public void testSimpleVerify() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + byte[] signature = HashCode.fromString("13e58e88f3df9e06637d2d5bbb814c028e3ba135494530b9d3b120bdb31168d62c70a37ae9cfba816fe6038ee1ce2fb521b95c4a91c7ff0bb1dd2e67733f2b0d".toLowerCase()).asBytes(); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + assertTrue(verifyAggregated(publicKey, signature, message)); + } + + @Test + public void testSimpleSignAndVerify() { + byte[] privateKey = HashCode.fromString("0100000000000000000000000000000000000000000000000000000000000000".toLowerCase()).asBytes(); + byte[] message = HashCode.fromString("01234567".toLowerCase()).asBytes(); + + byte[] signature = signForAggregation(privateKey, message); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + assertTrue(verifyAggregated(publicKey, signature, message)); + } + + @Test + public void testSimpleAggregate() { + List onlineAccounts = generateOnlineAccounts(1); + + byte[] aggregatePublicKey = 
aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey)); + + byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature)); + + OnlineAccountData onlineAccount = onlineAccounts.get(0); + + assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getPublicKey()), HashCode.fromBytes(aggregatePublicKey)), onlineAccount.getPublicKey(), aggregatePublicKey); + assertArrayEquals(String.format("expected: %s, actual: %s", HashCode.fromBytes(onlineAccount.getSignature()), HashCode.fromBytes(aggregateSignature)), onlineAccount.getSignature(), aggregateSignature); + + // This is the crucial test: + long timestamp = onlineAccount.getTimestamp(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes)); + } + + @Test + public void testMultipleAggregate() { + List onlineAccounts = generateOnlineAccounts(5000); + + byte[] aggregatePublicKey = aggregatePublicKeys(onlineAccounts.stream().map(OnlineAccountData::getPublicKey).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate public key: %s%n", HashCode.fromBytes(aggregatePublicKey)); + + byte[] aggregateSignature = aggregateSignatures(onlineAccounts.stream().map(OnlineAccountData::getSignature).collect(Collectors.toUnmodifiableList())); + System.out.printf("Aggregate signature: %s%n", HashCode.fromBytes(aggregateSignature)); + + OnlineAccountData onlineAccount = onlineAccounts.get(0); + + // This is the crucial test: + long timestamp = onlineAccount.getTimestamp(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + assertTrue(verifyAggregated(aggregatePublicKey, aggregateSignature, timestampBytes)); + } + + private List generateOnlineAccounts(int numAccounts) { + List onlineAccounts = new ArrayList<>(); + + long timestamp = System.currentTimeMillis(); + byte[] timestampBytes = Longs.toByteArray(timestamp); + + for (int a = 0; a < numAccounts; ++a) { + byte[] privateKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + SECURE_RANDOM.nextBytes(privateKey); + + byte[] publicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + Qortal25519Extras.generatePublicKey(privateKey, 0, publicKey, 0); + + byte[] signature = signForAggregation(privateKey, timestampBytes); + + onlineAccounts.add(new OnlineAccountData(timestamp, signature, publicKey)); + } + + return onlineAccounts; + } +} From 51930d3ccf9d99327d08fd21cb0380163d4aa466 Mon Sep 17 00:00:00 2001 From: catbref Date: Sat, 14 May 2022 16:50:52 +0100 Subject: [PATCH 10/18] Move some private key methods to Crypto class --- .../org/qortal/account/PrivateKeyAccount.java | 14 +++++--------- .../org/qortal/controller/tradebot/TradeBot.java | 2 +- src/main/java/org/qortal/crypto/Crypto.java | 15 +++++++++++++-- .../repository/hsqldb/HSQLDBRepository.java | 3 +-- .../org/qortal/test/apps/RewardShareKeys.java | 3 ++- .../java/org/qortal/test/common/AccountUtils.java | 3 ++- 6 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/main/java/org/qortal/account/PrivateKeyAccount.java b/src/main/java/org/qortal/account/PrivateKeyAccount.java index 3b370d12..4b646b4a 100644 --- a/src/main/java/org/qortal/account/PrivateKeyAccount.java +++ 
b/src/main/java/org/qortal/account/PrivateKeyAccount.java @@ -11,15 +11,15 @@ public class PrivateKeyAccount extends PublicKeyAccount { private final Ed25519PrivateKeyParameters edPrivateKeyParams; /** - * Create PrivateKeyAccount using byte[32] seed. + * Create PrivateKeyAccount using byte[32] private key. * - * @param seed + * @param privateKey * byte[32] used to create private/public key pair * @throws IllegalArgumentException - * if passed invalid seed + * if passed invalid privateKey */ - public PrivateKeyAccount(Repository repository, byte[] seed) { - this(repository, new Ed25519PrivateKeyParameters(seed, 0)); + public PrivateKeyAccount(Repository repository, byte[] privateKey) { + this(repository, new Ed25519PrivateKeyParameters(privateKey, 0)); } private PrivateKeyAccount(Repository repository, Ed25519PrivateKeyParameters edPrivateKeyParams) { @@ -37,10 +37,6 @@ public class PrivateKeyAccount extends PublicKeyAccount { return this.privateKey; } - public static byte[] toPublicKey(byte[] seed) { - return new Ed25519PrivateKeyParameters(seed, 0).generatePublicKey().getEncoded(); - } - public byte[] sign(byte[] message) { return Crypto.sign(this.edPrivateKeyParams, message); } diff --git a/src/main/java/org/qortal/controller/tradebot/TradeBot.java b/src/main/java/org/qortal/controller/tradebot/TradeBot.java index 938141e0..1836118a 100644 --- a/src/main/java/org/qortal/controller/tradebot/TradeBot.java +++ b/src/main/java/org/qortal/controller/tradebot/TradeBot.java @@ -292,7 +292,7 @@ public class TradeBot implements Listener { } public static byte[] deriveTradeNativePublicKey(byte[] privateKey) { - return PrivateKeyAccount.toPublicKey(privateKey); + return Crypto.toPublicKey(privateKey); } public static byte[] deriveTradeForeignPublicKey(byte[] privateKey) { diff --git a/src/main/java/org/qortal/crypto/Crypto.java b/src/main/java/org/qortal/crypto/Crypto.java index 8ee0b2b2..75e5028e 100644 --- a/src/main/java/org/qortal/crypto/Crypto.java +++ b/src/main/java/org/qortal/crypto/Crypto.java @@ -253,6 +253,10 @@ public abstract class Crypto { return false; } + public static byte[] toPublicKey(byte[] privateKey) { + return new Ed25519PrivateKeyParameters(privateKey, 0).generatePublicKey().getEncoded(); + } + public static boolean verify(byte[] publicKey, byte[] signature, byte[] message) { try { return Ed25519.verify(signature, 0, publicKey, 0, message, 0, message.length); @@ -264,7 +268,15 @@ public abstract class Crypto { public static byte[] sign(Ed25519PrivateKeyParameters edPrivateKeyParams, byte[] message) { byte[] signature = new byte[SIGNATURE_LENGTH]; - edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519, edPrivateKeyParams.generatePublicKey(), null, message, 0, message.length, signature, 0); + edPrivateKeyParams.sign(Ed25519.Algorithm.Ed25519,null, message, 0, message.length, signature, 0); + + return signature; + } + + public static byte[] sign(byte[] privateKey, byte[] message) { + byte[] signature = new byte[SIGNATURE_LENGTH]; + + new Ed25519PrivateKeyParameters(privateKey, 0).sign(Ed25519.Algorithm.Ed25519,null, message, 0, message.length, signature, 0); return signature; } @@ -281,5 +293,4 @@ public abstract class Crypto { return sharedSecret; } - } diff --git a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java index 61f4b76f..6ec30e20 100644 --- a/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java +++ b/src/main/java/org/qortal/repository/hsqldb/HSQLDBRepository.java @@ -23,7 
+23,6 @@ import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.qortal.account.PrivateKeyAccount; import org.qortal.crypto.Crypto; import org.qortal.globalization.Translator; import org.qortal.gui.SysTray; @@ -1003,7 +1002,7 @@ public class HSQLDBRepository implements Repository { if (privateKey == null) return null; - return PrivateKeyAccount.toPublicKey(privateKey); + return Crypto.toPublicKey(privateKey); } public static String ed25519PublicKeyToAddress(byte[] publicKey) { diff --git a/src/test/java/org/qortal/test/apps/RewardShareKeys.java b/src/test/java/org/qortal/test/apps/RewardShareKeys.java index e0bfc1cf..5ba1aab4 100644 --- a/src/test/java/org/qortal/test/apps/RewardShareKeys.java +++ b/src/test/java/org/qortal/test/apps/RewardShareKeys.java @@ -6,6 +6,7 @@ import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider; import org.qortal.account.PrivateKeyAccount; import org.qortal.account.PublicKeyAccount; +import org.qortal.crypto.Crypto; import org.qortal.utils.Base58; public class RewardShareKeys { @@ -28,7 +29,7 @@ public class RewardShareKeys { PublicKeyAccount recipientAccount = new PublicKeyAccount(null, args.length > 1 ? Base58.decode(args[1]) : minterAccount.getPublicKey()); byte[] rewardSharePrivateKey = minterAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey()); - byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey); + byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey); System.out.println(String.format("Minter account: %s", minterAccount.getAddress())); System.out.println(String.format("Minter's public key: %s", Base58.encode(minterAccount.getPublicKey()))); diff --git a/src/test/java/org/qortal/test/common/AccountUtils.java b/src/test/java/org/qortal/test/common/AccountUtils.java index 0e7ef020..bda1ae61 100644 --- a/src/test/java/org/qortal/test/common/AccountUtils.java +++ b/src/test/java/org/qortal/test/common/AccountUtils.java @@ -6,6 +6,7 @@ import java.util.HashMap; import java.util.Map; import org.qortal.account.PrivateKeyAccount; +import org.qortal.crypto.Crypto; import org.qortal.data.transaction.BaseTransactionData; import org.qortal.data.transaction.PaymentTransactionData; import org.qortal.data.transaction.RewardShareTransactionData; @@ -45,7 +46,7 @@ public class AccountUtils { long timestamp = repository.getTransactionRepository().fromSignature(reference).getTimestamp() + 1; byte[] rewardSharePrivateKey = mintingAccount.getRewardSharePrivateKey(recipientAccount.getPublicKey()); - byte[] rewardSharePublicKey = PrivateKeyAccount.toPublicKey(rewardSharePrivateKey); + byte[] rewardSharePublicKey = Crypto.toPublicKey(rewardSharePrivateKey); BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, mintingAccount.getPublicKey(), fee, null); TransactionData transactionData = new RewardShareTransactionData(baseTransactionData, recipientAccount.getAddress(), rewardSharePublicKey, sharePercent); From 84d850ee0b789838f76dd317554dfdd7cdb74655 Mon Sep 17 00:00:00 2001 From: catbref Date: Sat, 14 May 2022 16:52:34 +0100 Subject: [PATCH 11/18] WIP: use blockchain feature-trigger "aggregateSignatureTimestamp" to determine when online-accounts sigs and block sigs switch to aggregate sigs --- src/main/java/org/qortal/block/Block.java | 82 +++++++++++++------ .../java/org/qortal/block/BlockChain.java | 7 +- 
.../controller/OnlineAccountsManager.java | 16 +++- src/main/resources/blockchain.json | 3 +- 4 files changed, 78 insertions(+), 30 deletions(-) diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index 41700714..f5e2cb6d 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -27,6 +27,7 @@ import org.qortal.block.BlockChain.BlockTimingByHeight; import org.qortal.block.BlockChain.AccountLevelShareBin; import org.qortal.controller.OnlineAccountsManager; import org.qortal.crypto.Crypto; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.data.account.AccountBalanceData; import org.qortal.data.account.AccountData; import org.qortal.data.account.EligibleQoraHolderData; @@ -388,12 +389,24 @@ public class Block { byte[] encodedOnlineAccounts = BlockTransformer.encodeOnlineAccounts(onlineAccountsSet); int onlineAccountsCount = onlineAccountsSet.size(); - // Concatenate online account timestamp signatures (in correct order) - byte[] onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH]; - for (int i = 0; i < onlineAccountsCount; ++i) { - Integer accountIndex = accountIndexes.get(i); - OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex); - System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH); + byte[] onlineAccountsSignatures; + if (timestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // Collate all signatures + Collection signaturesToAggregate = indexedOnlineAccounts.values() + .stream() + .map(OnlineAccountData::getSignature) + .collect(Collectors.toList()); + + // Aggregated, single signature + onlineAccountsSignatures = Qortal25519Extras.aggregateSignatures(signaturesToAggregate); + } else { + // Concatenate online account timestamp signatures (in correct order) + onlineAccountsSignatures = new byte[onlineAccountsCount * Transformer.SIGNATURE_LENGTH]; + for (int i = 0; i < onlineAccountsCount; ++i) { + Integer accountIndex = accountIndexes.get(i); + OnlineAccountData onlineAccountData = indexedOnlineAccounts.get(accountIndex); + System.arraycopy(onlineAccountData.getSignature(), 0, onlineAccountsSignatures, i * Transformer.SIGNATURE_LENGTH, Transformer.SIGNATURE_LENGTH); + } } byte[] minterSignature = minter.sign(BlockTransformer.getBytesForMinterSignature(parentBlockData, @@ -1003,36 +1016,57 @@ public class Block { if (this.blockData.getOnlineAccountsSignatures() == null || this.blockData.getOnlineAccountsSignatures().length == 0) return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MISSING; - if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH) - return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // We expect just the one, aggregated signature + if (this.blockData.getOnlineAccountsSignatures().length != Transformer.SIGNATURE_LENGTH) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + } else { + if (this.blockData.getOnlineAccountsSignatures().length != onlineRewardShares.size() * Transformer.SIGNATURE_LENGTH) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURES_MALFORMED; + } // Check signatures long onlineTimestamp = this.blockData.getOnlineAccountsTimestamp(); byte[] onlineTimestampBytes = Longs.toByteArray(onlineTimestamp); - // 
Extract online accounts' timestamp signatures from block data + // Extract online accounts' timestamp signatures from block data. Only one signature if aggregated. List onlineAccountsSignatures = BlockTransformer.decodeTimestampSignatures(this.blockData.getOnlineAccountsSignatures()); - // Convert - Set onlineAccounts = new HashSet<>(); - for (int i = 0; i < onlineAccountsSignatures.size(); ++i) { - byte[] signature = onlineAccountsSignatures.get(i); - byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey(); + if (this.blockData.getTimestamp() >= BlockChain.getInstance().getAggregateSignatureTimestamp()) { + // Aggregate all public keys + Collection publicKeys = onlineRewardShares.stream() + .map(RewardShareData::getRewardSharePublicKey) + .collect(Collectors.toList()); - OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey); - onlineAccounts.add(onlineAccountData); - } + byte[] aggregatePublicKey = Qortal25519Extras.aggregatePublicKeys(publicKeys); - // Remove those already validated & cached by online accounts manager - no need to re-validate them - OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp); + byte[] aggregateSignature = onlineAccountsSignatures.get(0); - // Validate the rest - for (OnlineAccountData onlineAccount : onlineAccounts) - if (!Crypto.verify(onlineAccount.getPublicKey(), onlineAccount.getSignature(), onlineTimestampBytes)) + // One-step verification of aggregate signature using aggregate public key + if (!Qortal25519Extras.verifyAggregated(aggregatePublicKey, aggregateSignature, onlineTimestampBytes)) return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + } else { + // Build block's view of online accounts + Set onlineAccounts = new HashSet<>(); + for (int i = 0; i < onlineAccountsSignatures.size(); ++i) { + byte[] signature = onlineAccountsSignatures.get(i); + byte[] publicKey = onlineRewardShares.get(i).getRewardSharePublicKey(); - // We've validated these, so allow online accounts manager to cache - OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp); + OnlineAccountData onlineAccountData = new OnlineAccountData(onlineTimestamp, signature, publicKey); + onlineAccounts.add(onlineAccountData); + } + + // Remove those already validated & cached by online accounts manager - no need to re-validate them + OnlineAccountsManager.getInstance().removeKnown(onlineAccounts, onlineTimestamp); + + // Validate the rest + for (OnlineAccountData onlineAccount : onlineAccounts) + if (!Crypto.verify(onlineAccount.getPublicKey(), onlineAccount.getSignature(), onlineTimestampBytes)) + return ValidationResult.ONLINE_ACCOUNT_SIGNATURE_INCORRECT; + + // We've validated these, so allow online accounts manager to cache + OnlineAccountsManager.getInstance().addBlocksOnlineAccounts(onlineAccounts, onlineTimestamp); + } // All online accounts valid, so save our list of online accounts for potential later use this.cachedOnlineRewardShares = onlineRewardShares; diff --git a/src/main/java/org/qortal/block/BlockChain.java b/src/main/java/org/qortal/block/BlockChain.java index 44ad4a7f..7c4afcdb 100644 --- a/src/main/java/org/qortal/block/BlockChain.java +++ b/src/main/java/org/qortal/block/BlockChain.java @@ -70,7 +70,8 @@ public class BlockChain { shareBinFix, calcChainWeightTimestamp, transactionV5Timestamp, - transactionV6Timestamp; + transactionV6Timestamp, + aggregateSignatureTimestamp; } // Custom transaction fees @@ -410,6 +411,10 @@ public class 
BlockChain { return this.featureTriggers.get(FeatureTrigger.transactionV6Timestamp.name()).longValue(); } + public long getAggregateSignatureTimestamp() { + return this.featureTriggers.get(FeatureTrigger.aggregateSignatureTimestamp.name()).longValue(); + } + // More complex getters for aspects that change by height or timestamp public long getRewardAtHeight(int ourHeight) { diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java index bd4880c4..f472199e 100644 --- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java +++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java @@ -9,6 +9,7 @@ import org.qortal.account.PrivateKeyAccount; import org.qortal.block.Block; import org.qortal.block.BlockChain; import org.qortal.crypto.Crypto; +import org.qortal.crypto.Qortal25519Extras; import org.qortal.data.account.MintingAccountData; import org.qortal.data.account.RewardShareData; import org.qortal.data.network.OnlineAccountData; @@ -204,7 +205,10 @@ public class OnlineAccountsManager { // Verify signature byte[] data = Longs.toByteArray(onlineAccountData.getTimestamp()); - if (!Crypto.verify(rewardSharePublicKey, onlineAccountData.getSignature(), data)) { + boolean isSignatureValid = onlineAccountTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp() + ? Qortal25519Extras.verifyAggregated(rewardSharePublicKey, onlineAccountData.getSignature(), data) + : Crypto.verify(rewardSharePublicKey, onlineAccountData.getSignature(), data); + if (!isSignatureValid) { LOGGER.trace(() -> String.format("Rejecting invalid online account %s", Base58.encode(rewardSharePublicKey))); return false; } @@ -387,14 +391,18 @@ public class OnlineAccountsManager { return; } + final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp(); + byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp); List ourOnlineAccounts = new ArrayList<>(); for (MintingAccountData mintingAccountData : mintingAccounts) { - PrivateKeyAccount mintingAccount = new PrivateKeyAccount(null, mintingAccountData.getPrivateKey()); + byte[] privateKey = mintingAccountData.getPrivateKey(); + byte[] publicKey = Crypto.toPublicKey(privateKey); - byte[] signature = mintingAccount.sign(timestampBytes); - byte[] publicKey = mintingAccount.getPublicKey(); + byte[] signature = useAggregateCompatibleSignature + ? 
Qortal25519Extras.signForAggregation(privateKey, timestampBytes) + : Crypto.sign(privateKey, timestampBytes); // Our account is online OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey); diff --git a/src/main/resources/blockchain.json b/src/main/resources/blockchain.json index c8502d1b..9190cb39 100644 --- a/src/main/resources/blockchain.json +++ b/src/main/resources/blockchain.json @@ -59,7 +59,8 @@ "shareBinFix": 399000, "calcChainWeightTimestamp": 1620579600000, "transactionV5Timestamp": 1642176000000, - "transactionV6Timestamp": 9999999999999 + "transactionV6Timestamp": 9999999999999, + "aggregateSignatureTimestamp": 9999999999999 }, "genesisInfo": { "version": 4, From 0cf32f6c5e3a3e1c79d77747600ad5fbd94ceba2 Mon Sep 17 00:00:00 2001 From: catbref Date: Mon, 16 May 2022 20:09:47 +0100 Subject: [PATCH 12/18] BlockMinter now only acquires repository instance as needed to prevent long HSQLDB rollbacks --- .../org/qortal/controller/BlockMinter.java | 580 +++++++++--------- 1 file changed, 293 insertions(+), 287 deletions(-) diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java index 76b57c44..07a31ad7 100644 --- a/src/main/java/org/qortal/controller/BlockMinter.java +++ b/src/main/java/org/qortal/controller/BlockMinter.java @@ -65,9 +65,8 @@ public class BlockMinter extends Thread { // Lite nodes do not mint return; } - - try (final Repository repository = RepositoryManager.getRepository()) { - if (Settings.getInstance().getWipeUnconfirmedOnStart()) { + if (Settings.getInstance().getWipeUnconfirmedOnStart()) { + try (final Repository repository = RepositoryManager.getRepository()) { // Wipe existing unconfirmed transactions List unconfirmedTransactions = repository.getTransactionRepository().getUnconfirmedTransactions(); @@ -77,30 +76,31 @@ public class BlockMinter extends Thread { } repository.saveChanges(); + } catch (DataException e) { + LOGGER.warn("Repository issue trying to wipe unconfirmed transactions on start-up: {}", e.getMessage()); + // Fall-through to normal behaviour in case we can recover } + } - // Going to need this a lot... - BlockRepository blockRepository = repository.getBlockRepository(); - BlockData previousBlockData = null; + BlockData previousBlockData = null; - // Vars to keep track of blocks that were skipped due to chain weight - byte[] parentSignatureForLastLowWeightBlock = null; - Long timeOfLastLowWeightBlock = null; + // Vars to keep track of blocks that were skipped due to chain weight + byte[] parentSignatureForLastLowWeightBlock = null; + Long timeOfLastLowWeightBlock = null; - List newBlocks = new ArrayList<>(); + List newBlocks = new ArrayList<>(); - // Flags for tracking change in whether minting is possible, - // so we can notify Controller, and further update SysTray, etc. - boolean isMintingPossible = false; - boolean wasMintingPossible = isMintingPossible; - while (running) { - repository.discardChanges(); // Free repository locks, if any + // Flags for tracking change in whether minting is possible, + // so we can notify Controller, and further update SysTray, etc. 
+ boolean isMintingPossible = false; + boolean wasMintingPossible = isMintingPossible; + while (running) { + if (isMintingPossible != wasMintingPossible) + Controller.getInstance().onMintingPossibleChange(isMintingPossible); - if (isMintingPossible != wasMintingPossible) - Controller.getInstance().onMintingPossibleChange(isMintingPossible); - - wasMintingPossible = isMintingPossible; + wasMintingPossible = isMintingPossible; + try { // Sleep for a while Thread.sleep(1000); @@ -118,315 +118,321 @@ public class BlockMinter extends Thread { if (!OnlineAccountsManager.getInstance().hasOnlineAccounts()) continue; - List mintingAccountsData = repository.getAccountRepository().getMintingAccounts(); - // No minting accounts? - if (mintingAccountsData.isEmpty()) - continue; + try (final Repository repository = RepositoryManager.getRepository()) { + // Going to need this a lot... + BlockRepository blockRepository = repository.getBlockRepository(); - // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level - // Note that minting accounts are actually reward-shares in Qortal - Iterator madi = mintingAccountsData.iterator(); - while (madi.hasNext()) { - MintingAccountData mintingAccountData = madi.next(); - - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey()); - if (rewardShareData == null) { - // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts - madi.remove(); - continue; - } - - Account mintingAccount = new Account(repository, rewardShareData.getMinter()); - if (!mintingAccount.canMint()) { - // Minting-account component of reward-share can no longer mint - disregard - madi.remove(); - continue; - } - - // Optional (non-validated) prevention of block submissions below a defined level. - // This is an unvalidated version of Blockchain.minAccountLevelToMint - // and exists only to reduce block candidates by default. - int level = mintingAccount.getEffectiveMintingLevel(); - if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) { - madi.remove(); - continue; - } - } - - // Needs a mutable copy of the unmodifiableList - List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers()); - BlockData lastBlockData = blockRepository.getLastBlock(); - - // Disregard peers that have "misbehaved" recently - peers.removeIf(Controller.hasMisbehaved); - - // Disregard peers that don't have a recent block, but only if we're not in recovery mode. - // In that mode, we want to allow minting on top of older blocks, to recover stalled networks. - if (Synchronizer.getInstance().getRecoveryMode() == false) - peers.removeIf(Controller.hasNoRecentBlock); - - // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from? 
- if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) - continue; - - // If we are stuck on an invalid block, we should allow an alternative to be minted - boolean recoverInvalidBlock = false; - if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) { - // We've had at least one invalid block - long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived; - long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived; - if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) { - if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) { - // Last valid block was more than 10 mins ago, but we've had an invalid block since then - // Assume that the chain has stalled because there is no alternative valid candidate - // Enter recovery mode to allow alternative, valid candidates to be minted - recoverInvalidBlock = true; - } - } - } - - // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode. - if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp) - if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false) + List mintingAccountsData = repository.getAccountRepository().getMintingAccounts(); + // No minting accounts? + if (mintingAccountsData.isEmpty()) continue; - // There are enough peers with a recent block and our latest block is recent - // so go ahead and mint a block if possible. - isMintingPossible = true; + // Disregard minting accounts that are no longer valid, e.g. by transfer/loss of founder flag or account level + // Note that minting accounts are actually reward-shares in Qortal + Iterator madi = mintingAccountsData.iterator(); + while (madi.hasNext()) { + MintingAccountData mintingAccountData = madi.next(); - // Check blockchain hasn't changed - if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) { - previousBlockData = lastBlockData; - newBlocks.clear(); - - // Reduce log timeout - logTimeout = 10 * 1000L; - - // Last low weight block is no longer valid - parentSignatureForLastLowWeightBlock = null; - } - - // Discard accounts we have already built blocks with - mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey()))); - - // Do we need to build any potential new blocks? - List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList()); - - // We might need to sit the next block out, if one of our minting accounts signed the previous one - final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey(); - final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter)); - if (mintedLastBlock) { - LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one")); - continue; - } - - if (parentSignatureForLastLowWeightBlock != null) { - // The last iteration found a higher weight block in the network, so sleep for a while - // to allow is to sync the higher weight chain. We are sleeping here rather than when - // detected as we don't want to hold the blockchain lock open. 
- LOGGER.debug("Sleeping for 10 seconds..."); - Thread.sleep(10 * 1000L); - } - - for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) { - // First block does the AT heavy-lifting - if (newBlocks.isEmpty()) { - Block newBlock = Block.mint(repository, previousBlockData, mintingAccount); - if (newBlock == null) { - // For some reason we can't mint right now - moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block")); + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey()); + if (rewardShareData == null) { + // Reward-share doesn't exist - probably cancelled but not yet removed from node's list of minting accounts + madi.remove(); continue; } - newBlocks.add(newBlock); - } else { - // The blocks for other minters require less effort... - Block newBlock = newBlocks.get(0).remint(mintingAccount); - if (newBlock == null) { - // For some reason we can't mint right now - moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block")); + Account mintingAccount = new Account(repository, rewardShareData.getMinter()); + if (!mintingAccount.canMint()) { + // Minting-account component of reward-share can no longer mint - disregard + madi.remove(); continue; } - newBlocks.add(newBlock); + // Optional (non-validated) prevention of block submissions below a defined level. + // This is an unvalidated version of Blockchain.minAccountLevelToMint + // and exists only to reduce block candidates by default. + int level = mintingAccount.getEffectiveMintingLevel(); + if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) { + madi.remove(); + continue; + } } - } - // No potential block candidates? - if (newBlocks.isEmpty()) - continue; + // Needs a mutable copy of the unmodifiableList + List peers = new ArrayList<>(Network.getInstance().getImmutableHandshakedPeers()); + BlockData lastBlockData = blockRepository.getLastBlock(); - // Make sure we're the only thread modifying the blockchain - ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); - if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) { - LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds"); - continue; - } + // Disregard peers that have "misbehaved" recently + peers.removeIf(Controller.hasMisbehaved); - boolean newBlockMinted = false; - Block newBlock = null; + // Disregard peers that don't have a recent block, but only if we're not in recovery mode. + // In that mode, we want to allow minting on top of older blocks, to recover stalled networks. + if (Synchronizer.getInstance().getRecoveryMode() == false) + peers.removeIf(Controller.hasNoRecentBlock); - try { - // Clear repository session state so we have latest view of data - repository.discardChanges(); - - // Now that we have blockchain lock, do final check that chain hasn't changed - BlockData latestBlockData = blockRepository.getLastBlock(); - if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature())) + // Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from? + if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) continue; - List goodBlocks = new ArrayList<>(); - for (Block testBlock : newBlocks) { - // Is new block's timestamp valid yet? 
- // We do a separate check as some timestamp checks are skipped for testchains - if (testBlock.isTimestampValid() != ValidationResult.OK) - continue; - - testBlock.preProcess(); - - // Is new block valid yet? (Before adding unconfirmed transactions) - ValidationResult result = testBlock.isValid(); - if (result != ValidationResult.OK) { - moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name()))); - - continue; - } - - goodBlocks.add(testBlock); - } - - if (goodBlocks.isEmpty()) - continue; - - // Pick best block - final int parentHeight = previousBlockData.getHeight(); - final byte[] parentBlockSignature = previousBlockData.getSignature(); - - BigInteger bestWeight = null; - - for (int bi = 0; bi < goodBlocks.size(); ++bi) { - BlockData blockData = goodBlocks.get(bi).getBlockData(); - - BlockSummaryData blockSummaryData = new BlockSummaryData(blockData); - int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey()); - blockSummaryData.setMinterLevel(minterLevel); - - BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData); - - if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) { - newBlock = goodBlocks.get(bi); - bestWeight = blockWeight; - } - } - - try { - if (this.higherWeightChainExists(repository, bestWeight)) { - - // Check if the base block has updated since the last time we were here - if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null || - !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) { - // We've switched to a different chain, so reset the timer - timeOfLastLowWeightBlock = NTP.getTime(); + // If we are stuck on an invalid block, we should allow an alternative to be minted + boolean recoverInvalidBlock = false; + if (Synchronizer.getInstance().timeInvalidBlockLastReceived != null) { + // We've had at least one invalid block + long timeSinceLastValidBlock = NTP.getTime() - Synchronizer.getInstance().timeValidBlockLastReceived; + long timeSinceLastInvalidBlock = NTP.getTime() - Synchronizer.getInstance().timeInvalidBlockLastReceived; + if (timeSinceLastValidBlock > INVALID_BLOCK_RECOVERY_TIMEOUT) { + if (timeSinceLastInvalidBlock < INVALID_BLOCK_RECOVERY_TIMEOUT) { + // Last valid block was more than 10 mins ago, but we've had an invalid block since then + // Assume that the chain has stalled because there is no alternative valid candidate + // Enter recovery mode to allow alternative, valid candidates to be minted + recoverInvalidBlock = true; } - parentSignatureForLastLowWeightBlock = previousBlockData.getSignature(); + } + } - // If less than 30 seconds has passed since first detection the higher weight chain, - // we should skip our block submission to give us the opportunity to sync to the better chain - if (NTP.getTime() - timeOfLastLowWeightBlock < 30*1000L) { - LOGGER.debug("Higher weight chain found in peers, so not signing a block this round"); - LOGGER.debug("Time since detected: {}ms", NTP.getTime() - timeOfLastLowWeightBlock); + // If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode. 
+ if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp) + if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false) + continue; + + // There are enough peers with a recent block and our latest block is recent + // so go ahead and mint a block if possible. + isMintingPossible = true; + + // Reattach newBlocks to new repository handle + for (Block newBlock : newBlocks) + newBlock.setRepository(repository); + + // Check blockchain hasn't changed + if (previousBlockData == null || !Arrays.equals(previousBlockData.getSignature(), lastBlockData.getSignature())) { + previousBlockData = lastBlockData; + newBlocks.clear(); + + // Reduce log timeout + logTimeout = 10 * 1000L; + + // Last low weight block is no longer valid + parentSignatureForLastLowWeightBlock = null; + } + + // Discard accounts we have already built blocks with + mintingAccountsData.removeIf(mintingAccountData -> newBlocks.stream().anyMatch(newBlock -> Arrays.equals(newBlock.getBlockData().getMinterPublicKey(), mintingAccountData.getPublicKey()))); + + // Do we need to build any potential new blocks? + List newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList()); + + // We might need to sit the next block out, if one of our minting accounts signed the previous one + final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey(); + final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter)); + if (mintedLastBlock) { + LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one")); + continue; + } + + if (parentSignatureForLastLowWeightBlock != null) { + // The last iteration found a higher weight block in the network, so sleep for a while + // to allow is to sync the higher weight chain. We are sleeping here rather than when + // detected as we don't want to hold the blockchain lock open. + LOGGER.info("Sleeping for 10 seconds..."); + Thread.sleep(10 * 1000L); + } + + for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) { + // First block does the AT heavy-lifting + if (newBlocks.isEmpty()) { + Block newBlock = Block.mint(repository, previousBlockData, mintingAccount); + if (newBlock == null) { + // For some reason we can't mint right now + moderatedLog(() -> LOGGER.error("Couldn't build a to-be-minted block")); continue; } - else { - // More than 30 seconds have passed, so we should submit our block candidate anyway. - LOGGER.debug("More than 30 seconds passed, so proceeding to submit block candidate..."); + + newBlocks.add(newBlock); + } else { + // The blocks for other minters require less effort... + Block newBlock = newBlocks.get(0).remint(mintingAccount); + if (newBlock == null) { + // For some reason we can't mint right now + moderatedLog(() -> LOGGER.error("Couldn't rebuild a to-be-minted block")); + continue; } + + newBlocks.add(newBlock); } - else { - LOGGER.debug("No higher weight chain found in peers"); - } - } catch (DataException e) { - LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway..."); } - // Discard any uncommitted changes as a result of the higher weight chain detection - repository.discardChanges(); + // No potential block candidates? 
+ if (newBlocks.isEmpty()) + continue; - // Clear variables that track low weight blocks - parentSignatureForLastLowWeightBlock = null; - timeOfLastLowWeightBlock = null; - - - // Add unconfirmed transactions - addUnconfirmedTransactions(repository, newBlock); - - // Sign to create block's signature - newBlock.sign(); - - // Is newBlock still valid? - ValidationResult validationResult = newBlock.isValid(); - if (validationResult != ValidationResult.OK) { - // No longer valid? Report and discard - LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name())); - - // Rebuild block candidates, just to be sure - newBlocks.clear(); + // Make sure we're the only thread modifying the blockchain + ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock(); + if (!blockchainLock.tryLock(30, TimeUnit.SECONDS)) { + LOGGER.debug("Couldn't acquire blockchain lock even after waiting 30 seconds"); continue; } - // Add to blockchain - something else will notice and broadcast new block to network + boolean newBlockMinted = false; + Block newBlock = null; + try { - newBlock.process(); + // Clear repository session state so we have latest view of data + repository.discardChanges(); - repository.saveChanges(); + // Now that we have blockchain lock, do final check that chain hasn't changed + BlockData latestBlockData = blockRepository.getLastBlock(); + if (!Arrays.equals(lastBlockData.getSignature(), latestBlockData.getSignature())) + continue; - LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight())); + List goodBlocks = new ArrayList<>(); + for (Block testBlock : newBlocks) { + // Is new block's timestamp valid yet? + // We do a separate check as some timestamp checks are skipped for testchains + if (testBlock.isTimestampValid() != ValidationResult.OK) + continue; - RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey()); + testBlock.preProcess(); - if (rewardShareData != null) { - LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s", - newBlock.getBlockData().getHeight(), - Base58.encode(newBlock.getBlockData().getSignature()), - Base58.encode(newBlock.getParent().getSignature()), - rewardShareData.getMinter(), - rewardShareData.getRecipient())); - } else { - LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s", - newBlock.getBlockData().getHeight(), - Base58.encode(newBlock.getBlockData().getSignature()), - Base58.encode(newBlock.getParent().getSignature()), - newBlock.getMinter().getAddress())); + // Is new block valid yet? 
(Before adding unconfirmed transactions) + ValidationResult result = testBlock.isValid(); + if (result != ValidationResult.OK) { + moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name()))); + + continue; + } + + goodBlocks.add(testBlock); } - // Notify network after we're released blockchain lock - newBlockMinted = true; + if (goodBlocks.isEmpty()) + continue; - // Notify Controller - repository.discardChanges(); // clear transaction status to prevent deadlocks - Controller.getInstance().onNewBlock(newBlock.getBlockData()); - } catch (DataException e) { - // Unable to process block - report and discard - LOGGER.error("Unable to process newly minted block?", e); - newBlocks.clear(); + // Pick best block + final int parentHeight = previousBlockData.getHeight(); + final byte[] parentBlockSignature = previousBlockData.getSignature(); + + BigInteger bestWeight = null; + + for (int bi = 0; bi < goodBlocks.size(); ++bi) { + BlockData blockData = goodBlocks.get(bi).getBlockData(); + + BlockSummaryData blockSummaryData = new BlockSummaryData(blockData); + int minterLevel = Account.getRewardShareEffectiveMintingLevel(repository, blockData.getMinterPublicKey()); + blockSummaryData.setMinterLevel(minterLevel); + + BigInteger blockWeight = Block.calcBlockWeight(parentHeight, parentBlockSignature, blockSummaryData); + + if (bestWeight == null || blockWeight.compareTo(bestWeight) < 0) { + newBlock = goodBlocks.get(bi); + bestWeight = blockWeight; + } + } + + try { + if (this.higherWeightChainExists(repository, bestWeight)) { + + // Check if the base block has updated since the last time we were here + if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null || + !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) { + // We've switched to a different chain, so reset the timer + timeOfLastLowWeightBlock = NTP.getTime(); + } + parentSignatureForLastLowWeightBlock = previousBlockData.getSignature(); + + // If less than 30 seconds has passed since first detection the higher weight chain, + // we should skip our block submission to give us the opportunity to sync to the better chain + if (NTP.getTime() - timeOfLastLowWeightBlock < 30 * 1000L) { + LOGGER.info("Higher weight chain found in peers, so not signing a block this round"); + LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock); + continue; + } else { + // More than 30 seconds have passed, so we should submit our block candidate anyway. + LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate..."); + } + } else { + LOGGER.debug("No higher weight chain found in peers"); + } + } catch (DataException e) { + LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway..."); + } + + // Discard any uncommitted changes as a result of the higher weight chain detection + repository.discardChanges(); + + // Clear variables that track low weight blocks + parentSignatureForLastLowWeightBlock = null; + timeOfLastLowWeightBlock = null; + + // Add unconfirmed transactions + addUnconfirmedTransactions(repository, newBlock); + + // Sign to create block's signature + newBlock.sign(); + + // Is newBlock still valid? + ValidationResult validationResult = newBlock.isValid(); + if (validationResult != ValidationResult.OK) { + // No longer valid? 
Report and discard + LOGGER.error(String.format("To-be-minted block now invalid '%s' after adding unconfirmed transactions?", validationResult.name())); + + // Rebuild block candidates, just to be sure + newBlocks.clear(); + continue; + } + + // Add to blockchain - something else will notice and broadcast new block to network + try { + newBlock.process(); + + repository.saveChanges(); + + LOGGER.info(String.format("Minted new block: %d", newBlock.getBlockData().getHeight())); + + RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(newBlock.getBlockData().getMinterPublicKey()); + + if (rewardShareData != null) { + LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s on behalf of %s", + newBlock.getBlockData().getHeight(), + Base58.encode(newBlock.getBlockData().getSignature()), + Base58.encode(newBlock.getParent().getSignature()), + rewardShareData.getMinter(), + rewardShareData.getRecipient())); + } else { + LOGGER.info(String.format("Minted block %d, sig %.8s, parent sig: %.8s by %s", + newBlock.getBlockData().getHeight(), + Base58.encode(newBlock.getBlockData().getSignature()), + Base58.encode(newBlock.getParent().getSignature()), + newBlock.getMinter().getAddress())); + } + + // Notify network after we're released blockchain lock + newBlockMinted = true; + + // Notify Controller + repository.discardChanges(); // clear transaction status to prevent deadlocks + Controller.getInstance().onNewBlock(newBlock.getBlockData()); + } catch (DataException e) { + // Unable to process block - report and discard + LOGGER.error("Unable to process newly minted block?", e); + newBlocks.clear(); + } + } finally { + blockchainLock.unlock(); } - } finally { - blockchainLock.unlock(); - } - if (newBlockMinted) { - // Broadcast our new chain to network - BlockData newBlockData = newBlock.getBlockData(); + if (newBlockMinted) { + // Broadcast our new chain to network + BlockData newBlockData = newBlock.getBlockData(); - Network network = Network.getInstance(); - network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData)); + Network network = Network.getInstance(); + network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newBlockData)); + } + } catch (DataException e) { + LOGGER.warn("Repository issue while running block minter", e); } + } catch (InterruptedException e) { + // We've been interrupted - time to exit + return; } - } catch (DataException e) { - LOGGER.warn("Repository issue while running block minter", e); - } catch (InterruptedException e) { - // We've been interrupted - time to exit - return; } } From 24d04fe92842586030d1dadeba88ad9c0fb37ed9 Mon Sep 17 00:00:00 2001 From: catbref Date: Tue, 17 May 2022 21:53:22 +0100 Subject: [PATCH 13/18] Block.mint() always uses latest timestamped online accounts --- src/main/java/org/qortal/block/Block.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/qortal/block/Block.java b/src/main/java/org/qortal/block/Block.java index f5e2cb6d..ddfe247a 100644 --- a/src/main/java/org/qortal/block/Block.java +++ b/src/main/java/org/qortal/block/Block.java @@ -10,6 +10,7 @@ import java.math.BigInteger; import java.math.RoundingMode; import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; +import java.text.MessageFormat; import java.text.NumberFormat; import java.util.*; import java.util.stream.Collectors; @@ -355,7 +356,6 @@ public class Block { } long timestamp = calcTimestamp(parentBlockData, 
minter.getPublicKey(), minterLevel);
-
 		long onlineAccountsTimestamp = OnlineAccountsManager.getCurrentOnlineAccountTimestamp();
 
 		// Fetch our list of online accounts

From a4e2aedde18a0010d0d91ea2d5b01bb5ee1001ca Mon Sep 17 00:00:00 2001
From: catbref
Date: Tue, 17 May 2022 21:54:31 +0100
Subject: [PATCH 14/18] Remove debug-hindering "final" modifier from
 effectively final locals

---
 src/main/java/org/qortal/controller/BlockMinter.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java
index 07a31ad7..b8831464 100644
--- a/src/main/java/org/qortal/controller/BlockMinter.java
+++ b/src/main/java/org/qortal/controller/BlockMinter.java
@@ -221,8 +221,8 @@ public class BlockMinter extends Thread {
 				List<PrivateKeyAccount> newBlocksMintingAccounts = mintingAccountsData.stream().map(accountData -> new PrivateKeyAccount(repository, accountData.getPrivateKey())).collect(Collectors.toList());
 
 				// We might need to sit the next block out, if one of our minting accounts signed the previous one
-				final byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
-				final boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
+				byte[] previousBlockMinter = previousBlockData.getMinterPublicKey();
+				boolean mintedLastBlock = mintingAccountsData.stream().anyMatch(mintingAccount -> Arrays.equals(mintingAccount.getPublicKey(), previousBlockMinter));
 				if (mintedLastBlock) {
 					LOGGER.trace(String.format("One of our keys signed the last block, so we won't sign the next one"));
 					continue;

From 8f58da4f52f2befff73dddaac53ab1996db4d178 Mon Sep 17 00:00:00 2001
From: catbref
Date: Tue, 17 May 2022 21:55:49 +0100
Subject: [PATCH 15/18] OnlineAccountsManager: Bump v3 min peer version from
 3.2.203 to 3.3.203

No need for toOnlineAccountTimestamp(long) as we only ever use getCurrentOnlineAccountTimestamp().
The latter now returns Long and calls NTP.getTime() on behalf of the caller, removing duplicated NTP.getTime() calls and null checks in multiple callers.

Add aggregate-signature feature-trigger timestamp threshold checks where needed, near sign() and verify() calls.

Improve logging - but some logging will need to be removed / reduced before merging.
---
 .../org/qortal/controller/OnlineAccountsManager.java | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/src/main/java/org/qortal/controller/OnlineAccountsManager.java b/src/main/java/org/qortal/controller/OnlineAccountsManager.java
index f472199e..648d2d81 100644
--- a/src/main/java/org/qortal/controller/OnlineAccountsManager.java
+++ b/src/main/java/org/qortal/controller/OnlineAccountsManager.java
@@ -128,12 +128,15 @@ public class OnlineAccountsManager {
 			return;
 
 		byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);
+		final boolean useAggregateCompatibleSignature = onlineAccountsTimestamp >= BlockChain.getInstance().getAggregateSignatureTimestamp();
 
 		Set<OnlineAccountData> replacementAccounts = new HashSet<>();
 		for (PrivateKeyAccount onlineAccount : onlineAccounts) {
 			// Check mintingAccount is actually reward-share?
 
-			byte[] signature = onlineAccount.sign(timestampBytes);
+			byte[] signature = useAggregateCompatibleSignature
+					? Qortal25519Extras.signForAggregation(onlineAccount.getPrivateKey(), timestampBytes)
+					: onlineAccount.sign(timestampBytes);
 			byte[] publicKey = onlineAccount.getPublicKey();
 
 			OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey);
@@ -275,6 +278,8 @@
 			}
 		}
 
+		LOGGER.info(String.format("we have online accounts for timestamps: %s", String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", ")))));
+
 		return true;
 	}
 
@@ -446,6 +451,8 @@
 	 */
 	// Block::mint() - only wants online accounts with (online) timestamp that matches block's (online) timestamp so they can be added to new block
 	public List<OnlineAccountData> getOnlineAccounts(long onlineTimestamp) {
+		LOGGER.info(String.format("caller's timestamp: %d, our timestamps: %s", onlineTimestamp, String.join(", ", this.currentOnlineAccounts.keySet().stream().map(l -> Long.toString(l)).collect(Collectors.joining(", ")))));
+
 		return new ArrayList<>(Set.copyOf(this.currentOnlineAccounts.getOrDefault(onlineTimestamp, Collections.emptySet())));
 	}

From 8d8e58a905925bd7f25707c0bc38d0aae64e1f67 Mon Sep 17 00:00:00 2001
From: catbref
Date: Tue, 17 May 2022 22:02:22 +0100
Subject: [PATCH 16/18] Network$NetworkProcessor now has its own LOGGER

---
 src/main/java/org/qortal/network/Network.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/main/java/org/qortal/network/Network.java b/src/main/java/org/qortal/network/Network.java
index 6bc58bb4..57073e99 100644
--- a/src/main/java/org/qortal/network/Network.java
+++ b/src/main/java/org/qortal/network/Network.java
@@ -469,6 +469,8 @@ public class Network {
 
 	class NetworkProcessor extends ExecuteProduceConsume {
 
+		private final Logger LOGGER = LogManager.getLogger(NetworkProcessor.class);
+
 		private final AtomicLong nextConnectTaskTimestamp = new AtomicLong(0L); // ms - try first connect once NTP syncs
 
 		private final AtomicLong nextBroadcastTimestamp = new AtomicLong(0L); // ms - try first broadcast once NTP syncs

From 4eb58d359130f74465b942a5319eb64c6acd8926 Mon Sep 17 00:00:00 2001
From: catbref
Date: Tue, 17 May 2022 22:04:14 +0100
Subject: [PATCH 17/18] BlockTimestampTests to show results from changing
 blockTimingsByHeight

---
 .../test/minting/BlockTimestampTests.java | 63 ++++++++++++++
 .../test-chain-v2-block-timestamps.json | 85 +++++++++++++++++++
 src/test/resources/test-chain-v2.json | 3 +-
 .../test-settings-v2-block-timestamps.json | 19 +++++
 4 files changed, 169 insertions(+), 1 deletion(-)
 create mode 100644 src/test/java/org/qortal/test/minting/BlockTimestampTests.java
 create mode 100644 src/test/resources/test-chain-v2-block-timestamps.json
 create mode 100644 src/test/resources/test-settings-v2-block-timestamps.json

diff --git a/src/test/java/org/qortal/test/minting/BlockTimestampTests.java b/src/test/java/org/qortal/test/minting/BlockTimestampTests.java
new file mode 100644
index 00000000..0f91408f
--- /dev/null
+++ b/src/test/java/org/qortal/test/minting/BlockTimestampTests.java
@@ -0,0 +1,63 @@
+package org.qortal.test.minting;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.qortal.block.Block;
+import org.qortal.data.block.BlockData;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.test.common.BlockUtils;
+import org.qortal.test.common.Common;
+import org.qortal.transform.Transformer;
+import
org.qortal.utils.NTP; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +public class BlockTimestampTests extends Common { + + private static class BlockTimestampDataPoint { + public byte[] minterPublicKey; + public int minterAccountLevel; + public long blockTimestamp; + } + + private static final Random RANDOM = new Random(); + + @Before + public void beforeTest() throws DataException { + Common.useSettings("test-settings-v2-block-timestamps.json"); + NTP.setFixedOffset(0L); + } + + @Test + public void testTimestamps() throws DataException { + try (final Repository repository = RepositoryManager.getRepository()) { + Block parentBlock = BlockUtils.mintBlock(repository); + BlockData parentBlockData = parentBlock.getBlockData(); + + // Generate lots of test minters + List dataPoints = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + BlockTimestampDataPoint dataPoint = new BlockTimestampDataPoint(); + + dataPoint.minterPublicKey = new byte[Transformer.PUBLIC_KEY_LENGTH]; + RANDOM.nextBytes(dataPoint.minterPublicKey); + + dataPoint.minterAccountLevel = RANDOM.nextInt(5) + 5; + + dataPoint.blockTimestamp = Block.calcTimestamp(parentBlockData, dataPoint.minterPublicKey, dataPoint.minterAccountLevel); + + System.out.printf("[%d] level %d, blockTimestamp %d - parentTimestamp %d = %d%n", + i, + dataPoint.minterAccountLevel, + dataPoint.blockTimestamp, + parentBlockData.getTimestamp(), + dataPoint.blockTimestamp - parentBlockData.getTimestamp() + ); + } + } + } +} diff --git a/src/test/resources/test-chain-v2-block-timestamps.json b/src/test/resources/test-chain-v2-block-timestamps.json new file mode 100644 index 00000000..072283f0 --- /dev/null +++ b/src/test/resources/test-chain-v2-block-timestamps.json @@ -0,0 +1,85 @@ +{ + "isTestChain": true, + "blockTimestampMargin": 500, + "transactionExpiryPeriod": 86400000, + "maxBlockSize": 2097152, + "maxBytesPerUnitFee": 1024, + "unitFee": "0.1", + "nameRegistrationUnitFees": [ + { "timestamp": 1645372800000, "fee": "5" } + ], + "requireGroupForApproval": false, + "minAccountLevelToRewardShare": 5, + "maxRewardSharesPerMintingAccount": 20, + "founderEffectiveMintingLevel": 10, + "onlineAccountSignaturesMinLifetime": 3600000, + "onlineAccountSignaturesMaxLifetime": 86400000, + "rewardsByHeight": [ + { "height": 1, "reward": 100 }, + { "height": 11, "reward": 10 }, + { "height": 21, "reward": 1 } + ], + "sharesByLevel": [ + { "levels": [ 1, 2 ], "share": 0.05 }, + { "levels": [ 3, 4 ], "share": 0.10 }, + { "levels": [ 5, 6 ], "share": 0.15 }, + { "levels": [ 7, 8 ], "share": 0.20 }, + { "levels": [ 9, 10 ], "share": 0.25 } + ], + "qoraHoldersShare": 0.20, + "qoraPerQortReward": 250, + "blocksNeededByLevel": [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 ], + "blockTimingsByHeight": [ + { "height": 1, "target": 60000, "deviation": 30000, "power": 0.2 }, + { "height": 2, "target": 70000, "deviation": 10000, "power": 0.8 } + ], + "ciyamAtSettings": { + "feePerStep": "0.0001", + "maxStepsPerRound": 500, + "stepsPerFunctionCall": 10, + "minutesPerBlock": 1 + }, + "featureTriggers": { + "messageHeight": 0, + "atHeight": 0, + "assetsTimestamp": 0, + "votingTimestamp": 0, + "arbitraryTimestamp": 0, + "powfixTimestamp": 0, + "qortalTimestamp": 0, + "newAssetPricingTimestamp": 0, + "groupApprovalTimestamp": 0, + "atFindNextTransactionFix": 0, + "newBlockSigHeight": 999999, + "shareBinFix": 999999, + "calcChainWeightTimestamp": 0, + "transactionV5Timestamp": 0, + "transactionV6Timestamp": 9999999999999, + 
"aggregateSignatureTimestamp": 9999999999999 + }, + "genesisInfo": { + "version": 4, + "timestamp": 0, + "transactions": [ + { "type": "ISSUE_ASSET", "assetName": "QORT", "description": "QORT native coin", "data": "", "quantity": 0, "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "assetName": "Legacy-QORA", "description": "Representative legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + { "type": "ISSUE_ASSET", "assetName": "QORT-from-QORA", "description": "QORT gained from holding legacy QORA", "quantity": 0, "isDivisible": true, "data": "{}", "isUnspendable": true }, + + { "type": "GENESIS", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "amount": "1000000000" }, + { "type": "GENESIS", "recipient": "QixPbJUwsaHsVEofJdozU9zgVqkK6aYhrK", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "QaUpHNhT3Ygx6avRiKobuLdusppR5biXjL", "amount": "1000000" }, + { "type": "GENESIS", "recipient": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "amount": "1000000" }, + + { "type": "CREATE_GROUP", "creatorPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "groupName": "dev-group", "description": "developer group", "isOpen": false, "approvalThreshold": "PCT100", "minimumBlockDelay": 0, "maximumBlockDelay": 1440 }, + + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "TEST", "description": "test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "C6wuddsBV3HzRrXUtezE7P5MoRXp5m3mEDokRDGZB6ry", "assetName": "OTHER", "description": "other test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + { "type": "ISSUE_ASSET", "issuerPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "assetName": "GOLD", "description": "gold test asset", "data": "", "quantity": "1000000", "isDivisible": true, "fee": 0 }, + + { "type": "ACCOUNT_FLAGS", "target": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "andMask": -1, "orMask": 1, "xorMask": 0 }, + { "type": "REWARD_SHARE", "minterPublicKey": "2tiMr5LTpaWCgbRvkPK8TFd7k63DyHJMMFFsz9uBf1ZP", "recipient": "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v", "rewardSharePublicKey": "7PpfnvLSG7y4HPh8hE7KoqAjLCkv7Ui6xw4mKAkbZtox", "sharePercent": "100" }, + + { "type": "ACCOUNT_LEVEL", "target": "Qci5m9k4rcwe4ruKrZZQKka4FzUUMut3er", "level": 5 } + ] + } +} diff --git a/src/test/resources/test-chain-v2.json b/src/test/resources/test-chain-v2.json index f308712d..c1677da8 100644 --- a/src/test/resources/test-chain-v2.json +++ b/src/test/resources/test-chain-v2.json @@ -53,7 +53,8 @@ "shareBinFix": 999999, "calcChainWeightTimestamp": 0, "transactionV5Timestamp": 0, - "transactionV6Timestamp": 0 + "transactionV6Timestamp": 0, + "aggregateSignatureTimestamp": 2652560000000 }, "genesisInfo": { "version": 4, diff --git a/src/test/resources/test-settings-v2-block-timestamps.json b/src/test/resources/test-settings-v2-block-timestamps.json new file mode 100644 index 00000000..dbbbebbe --- /dev/null +++ b/src/test/resources/test-settings-v2-block-timestamps.json @@ -0,0 +1,19 @@ +{ + "repositoryPath": "testdb", + "bitcoinNet": "TEST3", + "litecoinNet": "TEST3", + "restrictedApi": false, + "blockchainConfig": "src/test/resources/test-chain-v2-block-timestamps.json", + "exportPath": "qortal-backup-test", + "bootstrap": false, + "wipeUnconfirmedOnStart": false, + "testNtpOffset": 0, + "minPeers": 0, + "pruneBlockLimit": 100, + "bootstrapFilenamePrefix": "test-", + "dataPath": "data-test", + 
"tempDataPath": "data-test/_temp", + "listsPath": "lists-test", + "storagePolicy": "FOLLOWED_OR_VIEWED", + "maxStorageCapacity": 104857600 +} From 431cbf01af5d52e5ff6a723bb6b1be87cf824b03 Mon Sep 17 00:00:00 2001 From: catbref Date: Thu, 16 Jun 2022 17:47:08 +0100 Subject: [PATCH 18/18] BlockMinter will discard block candidates that turn out to be invalid just prior to adding transactions, to be potentially reminted in the next pass --- .../org/qortal/controller/BlockMinter.java | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java index b8831464..c77bc579 100644 --- a/src/main/java/org/qortal/controller/BlockMinter.java +++ b/src/main/java/org/qortal/controller/BlockMinter.java @@ -284,7 +284,12 @@ public class BlockMinter extends Thread { continue; List goodBlocks = new ArrayList<>(); - for (Block testBlock : newBlocks) { + boolean wasInvalidBlockDiscarded = false; + Iterator newBlocksIterator = newBlocks.iterator(); + + while (newBlocksIterator.hasNext()) { + Block testBlock = newBlocksIterator.next(); + // Is new block's timestamp valid yet? // We do a separate check as some timestamp checks are skipped for testchains if (testBlock.isTimestampValid() != ValidationResult.OK) @@ -297,13 +302,21 @@ public class BlockMinter extends Thread { if (result != ValidationResult.OK) { moderatedLog(() -> LOGGER.error(String.format("To-be-minted block invalid '%s' before adding transactions?", result.name()))); - continue; + newBlocksIterator.remove(); + wasInvalidBlockDiscarded = true; + /* + * Bail out fast so that we loop around from the top again. + * This gives BlockMinter the possibility to remint this candidate block using another block from newBlocks, + * via the Blocks.remint() method, which avoids having to re-process Block ATs all over again. + * Particularly useful if some aspect of Blocks changes due a timestamp-based feature-trigger (see BlockChain class). + */ + break; } goodBlocks.add(testBlock); } - if (goodBlocks.isEmpty()) + if (wasInvalidBlockDiscarded || goodBlocks.isEmpty()) continue; // Pick best block