Compare commits

...

41 Commits

Author SHA1 Message Date
Qortal-Auto-Update
b7671d11b6 Bump version to 5.0.4 2025-08-13 12:19:08 -07:00
crowetic
a78838c82b Merge pull request #270 from kennycud/master
QDN, Pirate Chain, Message Scheduling
2025-08-13 12:10:13 -07:00
kennycud
a4c449235e schedule Pirate Chain connections to ensure the connection is ready when needed 2025-08-10 13:17:25 -07:00
kennycud
6808b11d08 Merge remote-tracking branch 'origin/master' 2025-08-09 18:43:24 -07:00
kennycud
78c5f17b41 establish connections for cases where they are often not established, fixed problems with transaction collecting 2025-08-09 18:42:15 -07:00
kennycud
0b427529b6 Merge pull request #18 from Philreact/master-19
fix when chunk already exists and shuffle
2025-08-02 05:38:48 -07:00
663894182d keep first chunk hash unshuffled 2025-08-02 10:33:09 +03:00
b1d5ec6b97 fixed handler when chunk already exists 2025-08-02 10:32:38 +03:00
kennycud
b34a1399d7 Merge pull request #17 from Philreact/master-19
delete arbitraryDataFile chunk if no data
2025-08-01 11:13:16 -07:00
e079dff032 remove delete 2025-08-01 19:22:22 +03:00
29a899b0d6 delete arbitraryDataFile chunk if no data 2025-08-01 10:53:30 +03:00
kennycud
c8c2798f17 scheduling unconfirmed transaction messages to reduce pressure on the database 2025-07-31 09:24:36 -07:00
kennycud
7864fc3776 reduce logging spam 2025-07-31 09:23:07 -07:00
Qortal-Auto-Update
0e8bdbc578 Bump version to 5.0.3 2025-07-30 12:33:57 -07:00
crowetic
2f677cc4d6 Merge pull request #265 from crowetic/master
updated primary community node list.
2025-07-29 18:28:31 -07:00
crowetic
a0b0f374fe Merge pull request #267 from kennycud/master
The latest QDN optimizations
2025-07-29 18:28:02 -07:00
kennycud
00cdc223bb logging all exceptions for synchronizer now 2025-07-29 06:12:49 -07:00
kennycud
f03c78352c another null pointer guard 2025-07-29 06:11:21 -07:00
kennycud
845be3e0fc null pointer protection 2025-07-28 20:54:06 -07:00
98ea0fc96e updated primary community node list. 2025-07-28 12:57:34 -07:00
kennycud
5ec1201880 invalidate percentage when total chunk count is less than local chunk count 2025-07-28 12:47:22 -07:00
kennycud
3c4d68b3ff Merge pull request #16 from Philreact/bugfix/validate-incoming-chunks
validate incoming chunks
2025-07-25 14:14:00 -07:00
b4b845fcef validate incoming chunks 2025-07-25 17:30:34 +03:00
kennycud
769cc8b26e shuffle hashes requested, so requesting peers don't all request the same hashes at the same time 2025-07-23 16:29:47 -07:00
kennycud
41a747e34f delay database writes to the shutdown phase since the database is only read on startup 2025-07-23 16:28:06 -07:00
kennycud
7a86496532 removed logging spam 2025-07-23 16:26:21 -07:00
kennycud
e6c3d5cfc6 reentries for discarded response infos, originally done by Phil 2025-07-23 16:25:27 -07:00
crowetic
50bfe8cbbf Merge pull request #264 from crowetic/master
Merge revert-to-5.0.2 branch, which is a hard-reset back to 5.0.2 rel…
2025-07-23 14:58:18 -07:00
4b4b466587 Merge revert-to-5.0.2 branch, which is a hard-reset back to 5.0.2 release 2025-07-23 14:31:37 -07:00
Ice
6f628be053 Update pom.xml - Deps
Corrections for NTP slippage at start up
2025-07-20 03:18:52 -04:00
Ice
eb07c45955 Merge pull request #255 from IceBurst/master
* Abstraction of AltCoinJ 
* Abstraction of CIYAM
* Update to BouncyCastle
2025-07-13 14:08:15 -04:00
Ice
8bea11bc52 Merge branch 'master' into master 2025-07-13 14:06:11 -04:00
Ice
95e12395ae Merge pull request #1 from IceBurst/Abstract-and-Update-Deps
Abstract and update deps
2025-06-11 03:15:34 -04:00
Ice
47e5c473b3 Merge branch 'master' into Abstract-and-Update-Deps 2025-06-11 03:15:22 -04:00
MergeMerc
30c5136c44 Add Logging for failing to get a Repository Connection for Non-Required/Non-Blocking Tasks 2025-06-09 13:34:05 -04:00
Ice
618945620d Abstract CIYAM.AT out of Repo 2025-04-29 07:13:34 -04:00
Ice
b6d3e407c8 Updates to Dependencies - Test Improvements 2025-04-28 07:25:58 -04:00
Ice
2a97fba108 Merge remote-tracking branch 'origin/IceBurst-Unit-Tests-Updates' into Abstract-and-Update-Deps 2025-04-24 03:45:38 -04:00
Ice
2e7cd93716 Delete .github/workflows/pr-testomg 2025-04-16 15:07:52 -04:00
Ice
2cf0aeac22 Update pr-testing.yml 2025-04-16 14:30:10 -04:00
Ice
cc4056047e Create pr-testomg 2025-04-15 15:45:00 -04:00
15 changed files with 276 additions and 202 deletions

View File

@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.qortal</groupId>
<artifactId>qortal</artifactId>
<version>5.0.2</version>
<version>5.0.4</version>
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

View File

@@ -27,8 +27,6 @@ public class UnsignedFeesSocket extends ApiWebSocket implements Listener {
@Override
public void configure(WebSocketServletFactory factory) {
LOGGER.info("configure");
factory.register(UnsignedFeesSocket.class);
EventBus.INSTANCE.addListener(this);
@@ -65,7 +63,6 @@ public class UnsignedFeesSocket extends ApiWebSocket implements Listener {
@OnWebSocketMessage
public void onWebSocketMessage(Session session, String message) {
LOGGER.info("onWebSocketMessage: message = " + message);
}
private void sendUnsignedFeeEvent(Session session, UnsignedFeeEvent unsignedFeeEvent) {

View File

@@ -177,7 +177,7 @@ public class ArbitraryDataFile {
File file = path.toFile();
if (file.exists()) {
try {
byte[] digest = Crypto.digest(file);
byte[] digest = Crypto.digestFileStream(file);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
// Copy file to data directory if needed
@@ -352,7 +352,7 @@ public class ArbitraryDataFile {
return this.chunks.size();
}
public boolean join() {
public boolean join() {
// Ensure we have chunks
if (this.chunks != null && !this.chunks.isEmpty()) {
@@ -373,7 +373,7 @@ public class ArbitraryDataFile {
for (ArbitraryDataFileChunk chunk : this.chunks) {
File sourceFile = chunk.filePath.toFile();
BufferedInputStream in = new BufferedInputStream(new FileInputStream(sourceFile));
byte[] buffer = new byte[2048];
byte[] buffer = new byte[8192];
int inSize;
while ((inSize = in.read(buffer)) != -1) {
out.write(buffer, 0, inSize);
@@ -398,6 +398,8 @@ public class ArbitraryDataFile {
return false;
}
public boolean delete() {
// Delete the complete file
// ... but only if it's inside the Qortal data or temp directory
@@ -667,6 +669,9 @@ public class ArbitraryDataFile {
}
}
public boolean containsChunk(byte[] hash) {
for (ArbitraryDataFileChunk chunk : this.chunks) {
if (Arrays.equals(hash, chunk.getHash())) {
@@ -781,18 +786,17 @@ public class ArbitraryDataFile {
return this.filePath;
}
public byte[] digest() {
File file = this.getFile();
if (file != null && file.exists()) {
try {
return Crypto.digest(file);
} catch (IOException e) {
LOGGER.error("Couldn't compute digest for ArbitraryDataFile");
}
public byte[] digest() {
File file = this.getFile();
if (file != null && file.exists()) {
try {
return Crypto.digestFileStream(file);
} catch (IOException e) {
LOGGER.error("Couldn't compute digest for ArbitraryDataFile");
}
return null;
}
return null;
}
public String digest58() {
if (this.digest() != null) {

View File

@@ -437,16 +437,24 @@ public class ArbitraryDataReader {
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
}
// Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
// Delete the invalid file
LOGGER.info("Deleting invalid file: path = " + arbitraryDataFile.getFilePath());
if( arbitraryDataFile.delete() ) {
LOGGER.info("Deleted invalid file successfully: path = " + arbitraryDataFile.getFilePath());
}
else {
LOGGER.warn("Could not delete invalid file: path = " + arbitraryDataFile.getFilePath());
}
if (!Arrays.equals(arbitraryDataFile.digest(), transactionData.getData())) {
// Delete the invalid file
LOGGER.info("Deleting invalid file: path = {}", arbitraryDataFile.getFilePath());
if (arbitraryDataFile.delete()) {
LOGGER.info("Deleted invalid file successfully: path = {}", arbitraryDataFile.getFilePath());
} else {
LOGGER.warn("Could not delete invalid file: path = {}", arbitraryDataFile.getFilePath());
}
// Also delete its chunks
if (arbitraryDataFile.deleteAllChunks()) {
LOGGER.info("Deleted all chunks associated with invalid file: {}", arbitraryDataFile.getFilePath());
} else {
LOGGER.warn("Failed to delete one or more chunks for invalid file: {}", arbitraryDataFile.getFilePath());
}
throw new DataException("Unable to validate complete file hash");
}

View File

@@ -157,6 +157,8 @@ public class Synchronizer extends Thread {
// Clear interrupted flag so we can shutdown trim threads
Thread.interrupted();
// Fall-through to exit
} catch (Exception e) {
LOGGER.error(e.getMessage(), e);
}
}

View File

@@ -58,6 +58,7 @@ public class TransactionImporter extends Thread {
public TransactionImporter() {
// Batch-process queued network messages on a fixed 1-second cadence after a
// 60-second startup delay; batching scheduled here reduces pressure on the
// database compared with handling each incoming message immediately.
signatureMessageScheduler.scheduleAtFixedRate(this::processNetworkTransactionSignaturesMessage, 60, 1, TimeUnit.SECONDS);
getTransactionMessageScheduler.scheduleAtFixedRate(this::processNetworkGetTransactionMessages, 60, 1, TimeUnit.SECONDS);
getUnconfirmedTransactionsMessageScheduler.scheduleAtFixedRate(this::processNetworkGetUnconfirmedTransactionsMessages, 60, 1, TimeUnit.SECONDS);
public static synchronized TransactionImporter getInstance() {
@@ -481,20 +482,48 @@ public class TransactionImporter extends Thread {
}
}
// List to collect messages
private final List<PeerMessage> getUnconfirmedTransactionsMessageList = new ArrayList<>();
// Lock to synchronize access to the list
private final Object getUnconfirmedTransactionsMessageLock = new Object();
// Scheduled executor service to process messages every second
private final ScheduledExecutorService getUnconfirmedTransactionsMessageScheduler = Executors.newScheduledThreadPool(1);
public void onNetworkGetUnconfirmedTransactionsMessage(Peer peer, Message message) {
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = Collections.emptyList();
synchronized (getUnconfirmedTransactionsMessageLock) {
getUnconfirmedTransactionsMessageList.add(new PeerMessage(peer, message));
}
}
// If we're NOT up-to-date then don't send out unconfirmed transactions
// as it's possible they are already included in a later block that we don't have.
if (Controller.getInstance().isUpToDate())
private void processNetworkGetUnconfirmedTransactionsMessages() {
List<PeerMessage> messagesToProcess;
synchronized (getUnconfirmedTransactionsMessageLock) {
messagesToProcess = new ArrayList<>(getUnconfirmedTransactionsMessageList);
getUnconfirmedTransactionsMessageList.clear();
}
if( messagesToProcess.isEmpty() ) return;
List<byte[]> signatures = Collections.emptyList();
// If we're NOT up-to-date then don't send out unconfirmed transactions
// as it's possible they are already included in a later block that we don't have.
if (Controller.getInstance().isUpToDate()) {
try (final Repository repository = RepositoryManager.getRepository()) {
signatures = repository.getTransactionRepository().getUnconfirmedTransactionSignatures();
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while sending unconfirmed transaction signatures to peers"), e);
}
}
Message transactionSignaturesMessage = new TransactionSignaturesMessage(signatures);
if (!peer.sendMessage(transactionSignaturesMessage))
peer.disconnect("failed to send unconfirmed transaction signatures");
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while sending unconfirmed transaction signatures to peer %s", peer), e);
Message transactionSignaturesMessage = new TransactionSignaturesMessage(signatures);
for( PeerMessage messageToProcess : messagesToProcess ) {
if (!messageToProcess.getPeer().sendMessage(transactionSignaturesMessage))
messageToProcess.getPeer().disconnect("failed to send unconfirmed transaction signatures");
}
}

View File

@@ -772,6 +772,9 @@ public class ArbitraryDataFileListManager {
String ourAddress = Network.getInstance().getOurExternalIpAddressAndPort();
ArbitraryDataFileListMessage arbitraryDataFileListMessage;
Collections.shuffle(hashes.subList(1, hashes.size()));
// Remove optional parameters if the requesting peer doesn't support it yet
// A message with less statistical data is better than no message at all
if (!peer.isAtLeastVersion(MIN_PEER_VERSION_FOR_FILE_LIST_STATS)) {

View File

@@ -32,6 +32,8 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.qortal.crypto.Crypto;
public class ArbitraryDataFileManager extends Thread {
public static final int SEND_TIMEOUT_MS = 500;
@@ -129,7 +131,7 @@ public class ArbitraryDataFileManager extends Thread {
public boolean fetchArbitraryDataFiles(Peer peer,
byte[] signature,
ArbitraryTransactionData arbitraryTransactionData,
List<byte[]> hashes) throws DataException {
List<byte[]> hashes, ArbitraryFileListResponseInfo responseInfo) throws DataException {
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromTransactionData(arbitraryTransactionData);
@@ -161,6 +163,8 @@ public class ArbitraryDataFileManager extends Thread {
}
else {
LOGGER.trace("Already requesting data file {} for signature {} from peer {}", arbitraryDataFile, Base58.encode(signature), peer);
this.addResponse(responseInfo);
}
}
}
@@ -247,13 +251,27 @@ public class ArbitraryDataFileManager extends Thread {
ArbitraryDataFileMessage peersArbitraryDataFileMessage = (ArbitraryDataFileMessage) response;
arbitraryDataFile = peersArbitraryDataFileMessage.getArbitraryDataFile();
byte[] fileBytes = arbitraryDataFile.getBytes();
if (fileBytes == null || fileBytes.length == 0) {
LOGGER.debug(String.format("Failed to read bytes for file hash %s", hash58));
return null;
}
byte[] actualHash = Crypto.digest(fileBytes);
if (!Arrays.equals(hash, actualHash)) {
LOGGER.debug(String.format("Hash mismatch for chunk: expected %s but got %s",
hash58, Base58.encode(actualHash)));
return null;
}
} else {
LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
arbitraryDataFile = existingFile;
arbitraryDataFile = null;
}
if (arbitraryDataFile != null) {
arbitraryDataFile.save();
// If this is a metadata file then we need to update the cache

View File

@@ -180,7 +180,8 @@ public class ArbitraryDataFileRequestThread {
responseInfo.getPeer(),
arbitraryTransactionData.getSignature(),
arbitraryTransactionData,
Arrays.asList(Base58.decode(responseInfo.getHash58()))
Arrays.asList(Base58.decode(responseInfo.getHash58())),
responseInfo
);
} catch (DataException e) {
LOGGER.warn("Unable to process file hashes: {}", e.getMessage());

View File

@@ -87,7 +87,7 @@ public class AtStatesPruner implements Runnable {
if (pruneStartHeight >= upperPruneHeight)
continue;
LOGGER.info(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
LOGGER.debug(String.format("Pruning AT states between blocks %d and %d...", pruneStartHeight, upperPruneHeight));
int numAtStatesPruned = repository.getATRepository().pruneAtStates(pruneStartHeight, upperPruneHeight);
repository.saveChanges();
@@ -97,7 +97,7 @@ public class AtStatesPruner implements Runnable {
if (numAtStatesPruned > 0 || numAtStateDataRowsTrimmed > 0) {
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.info(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
LOGGER.debug(() -> String.format("Pruned %d AT state%s between blocks %d and %d",
numAtStatesPruned, (numAtStatesPruned != 1 ? "s" : ""),
finalPruneStartHeight, upperPruneHeight));
} else {
@@ -110,7 +110,7 @@ public class AtStatesPruner implements Runnable {
repository.saveChanges();
final int finalPruneStartHeight = pruneStartHeight;
LOGGER.info(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
LOGGER.debug(() -> String.format("Bumping AT state base prune height to %d", finalPruneStartHeight));
} else {
// We've pruned up to the upper prunable height
// Back off for a while to save CPU for syncing

View File

@@ -21,6 +21,9 @@ import org.qortal.utils.BitTwiddling;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
public class PirateChain extends Bitcoiny {
@@ -133,12 +136,17 @@ public class PirateChain extends Bitcoiny {
private final PirateChainNet pirateChainNet;
// Scheduled executor service to check connection to Pirate Chain server
private final ScheduledExecutorService pirateChainCheckScheduler = Executors.newScheduledThreadPool(1);
// Constructors and instance
private PirateChain(PirateChainNet pirateChainNet, BitcoinyBlockchainProvider blockchain, Context bitcoinjContext, String currencyCode) {
super(blockchain, bitcoinjContext, currencyCode, DEFAULT_FEE_PER_KB);
this.pirateChainNet = pirateChainNet;
pirateChainCheckScheduler.scheduleWithFixedDelay(this::establishConnection, 30, 300, TimeUnit.SECONDS);
LOGGER.info(() -> String.format("Starting Pirate Chain support using %s", this.pirateChainNet.name()));
}
@@ -272,6 +280,7 @@ public class PirateChain extends Bitcoiny {
}
public Long getWalletBalance(String entropy58) throws ForeignBlockchainException {
synchronized (this) {
PirateChainWalletController walletController = PirateChainWalletController.getInstance();
walletController.initWithEntropy58(entropy58);
@@ -290,7 +299,30 @@ public class PirateChain extends Bitcoiny {
}
}
/**
 * Probe the Pirate Chain server by querying the current blockchain height.
 *
 * Scheduled periodically so a connection is already warmed up by the time
 * other methods in this class need one. Failures are logged, never thrown.
 */
private void establishConnection() {
    try {
        LOGGER.info("Checking Pirate Chain Connection ... ");

        final int currentHeight;
        // serialize with other synchronized(this) operations on this instance
        synchronized (this) {
            currentHeight = this.blockchainProvider.getCurrentHeight();
        }

        LOGGER.info("Checked Pirate Chain Connection: height = " + currentHeight);
    } catch (ForeignBlockchainException e) {
        LOGGER.error(e.getMessage(), e);
    }
}
public List<SimpleTransaction> getWalletTransactions(String entropy58) throws ForeignBlockchainException {
synchronized (this) {
PirateChainWalletController walletController = PirateChainWalletController.getInstance();
walletController.initWithEntropy58(entropy58);
@@ -310,8 +342,8 @@ public class PirateChain extends Bitcoiny {
if (transactionJson.has("txid")) {
String txId = transactionJson.getString("txid");
Long timestamp = transactionJson.getLong("datetime");
Long amount = transactionJson.getLong("amount");
Long fee = transactionJson.getLong("fee");
Long amount = 0L;
Long fee = 0L;
String memo = null;
if (transactionJson.has("incoming_metadata")) {
@@ -322,7 +354,7 @@ public class PirateChain extends Bitcoiny {
if (incomingMetadata.has("value")) {
//String address = incomingMetadata.getString("address");
Long value = incomingMetadata.getLong("value");
amount = value; // TODO: figure out how to parse transactions with multiple incomingMetadata entries
amount += value;
}
if (incomingMetadata.has("memo") && !incomingMetadata.isNull("memo")) {
@@ -337,6 +369,11 @@ public class PirateChain extends Bitcoiny {
for (int j = 0; j < outgoingMetadatas.length(); j++) {
JSONObject outgoingMetadata = outgoingMetadatas.getJSONObject(j);
if(outgoingMetadata.has("value")) {
Long value = outgoingMetadata.getLong("value");
amount -= value;
fee += MAINNET_FEE; // add the standard fee for each send
}
if (outgoingMetadata.has("memo") && !outgoingMetadata.isNull("memo")) {
memo = outgoingMetadata.getString("memo");
}
@@ -350,6 +387,10 @@ public class PirateChain extends Bitcoiny {
}
}
double sum = transactions.stream().mapToDouble(SimpleTransaction::getTotalAmount).sum() / 100000000.0;
double fees = transactions.stream().mapToDouble(SimpleTransaction::getFeeAmount).sum() / 100000000.0;
LOGGER.info("balance = " + (sum - fees));
return transactions;
}
}

View File

@@ -1,6 +1,7 @@
package org.qortal.crypto;
import com.google.common.primitives.Bytes;
import org.bouncycastle.crypto.params.Ed25519PrivateKeyParameters;
import org.bouncycastle.crypto.params.X25519PrivateKeyParameters;
import org.bouncycastle.crypto.params.X25519PublicKeyParameters;
@@ -11,6 +12,7 @@ import org.qortal.utils.Base58;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
@@ -66,6 +68,20 @@ public abstract class Crypto {
}
}
/**
 * Computes the SHA-256 digest of a file by streaming its contents through
 * a fixed-size buffer, so the whole file is never held in memory at once.
 *
 * @param file the file whose contents are to be hashed
 * @return the 32-byte SHA-256 digest of the file's contents
 * @throws IOException if the file cannot be read, or if the SHA-256
 *                     algorithm is unavailable in this runtime
 */
public static byte[] digestFileStream(File file) throws IOException {
    final MessageDigest sha256;
    try {
        sha256 = MessageDigest.getInstance("SHA-256");
    } catch (NoSuchAlgorithmException e) {
        // Every conforming JRE must provide SHA-256, so this is effectively unreachable
        throw new IOException("SHA-256 algorithm not available", e);
    }

    try (InputStream in = new FileInputStream(file)) {
        final byte[] chunk = new byte[8192]; // 8 KB read buffer
        for (int read = in.read(chunk); read != -1; read = in.read(chunk)) {
            sha256.update(chunk, 0, read);
        }
    }
    return sha256.digest();
}
/**
* Returns 32-byte digest of two rounds of SHA-256 on message passed in input.
*

View File

@@ -63,7 +63,7 @@ public class ArbitraryResourceStatus {
this.description = status.description;
this.localChunkCount = localChunkCount;
this.totalChunkCount = totalChunkCount;
this.percentLoaded = (this.localChunkCount != null && this.totalChunkCount != null && this.totalChunkCount > 0) ? this.localChunkCount / (float)this.totalChunkCount * 100.0f : null;
this.percentLoaded = (this.localChunkCount != null && this.totalChunkCount != null && this.totalChunkCount > 0 && this.totalChunkCount >= this.localChunkCount) ? this.localChunkCount / (float)this.totalChunkCount * 100.0f : null;
}
public ArbitraryResourceStatus(Status status) {

View File

@@ -706,7 +706,9 @@ public class Group {
// Save reference to invite transaction so invite can be rebuilt during orphaning.
GroupInviteData groupInviteData = this.getInvite(invitee);
cancelGroupInviteTransactionData.setInviteReference(groupInviteData.getReference());
if( groupInviteData != null) {
cancelGroupInviteTransactionData.setInviteReference(groupInviteData.getReference());
}
// Delete invite
this.deleteInvite(invitee);
@@ -715,7 +717,9 @@ public class Group {
public void uncancelInvite(CancelGroupInviteTransactionData cancelGroupInviteTransactionData) throws DataException {
// Reinstate invite
TransactionData transactionData = this.repository.getTransactionRepository().fromSignature(cancelGroupInviteTransactionData.getInviteReference());
this.addInvite((GroupInviteTransactionData) transactionData);
if( transactionData != null ) {
this.addInvite((GroupInviteTransactionData) transactionData);
}
// Clear cached reference to invite transaction
cancelGroupInviteTransactionData.setInviteReference(null);

View File

@@ -77,10 +77,11 @@ public class Network {
private static final String[] INITIAL_PEERS = new String[]{
"node1.qortal.org", "node2.qortal.org", "node3.qortal.org", "node4.qortal.org", "node5.qortal.org",
"node6.qortal.org", "node7.qortal.org", "node8.qortal.org", "node9.qortal.org", "node10.qortal.org",
"node.qortal.ru", "node2.qortal.ru", "node3.qortal.ru", "node.qortal.uk", "node22.qortal.org",
"cinfu1.crowetic.com", "node.cwd.systems", "bootstrap.cwd.systems", "node1.qortalnodes.live",
"node11.qortal.org", "node12.qortal.org", "node13.qortal.org", "node14.qortal.org", "node15.qortal.org",
"node.qortal.ru", "node2.qortal.ru", "node3.qortal.ru", "node.qortal.uk", "qnode1.crowetic.com", "bootstrap-ssh.qortal.org",
"proxynodes.qortal.link", "api.qortal.org", "bootstrap2-ssh.qortal.org", "bootstrap3-ssh.qortal.org",
"node2.qortalnodes.live", "node3.qortalnodes.live", "node4.qortalnodes.live", "node5.qortalnodes.live",
"node6.qortalnodes.live", "node7.qortalnodes.live", "node8.qortalnodes.live"
"node6.qortalnodes.live", "node7.qortalnodes.live", "node8.qortalnodes.live", "ubuntu-monster.qortal.org"
};
private static final long NETWORK_EPC_KEEPALIVE = 5L; // seconds
@@ -235,6 +236,8 @@ public class Network {
this.allKnownPeers.addAll(repository.getNetworkRepository().getAllPeers());
}
}
LOGGER.debug("starting with {} known peers", this.allKnownPeers.size());
}
// Attempt to set up UPnP. All errors are ignored.
@@ -711,63 +714,49 @@ public class Network {
}
private Peer getConnectablePeer(final Long now) throws InterruptedException {
// We can't block here so use tryRepository(). We don't NEED to connect a new peer.
try (Repository repository = RepositoryManager.tryRepository()) {
if (repository == null) {
LOGGER.warn("Unable to get repository connection : Network.getConnectablePeer()");
return null;
}
// Find an address to connect to
List<PeerData> peers = this.getAllKnownPeers();
// Find an address to connect to
List<PeerData> peers = this.getAllKnownPeers();
// Don't consider peers with recent connection failures
final long lastAttemptedThreshold = now - CONNECT_FAILURE_BACKOFF;
peers.removeIf(peerData -> peerData.getLastAttempted() != null
&& (peerData.getLastConnected() == null
|| peerData.getLastConnected() < peerData.getLastAttempted())
&& peerData.getLastAttempted() > lastAttemptedThreshold);
// Don't consider peers with recent connection failures
final long lastAttemptedThreshold = now - CONNECT_FAILURE_BACKOFF;
peers.removeIf(peerData -> peerData.getLastAttempted() != null
&& (peerData.getLastConnected() == null
|| peerData.getLastConnected() < peerData.getLastAttempted())
&& peerData.getLastAttempted() > lastAttemptedThreshold);
// Don't consider peers that we know loop back to ourself
synchronized (this.selfPeers) {
peers.removeIf(isSelfPeer);
}
// Don't consider peers that we know loop back to ourself
synchronized (this.selfPeers) {
peers.removeIf(isSelfPeer);
}
// Don't consider already connected peers (simple address match)
peers.removeIf(isConnectedPeer);
// Don't consider already connected peers (simple address match)
peers.removeIf(isConnectedPeer);
// Don't consider already connected peers (resolved address match)
// Disabled because this might be too slow if we end up waiting a long time for hostnames to resolve via DNS
// Which is ok because duplicate connections to the same peer are handled during handshaking
// peers.removeIf(isResolvedAsConnectedPeer);
// Don't consider already connected peers (resolved address match)
// Disabled because this might be too slow if we end up waiting a long time for hostnames to resolve via DNS
// Which is ok because duplicate connections to the same peer are handled during handshaking
// peers.removeIf(isResolvedAsConnectedPeer);
this.checkLongestConnection(now);
this.checkLongestConnection(now);
// Any left?
if (peers.isEmpty()) {
return null;
}
// Pick random peer
int peerIndex = new Random().nextInt(peers.size());
// Pick candidate
PeerData peerData = peers.get(peerIndex);
Peer newPeer = new Peer(peerData);
newPeer.setIsDataPeer(false);
// Update connection attempt info
peerData.setLastAttempted(now);
synchronized (this.allKnownPeers) {
repository.getNetworkRepository().save(peerData);
repository.saveChanges();
}
return newPeer;
} catch (DataException e) {
LOGGER.error("Repository issue while finding a connectable peer", e);
// Any left?
if (peers.isEmpty()) {
return null;
}
// Pick random peer
int peerIndex = new Random().nextInt(peers.size());
// Pick candidate
PeerData peerData = peers.get(peerIndex);
Peer newPeer = new Peer(peerData);
newPeer.setIsDataPeer(false);
// Update connection attempt info
peerData.setLastAttempted(now);
return newPeer;
}
public boolean connectPeer(Peer newPeer) throws InterruptedException {
@@ -947,18 +936,6 @@ public class Network {
public void peerMisbehaved(Peer peer) {
PeerData peerData = peer.getPeerData();
peerData.setLastMisbehaved(NTP.getTime());
// Only update repository if outbound peer
if (peer.isOutbound()) {
try (Repository repository = RepositoryManager.getRepository()) {
synchronized (this.allKnownPeers) {
repository.getNetworkRepository().save(peerData);
repository.saveChanges();
}
} catch (DataException e) {
LOGGER.warn("Repository issue while updating peer synchronization info", e);
}
}
}
/**
@@ -1148,19 +1125,6 @@ public class Network {
// Make a note that we've successfully completed handshake (and when)
peer.getPeerData().setLastConnected(NTP.getTime());
// Update connection info for outbound peers only
if (peer.isOutbound()) {
try (Repository repository = RepositoryManager.getRepository()) {
synchronized (this.allKnownPeers) {
repository.getNetworkRepository().save(peer.getPeerData());
repository.saveChanges();
}
} catch (DataException e) {
LOGGER.error("[{}] Repository issue while trying to update outbound peer {}",
peer.getPeerConnectionId(), peer, e);
}
}
// Process any pending signature requests, as this peer may have been connected for this purpose only
List<byte[]> pendingSignatureRequests = new ArrayList<>(peer.getPendingSignatureRequests());
if (pendingSignatureRequests != null && !pendingSignatureRequests.isEmpty()) {
@@ -1424,32 +1388,23 @@ public class Network {
}
public boolean forgetPeer(PeerAddress peerAddress) throws DataException {
int numDeleted;
boolean numDeleted;
synchronized (this.allKnownPeers) {
this.allKnownPeers.removeIf(peerData -> peerData.getAddress().equals(peerAddress));
try (Repository repository = RepositoryManager.getRepository()) {
numDeleted = repository.getNetworkRepository().delete(peerAddress);
repository.saveChanges();
}
numDeleted = this.allKnownPeers.removeIf(peerData -> peerData.getAddress().equals(peerAddress));
}
disconnectPeer(peerAddress);
return numDeleted != 0;
return numDeleted;
}
public int forgetAllPeers() throws DataException {
int numDeleted;
synchronized (this.allKnownPeers) {
numDeleted = this.allKnownPeers.size();
this.allKnownPeers.clear();
try (Repository repository = RepositoryManager.getRepository()) {
numDeleted = repository.getNetworkRepository().deleteAllPeers();
repository.saveChanges();
}
}
for (Peer peer : this.getImmutableConnectedPeers()) {
@@ -1498,48 +1453,36 @@ public class Network {
// Prune 'old' peers from repository...
// Pruning peers isn't critical so no need to block for a repository instance.
try (Repository repository = RepositoryManager.tryRepository()) {
if (repository == null) {
LOGGER.warn("Unable to get repository connection : Network.prunePeers()");
return;
}
synchronized (this.allKnownPeers) {
// Fetch all known peers
List<PeerData> peers = new ArrayList<>(this.allKnownPeers);
synchronized (this.allKnownPeers) {
// Fetch all known peers
List<PeerData> peers = new ArrayList<>(this.allKnownPeers);
// 'Old' peers:
// We attempted to connect within the last day
// but we last managed to connect over a week ago.
Predicate<PeerData> isNotOldPeer = peerData -> {
if (peerData.getLastAttempted() == null
|| peerData.getLastAttempted() < now - OLD_PEER_ATTEMPTED_PERIOD) {
return true;
}
if (peerData.getLastConnected() == null
|| peerData.getLastConnected() > now - OLD_PEER_CONNECTION_PERIOD) {
return true;
}
return false;
};
// Disregard peers that are NOT 'old'
peers.removeIf(isNotOldPeer);
// Don't consider already connected peers (simple address match)
peers.removeIf(isConnectedPeer);
for (PeerData peerData : peers) {
LOGGER.debug("Deleting old peer {} from repository", peerData.getAddress().toString());
repository.getNetworkRepository().delete(peerData.getAddress());
// Delete from known peer cache too
this.allKnownPeers.remove(peerData);
// 'Old' peers:
// We attempted to connect within the last day
// but we last managed to connect over a week ago.
Predicate<PeerData> isNotOldPeer = peerData -> {
if (peerData.getLastAttempted() == null
|| peerData.getLastAttempted() < now - OLD_PEER_ATTEMPTED_PERIOD) {
return true;
}
repository.saveChanges();
if (peerData.getLastConnected() == null
|| peerData.getLastConnected() > now - OLD_PEER_CONNECTION_PERIOD) {
return true;
}
return false;
};
// Disregard peers that are NOT 'old'
peers.removeIf(isNotOldPeer);
// Don't consider already connected peers (simple address match)
peers.removeIf(isConnectedPeer);
for (PeerData peerData : peers) {
// Delete from known peer cache too
this.allKnownPeers.remove(peerData);
}
}
}
@@ -1547,8 +1490,8 @@ public class Network {
public boolean mergePeers(String addedBy, long addedWhen, List<PeerAddress> peerAddresses) throws DataException {
mergePeersLock.lock();
try (Repository repository = RepositoryManager.getRepository()) {
return this.mergePeers(repository, addedBy, addedWhen, peerAddresses);
try{
return this.mergePeersUnlocked(addedBy, addedWhen, peerAddresses);
} finally {
mergePeersLock.unlock();
}
@@ -1567,23 +1510,17 @@ public class Network {
try {
// Merging peers isn't critical so don't block for a repository instance.
try (Repository repository = RepositoryManager.tryRepository()) {
if (repository == null) {
LOGGER.warn("Unable to get repository connection : Network.opportunisticMergePeers()");
return;
}
this.mergePeers(repository, addedBy, addedWhen, peerAddresses);
this.mergePeersUnlocked(addedBy, addedWhen, peerAddresses);
} catch (DataException e) {
// Already logged by this.mergePeers()
}
} catch (DataException e) {
// Already logged by this.mergePeersUnlocked()
} finally {
mergePeersLock.unlock();
}
}
private boolean mergePeers(Repository repository, String addedBy, long addedWhen, List<PeerAddress> peerAddresses)
private boolean mergePeersUnlocked(String addedBy, long addedWhen, List<PeerAddress> peerAddresses)
throws DataException {
List<String> fixedNetwork = Settings.getInstance().getFixedNetwork();
if (fixedNetwork != null && !fixedNetwork.isEmpty()) {
@@ -1608,19 +1545,6 @@ public class Network {
this.allKnownPeers.addAll(newPeers);
try {
// Save new peers into database
for (PeerData peerData : newPeers) {
LOGGER.info("Adding new peer {} to repository", peerData.getAddress());
repository.getNetworkRepository().save(peerData);
}
repository.saveChanges();
} catch (DataException e) {
LOGGER.error("Repository issue while merging peers list from {}", addedBy, e);
throw e;
}
return true;
}
}
@@ -1665,6 +1589,33 @@ public class Network {
LOGGER.warn("Interrupted while waiting for networking threads to terminate");
}
try( Repository repository = RepositoryManager.getRepository() ){
// reset all known peers in database
int deletedCount = repository.getNetworkRepository().deleteAllPeers();
LOGGER.debug("Deleted {} known peers", deletedCount);
List<PeerData> knownPeersToProcess;
synchronized (this.allKnownPeers) {
knownPeersToProcess = new ArrayList<>(this.allKnownPeers);
}
int addedPeerCount = 0;
// save all known peers for next start up
for (PeerData knownPeerToProcess : knownPeersToProcess) {
repository.getNetworkRepository().save(knownPeerToProcess);
addedPeerCount++;
}
repository.saveChanges();
LOGGER.debug("Added {} known peers", addedPeerCount);
} catch (DataException e) {
LOGGER.error(e.getMessage(), e);
}
// Close all peer connections
for (Peer peer : this.getImmutableConnectedPeers()) {
peer.shutdown();