diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..b06f7659
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,26 @@
+FROM maven:3-openjdk-11 as builder
+
+WORKDIR /work
+COPY ./ /work/
+RUN mvn clean package
+
+###
+FROM openjdk:11
+
+RUN useradd -r -u 1000 -g users qortal && \
+ mkdir /usr/local/qortal /qortal && \
+ chown 1000:100 /qortal
+
+COPY --from=builder /work/log4j2.properties /usr/local/qortal/
+COPY --from=builder /work/target/qortal*.jar /usr/local/qortal/qortal.jar
+
+USER 1000:100
+
+EXPOSE 12391 12392
+HEALTHCHECK --start-period=5m CMD curl -sf http://127.0.0.1:12391/admin/info || exit 1
+
+WORKDIR /qortal
+VOLUME /qortal
+
+ENTRYPOINT ["java"]
+CMD ["-Djava.net.preferIPv4Stack=false", "-jar", "/usr/local/qortal/qortal.jar"]
diff --git a/README.md b/README.md
index e9001f9c..9dd9ad60 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,4 @@
-# Qortal Data Node
-
-## Important
-
-This code is unfinished, and we haven't had the official genesis block for the data chain yet.
-Therefore it is only possible to use this code if you first create your own test chain. I would
-highly recommend waiting until the code is in a more complete state before trying to run this.
+# Qortal Project - Official Repo
## Build / run
diff --git a/WindowsInstaller/Qortal.aip b/WindowsInstaller/Qortal.aip
index 12636c44..f69f0682 100755
--- a/WindowsInstaller/Qortal.aip
+++ b/WindowsInstaller/Qortal.aip
@@ -17,10 +17,10 @@
 [Qortal.aip XML attribute changes not recoverable from this extract]
@@ -212,7 +212,7 @@
 [XML attribute change not recoverable from this extract]
@@ -1173,7 +1173,7 @@
 [XML attribute change not recoverable from this extract]
diff --git a/pom.xml b/pom.xml
index a2a790fa..1d7eebeb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
  <modelVersion>4.0.0</modelVersion>
  <groupId>org.qortal</groupId>
  <artifactId>qortal</artifactId>
- <version>3.0.2</version>
+ <version>3.1.1</version>
  <packaging>jar</packaging>
  <properties>
  <skipTests>true</skipTests>
diff --git a/src/main/java/org/qortal/ApplyUpdate.java b/src/main/java/org/qortal/ApplyUpdate.java
index edd6d924..90171191 100644
--- a/src/main/java/org/qortal/ApplyUpdate.java
+++ b/src/main/java/org/qortal/ApplyUpdate.java
@@ -7,14 +7,13 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.security.Security;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
+import java.util.*;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
+import org.qortal.api.ApiKey;
import org.qortal.api.ApiRequest;
import org.qortal.controller.AutoUpdate;
import org.qortal.settings.Settings;
@@ -70,14 +69,40 @@ public class ApplyUpdate {
String baseUri = "http://localhost:" + Settings.getInstance().getApiPort() + "/";
LOGGER.info(() -> String.format("Shutting down node using API via %s", baseUri));
+ // The /admin/stop endpoint requires an API key, which may or may not be already generated
+ boolean apiKeyNewlyGenerated = false;
+ ApiKey apiKey = null;
+ try {
+ apiKey = new ApiKey();
+ if (!apiKey.generated()) {
+ apiKey.generate();
+ apiKeyNewlyGenerated = true;
+ LOGGER.info("Generated API key");
+ }
+ } catch (IOException e) {
+ LOGGER.info("Error loading API key: {}", e.getMessage());
+ }
+
+ // Create GET params
+ Map<String, String> params = new HashMap<>();
+ if (apiKey != null) {
+ params.put("apiKey", apiKey.toString());
+ }
+
+ // Attempt to stop the node
int attempt;
for (attempt = 0; attempt < MAX_ATTEMPTS; ++attempt) {
final int attemptForLogging = attempt;
LOGGER.info(() -> String.format("Attempt #%d out of %d to shutdown node", attemptForLogging + 1, MAX_ATTEMPTS));
- String response = ApiRequest.perform(baseUri + "admin/stop", null);
- if (response == null)
+ String response = ApiRequest.perform(baseUri + "admin/stop", params);
+ if (response == null) {
// No response - consider node shut down
+ if (apiKeyNewlyGenerated) {
+ // API key was newly generated for this auto update, so we need to remove it
+ ApplyUpdate.removeGeneratedApiKey();
+ }
return true;
+ }
LOGGER.info(() -> String.format("Response from API: %s", response));
@@ -89,6 +114,11 @@ public class ApplyUpdate {
}
}
+ if (apiKeyNewlyGenerated) {
+ // API key was newly generated for this auto update, so we need to remove it
+ ApplyUpdate.removeGeneratedApiKey();
+ }
+
if (attempt == MAX_ATTEMPTS) {
LOGGER.error("Failed to shutdown node - giving up");
return false;
@@ -97,6 +127,19 @@ public class ApplyUpdate {
return true;
}
+ private static void removeGeneratedApiKey() {
+ try {
+ LOGGER.info("Removing newly generated API key...");
+
+ // Delete the API key since it was only generated for this auto update
+ ApiKey apiKey = new ApiKey();
+ apiKey.delete();
+
+ } catch (IOException e) {
+ LOGGER.info("Error loading or deleting API key: {}", e.getMessage());
+ }
+ }
+
private static void replaceJar() {
// Assuming current working directory contains the JAR files
Path realJar = Paths.get(JAR_FILENAME);
diff --git a/src/main/java/org/qortal/api/ApiKey.java b/src/main/java/org/qortal/api/ApiKey.java
index 6a79dd20..3f7cfe35 100644
--- a/src/main/java/org/qortal/api/ApiKey.java
+++ b/src/main/java/org/qortal/api/ApiKey.java
@@ -81,6 +81,15 @@ public class ApiKey {
writer.close();
}
+ public void delete() throws IOException {
+ this.apiKey = null;
+
+ Path filePath = this.getFilePath();
+ if (Files.exists(filePath)) {
+ Files.delete(filePath);
+ }
+ }
+
public boolean generated() {
return (this.apiKey != null);
diff --git a/src/main/java/org/qortal/api/gateway/resource/GatewayResource.java b/src/main/java/org/qortal/api/gateway/resource/GatewayResource.java
index cee1613f..a73de1fb 100644
--- a/src/main/java/org/qortal/api/gateway/resource/GatewayResource.java
+++ b/src/main/java/org/qortal/api/gateway/resource/GatewayResource.java
@@ -65,7 +65,7 @@ public class GatewayResource {
}
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
- return resource.getStatus();
+ return resource.getStatus(false);
}
diff --git a/src/main/java/org/qortal/api/model/NodeStatus.java b/src/main/java/org/qortal/api/model/NodeStatus.java
index be112bc3..ccc1eb01 100644
--- a/src/main/java/org/qortal/api/model/NodeStatus.java
+++ b/src/main/java/org/qortal/api/model/NodeStatus.java
@@ -4,6 +4,7 @@ import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.network.Network;
@XmlAccessorType(XmlAccessType.FIELD)
@@ -22,7 +23,7 @@ public class NodeStatus {
public NodeStatus() {
this.isMintingPossible = Controller.getInstance().isMintingPossible();
- this.syncPercent = Controller.getInstance().getSyncPercent();
+ this.syncPercent = Synchronizer.getInstance().getSyncPercent();
this.isSynchronizing = this.syncPercent != null;
this.numberOfConnections = Network.getInstance().getHandshakedPeers().size();
diff --git a/src/main/java/org/qortal/api/resource/AdminResource.java b/src/main/java/org/qortal/api/resource/AdminResource.java
index 8d00c751..bde4bed4 100644
--- a/src/main/java/org/qortal/api/resource/AdminResource.java
+++ b/src/main/java/org/qortal/api/resource/AdminResource.java
@@ -44,6 +44,7 @@ import org.qortal.api.model.NodeInfo;
import org.qortal.api.model.NodeStatus;
import org.qortal.block.BlockChain;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.controller.Synchronizer.SynchronizationResult;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
@@ -525,7 +526,7 @@ public class AdminResource {
SynchronizationResult syncResult;
try {
do {
- syncResult = Controller.getInstance().actuallySynchronize(targetPeer, true);
+ syncResult = Synchronizer.getInstance().actuallySynchronize(targetPeer, true);
} while (syncResult == SynchronizationResult.OK);
} finally {
blockchainLock.unlock();
diff --git a/src/main/java/org/qortal/api/resource/ArbitraryResource.java b/src/main/java/org/qortal/api/resource/ArbitraryResource.java
index 82618152..8031bf83 100644
--- a/src/main/java/org/qortal/api/resource/ArbitraryResource.java
+++ b/src/main/java/org/qortal/api/resource/ArbitraryResource.java
@@ -232,9 +232,10 @@ public class ArbitraryResource {
}
)
@SecurityRequirement(name = "apiKey")
- public ArbitraryResourceStatus getDefaultResourceStatus(@PathParam("service") Service service,
- @PathParam("name") String name,
- @QueryParam("build") Boolean build) {
+ public ArbitraryResourceStatus getDefaultResourceStatus(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
+ @PathParam("service") Service service,
+ @PathParam("name") String name,
+ @QueryParam("build") Boolean build) {
Security.requirePriorAuthorizationOrApiKey(request, name, service, null);
return this.getStatus(service, name, null, build);
@@ -252,10 +253,11 @@ public class ArbitraryResource {
}
)
@SecurityRequirement(name = "apiKey")
- public ArbitraryResourceStatus getResourceStatus(@PathParam("service") Service service,
- @PathParam("name") String name,
- @PathParam("identifier") String identifier,
- @QueryParam("build") Boolean build) {
+ public ArbitraryResourceStatus getResourceStatus(@HeaderParam(Security.API_KEY_HEADER) String apiKey,
+ @PathParam("service") Service service,
+ @PathParam("name") String name,
+ @PathParam("identifier") String identifier,
+ @QueryParam("build") Boolean build) {
Security.requirePriorAuthorizationOrApiKey(request, name, service, identifier);
return this.getStatus(service, name, identifier, build);
@@ -574,10 +576,16 @@ public class ArbitraryResource {
@PathParam("service") Service service,
@PathParam("name") String name,
@QueryParam("filepath") String filepath,
- @QueryParam("rebuild") boolean rebuild) {
- Security.checkApiCallAllowed(request);
+ @QueryParam("rebuild") boolean rebuild,
+ @QueryParam("async") boolean async,
+ @QueryParam("attempts") Integer attempts) {
- return this.download(service, name, null, filepath, rebuild);
+ // Authentication can be bypassed in the settings, for those running public QDN nodes
+ if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
+ Security.checkApiCallAllowed(request);
+ }
+
+ return this.download(service, name, null, filepath, rebuild, async, attempts);
}
@GET
@@ -603,10 +611,16 @@ public class ArbitraryResource {
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@QueryParam("filepath") String filepath,
- @QueryParam("rebuild") boolean rebuild) {
- Security.checkApiCallAllowed(request);
+ @QueryParam("rebuild") boolean rebuild,
+ @QueryParam("async") boolean async,
+ @QueryParam("attempts") Integer attempts) {
- return this.download(service, name, identifier, filepath, rebuild);
+ // Authentication can be bypassed in the settings, for those running public QDN nodes
+ if (!Settings.getInstance().isQDNAuthBypassEnabled()) {
+ Security.checkApiCallAllowed(request);
+ }
+
+ return this.download(service, name, identifier, filepath, rebuild, async, attempts);
}
@@ -1017,30 +1031,45 @@ public class ArbitraryResource {
}
}
- private HttpServletResponse download(Service service, String name, String identifier, String filepath, boolean rebuild) {
+ private HttpServletResponse download(Service service, String name, String identifier, String filepath, boolean rebuild, boolean async, Integer maxAttempts) {
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
try {
int attempts = 0;
+ if (maxAttempts == null) {
+ maxAttempts = 5;
+ }
// Loop until we have data
- while (!Controller.isStopping()) {
- attempts++;
- if (!arbitraryDataReader.isBuilding()) {
- try {
- arbitraryDataReader.loadSynchronously(rebuild);
- break;
- } catch (MissingDataException e) {
- if (attempts > 5) {
- // Give up after 5 attempts
- throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data unavailable. Please try again later.");
+ if (async) {
+ // Asynchronous
+ arbitraryDataReader.loadAsynchronously(false, 1);
+ }
+ else {
+ // Synchronous
+ while (!Controller.isStopping()) {
+ attempts++;
+ if (!arbitraryDataReader.isBuilding()) {
+ try {
+ arbitraryDataReader.loadSynchronously(rebuild);
+ break;
+ } catch (MissingDataException e) {
+ if (attempts > maxAttempts) {
+ // Give up after maxAttempts attempts
+ throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA, "Data unavailable. Please try again later.");
+ }
}
}
+ Thread.sleep(3000L);
}
- Thread.sleep(3000L);
}
+
java.nio.file.Path outputPath = arbitraryDataReader.getFilePath();
+ if (outputPath == null) {
+ // Assume the resource doesn't exist
+ throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.FILE_NOT_FOUND, "File not found");
+ }
if (filepath == null || filepath.isEmpty()) {
// No file path supplied - so check if this is a single file resource
@@ -1049,6 +1078,10 @@ public class ArbitraryResource {
// This is a single file resource
filepath = files[0];
}
+ else {
+ throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.INVALID_CRITERIA,
+ "filepath is required for resources containing more than one file");
+ }
}
// TODO: limit file size that can be read into memory
@@ -1085,7 +1118,7 @@ public class ArbitraryResource {
}
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
- return resource.getStatus();
+ return resource.getStatus(false);
}
private List<ArbitraryResourceInfo> addStatusToResources(List<ArbitraryResourceInfo> resources) {
@@ -1094,7 +1127,7 @@ public class ArbitraryResource {
for (ArbitraryResourceInfo resourceInfo : resources) {
ArbitraryDataResource resource = new ArbitraryDataResource(resourceInfo.name, ResourceIdType.NAME,
resourceInfo.service, resourceInfo.identifier);
- ArbitraryResourceStatus status = resource.getStatus();
+ ArbitraryResourceStatus status = resource.getStatus(true);
if (status != null) {
resourceInfo.status = status;
}
diff --git a/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java b/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java
index 9bbf0e43..834c7b81 100644
--- a/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java
+++ b/src/main/java/org/qortal/api/resource/CrossChainBitcoinResource.java
@@ -67,11 +67,16 @@ public class CrossChainBitcoinResource {
if (!bitcoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
- Long balance = bitcoin.getWalletBalance(key58);
- if (balance == null)
- throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
+ try {
+ Long balance = bitcoin.getWalletBalanceFromTransactions(key58);
+ if (balance == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
- return balance.toString();
+ return balance.toString();
+
+ } catch (ForeignBlockchainException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
+ }
}
@POST
diff --git a/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java b/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java
index bb2dcbbc..189a53d3 100644
--- a/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java
+++ b/src/main/java/org/qortal/api/resource/CrossChainDogecoinResource.java
@@ -65,11 +65,16 @@ public class CrossChainDogecoinResource {
if (!dogecoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
- Long balance = dogecoin.getWalletBalance(key58);
- if (balance == null)
- throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
+ try {
+ Long balance = dogecoin.getWalletBalanceFromTransactions(key58);
+ if (balance == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
- return balance.toString();
+ return balance.toString();
+
+ } catch (ForeignBlockchainException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
+ }
}
@POST
diff --git a/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java b/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java
index 8f6fa582..627c00c7 100644
--- a/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java
+++ b/src/main/java/org/qortal/api/resource/CrossChainLitecoinResource.java
@@ -67,11 +67,16 @@ public class CrossChainLitecoinResource {
if (!litecoin.isValidDeterministicKey(key58))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_PRIVATE_KEY);
- Long balance = litecoin.getWalletBalance(key58);
- if (balance == null)
- throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
+ try {
+ Long balance = litecoin.getWalletBalanceFromTransactions(key58);
+ if (balance == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
- return balance.toString();
+ return balance.toString();
+
+ } catch (ForeignBlockchainException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE);
+ }
}
@POST
diff --git a/src/main/java/org/qortal/api/resource/TransactionsResource.java b/src/main/java/org/qortal/api/resource/TransactionsResource.java
index 30f242c4..9bc6d497 100644
--- a/src/main/java/org/qortal/api/resource/TransactionsResource.java
+++ b/src/main/java/org/qortal/api/resource/TransactionsResource.java
@@ -9,6 +9,8 @@ import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
@@ -44,6 +46,7 @@ import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.Base58;
import com.google.common.primitives.Bytes;
+import org.qortal.utils.NTP;
@Path("/transactions")
@Tag(name = "Transactions")
@@ -363,6 +366,83 @@ public class TransactionsResource {
}
}
+ @GET
+ @Path("/unitfee")
+ @Operation(
+ summary = "Get transaction unit fee",
+ responses = {
+ @ApiResponse(
+ content = @Content(
+ mediaType = MediaType.TEXT_PLAIN,
+ schema = @Schema(
+ type = "number"
+ )
+ )
+ )
+ }
+ )
+ @ApiErrors({
+ ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE
+ })
+ public long getTransactionUnitFee(@QueryParam("txType") TransactionType txType,
+ @QueryParam("timestamp") Long timestamp,
+ @QueryParam("level") Integer accountLevel) {
+ try {
+ if (timestamp == null) {
+ timestamp = NTP.getTime();
+ }
+
+ Constructor<?> constructor = txType.constructor;
+ Transaction transaction = (Transaction) constructor.newInstance(null, null);
+ // FUTURE: add accountLevel parameter to transaction.getUnitFee() if needed
+ return transaction.getUnitFee(timestamp);
+
+ } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA, e);
+ }
+ }
+
+ @POST
+ @Path("/fee")
+ @Operation(
+ summary = "Get recommended fee for supplied transaction data",
+ requestBody = @RequestBody(
+ required = true,
+ content = @Content(
+ mediaType = MediaType.TEXT_PLAIN,
+ schema = @Schema(
+ type = "string"
+ )
+ )
+ )
+ )
+ @ApiErrors({
+ ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE
+ })
+ public long getRecommendedTransactionFee(String rawInputBytes58) {
+ byte[] rawInputBytes = Base58.decode(rawInputBytes58);
+ if (rawInputBytes.length == 0)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.JSON);
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+
+ // Append null signature on the end before transformation
+ byte[] rawBytes = Bytes.concat(rawInputBytes, new byte[TransactionTransformer.SIGNATURE_LENGTH]);
+
+ TransactionData transactionData = TransactionTransformer.fromBytes(rawBytes);
+ if (transactionData == null)
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
+
+ Transaction transaction = Transaction.fromData(repository, transactionData);
+ return transaction.calcRecommendedFee();
+
+ } catch (DataException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
+ } catch (TransformationException e) {
+ throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
+ }
+ }
+
@GET
@Path("/creator/{publickey}")
@Operation(
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataBuildQueueItem.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataBuildQueueItem.java
index ffbf8fe3..4a02f092 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataBuildQueueItem.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataBuildQueueItem.java
@@ -13,8 +13,11 @@ public class ArbitraryDataBuildQueueItem extends ArbitraryDataResource {
private final Long creationTimestamp;
private Long buildStartTimestamp = null;
private Long buildEndTimestamp = null;
+ private Integer priority = 0;
private boolean failed = false;
+ private static int HIGH_PRIORITY_THRESHOLD = 5;
+
/* The maximum amount of time to spend on a single build */
// TODO: interrupt an in-progress build
public static long BUILD_TIMEOUT = 60*1000L; // 60 seconds
@@ -27,13 +30,20 @@ public class ArbitraryDataBuildQueueItem extends ArbitraryDataResource {
this.creationTimestamp = NTP.getTime();
}
+ public void prepareForBuild() {
+ this.buildStartTimestamp = NTP.getTime();
+ }
+
public void build() throws IOException, DataException, MissingDataException {
Long now = NTP.getTime();
if (now == null) {
+ this.buildStartTimestamp = null;
throw new DataException("NTP time hasn't synced yet");
}
- this.buildStartTimestamp = now;
+ if (this.buildStartTimestamp == null) {
+ this.buildStartTimestamp = now;
+ }
ArbitraryDataReader arbitraryDataReader =
new ArbitraryDataReader(this.resourceId, this.resourceIdType, this.service, this.identifier);
@@ -70,6 +80,21 @@ public class ArbitraryDataBuildQueueItem extends ArbitraryDataResource {
return this.buildStartTimestamp;
}
+ public Integer getPriority() {
+ if (this.priority != null) {
+ return this.priority;
+ }
+ return 0;
+ }
+
+ public void setPriority(Integer priority) {
+ this.priority = priority;
+ }
+
+ public boolean isHighPriority() {
+ return this.priority >= HIGH_PRIORITY_THRESHOLD;
+ }
+
public void setFailed(boolean failed) {
this.failed = failed;
}
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataCache.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataCache.java
index cfe445e2..accd808d 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataCache.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataCache.java
@@ -61,6 +61,9 @@ public class ArbitraryDataCache {
}
// No need to invalidate the cache
+ // Remember that it's up to date, so that we won't check again for a while
+ ArbitraryDataManager.getInstance().addResourceToCache(this.getArbitraryDataResource());
+
return false;
}
@@ -84,14 +87,7 @@ public class ArbitraryDataCache {
// If the state's sig doesn't match the latest transaction's sig, we need to invalidate
// This means that an updated layer is available
- if (this.shouldInvalidateDueToSignatureMismatch()) {
-
- // Add to the in-memory cache first, so that we won't check again for a while
- ArbitraryDataManager.getInstance().addResourceToCache(this.getArbitraryDataResource());
- return true;
- }
-
- return false;
+ return this.shouldInvalidateDueToSignatureMismatch();
}
/**
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java
index 1eaeda3c..6b29de8d 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataFile.java
@@ -366,6 +366,21 @@ public class ArbitraryDataFile {
return false;
}
+ public boolean delete(int attempts) {
+ // Keep trying to delete the data until it is deleted, or we reach 10 attempts
+ for (int i=0; i<attempts; i++) {
+ if (this.delete()) {
+ return true;
+ }
+ try {
+ Thread.sleep(1000L);
+ } catch (InterruptedException e) {
+ // Fall through to exit method
+ }
+ }
+ return false;
+ }
+
+ public List<byte[]> missingHashes() {
+ List<byte[]> missingHashes = new ArrayList<>();
+ try {
+ if (this.metadataHash == null) {
+ // We don't have any metadata so can't check if we have the chunks
+ // Even if this transaction has no chunks, we don't have the file either (already checked above)
+ return null;
+ }
+
+ if (this.metadataFile == null) {
+ this.metadataFile = ArbitraryDataFile.fromHash(this.metadataHash, this.signature);
+ }
+
+ // If the metadata file doesn't exist, we can't check if we have the chunks
+ if (!metadataFile.getFilePath().toFile().exists()) {
+ return null;
+ }
+
+ if (this.metadata == null) {
+ this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath()));
+ }
+
+ // Read the metadata
+ List<byte[]> chunks = metadata.getChunks();
+ for (byte[] chunkHash : chunks) {
+ ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
+ if (!chunk.exists()) {
+ missingHashes.add(chunkHash);
+ }
+ }
+
+ return missingHashes;
+
+ } catch (DataException e) {
+ // Something went wrong, so we can't make a sensible decision
+ return null;
+ }
+ }
+
public boolean containsChunk(byte[] hash) {
for (ArbitraryDataFileChunk chunk : this.chunks) {
if (Arrays.equals(hash, chunk.getHash())) {
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java
index a6fad12d..568549d8 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataReader.java
@@ -122,10 +122,22 @@ public class ArbitraryDataReader {
* This adds the build task to a queue, and the result will be cached when complete
* To check the status of the build, periodically call isCachedDataAvailable()
* Once it returns true, you can then use getFilePath() to access the data itself.
+ *
+ * @param overwrite - set to true to force rebuild an existing cache
* @return true if added or already present in queue; false if not
*/
- public boolean loadAsynchronously() {
- return ArbitraryDataBuildManager.getInstance().addToBuildQueue(this.createQueueItem());
+ public boolean loadAsynchronously(boolean overwrite, int priority) {
+ ArbitraryDataCache cache = new ArbitraryDataCache(this.uncompressedPath, overwrite,
+ this.resourceId, this.resourceIdType, this.service, this.identifier);
+ if (cache.isCachedDataAvailable()) {
+ // Use cached data
+ this.filePath = this.uncompressedPath;
+ return true;
+ }
+
+ ArbitraryDataBuildQueueItem item = this.createQueueItem();
+ item.setPriority(priority);
+ return ArbitraryDataBuildManager.getInstance().addToBuildQueue(item);
}
/**
@@ -363,7 +375,7 @@ public class ArbitraryDataReader {
}
// Throw a missing data exception, which allows subsequent layers to fetch data
- LOGGER.debug(message);
+ LOGGER.trace(message);
throw new MissingDataException(message);
}
}
@@ -458,12 +470,18 @@ public class ArbitraryDataReader {
throw new DataException(String.format("Unable to unzip file: %s", e.getMessage()));
}
- // Replace filePath pointer with the uncompressed file path
+ if (!this.uncompressedPath.toFile().exists()) {
+ throw new DataException(String.format("Unable to unzip file: %s", this.filePath));
+ }
+
+ // Delete original compressed file
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
if (Files.exists(this.filePath)) {
Files.delete(this.filePath);
}
}
+
+ // Replace filePath pointer with the uncompressed file path
this.filePath = this.uncompressedPath;
}
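
The Javadoc on loadAsynchronously() above describes the intended calling pattern: queue the build, poll isCachedDataAvailable(), then read the result via getFilePath(). A minimal sketch of that pattern follows, assuming the caller already holds a registered name, service and identifier, and runs where InterruptedException can propagate; the timeout and poll interval are illustrative values, not part of this change.

    // Illustrative caller of the async build API described above - not part of this diff
    ArbitraryDataReader reader = new ArbitraryDataReader(
            name, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
    if (reader.loadAsynchronously(false, 1)) {
        long deadline = System.currentTimeMillis() + 60_000L; // illustrative 60s timeout
        while (!reader.isCachedDataAvailable() && System.currentTimeMillis() < deadline) {
            Thread.sleep(1000L); // illustrative poll interval
        }
        java.nio.file.Path outputPath = reader.getFilePath(); // cached data location once the build completes
    }
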
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java
index 445ff2f6..3bd47b26 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataRenderer.java
@@ -76,7 +76,7 @@ public class ArbitraryDataRenderer {
if (!arbitraryDataReader.isCachedDataAvailable()) {
// If async is requested, show a loading screen whilst build is in progress
if (async) {
- arbitraryDataReader.loadAsynchronously();
+ arbitraryDataReader.loadAsynchronously(false, 10);
return this.getLoadingResponse(service, resourceId);
}
diff --git a/src/main/java/org/qortal/arbitrary/ArbitraryDataResource.java b/src/main/java/org/qortal/arbitrary/ArbitraryDataResource.java
index 65c92cc6..36bd8f4c 100644
--- a/src/main/java/org/qortal/arbitrary/ArbitraryDataResource.java
+++ b/src/main/java/org/qortal/arbitrary/ArbitraryDataResource.java
@@ -6,6 +6,7 @@ import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
+import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.list.ResourceListManager;
@@ -37,6 +38,8 @@ public class ArbitraryDataResource {
private List<ArbitraryTransactionData> transactions;
private ArbitraryTransactionData latestPutTransaction;
private int layerCount;
+ private Integer localChunkCount = null;
+ private Integer totalChunkCount = null;
public ArbitraryDataResource(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
this.resourceId = resourceId.toLowerCase();
@@ -50,50 +53,56 @@ public class ArbitraryDataResource {
this.identifier = identifier;
}
- public ArbitraryResourceStatus getStatus() {
+ public ArbitraryResourceStatus getStatus(boolean quick) {
+ // Calculate the chunk counts
+ // Avoid this for "quick" statuses, to speed things up
+ if (!quick) {
+ this.calculateChunkCounts();
+ }
+
if (resourceIdType != ResourceIdType.NAME) {
// We only support statuses for resources with a name
- return new ArbitraryResourceStatus(Status.UNSUPPORTED);
+ return new ArbitraryResourceStatus(Status.UNSUPPORTED, this.localChunkCount, this.totalChunkCount);
}
// Check if the name is blocked
if (ResourceListManager.getInstance()
.listContains("blockedNames", this.resourceId, false)) {
- return new ArbitraryResourceStatus(Status.BLOCKED);
+ return new ArbitraryResourceStatus(Status.BLOCKED, this.localChunkCount, this.totalChunkCount);
+ }
+
+ // Check if a build has failed
+ ArbitraryDataBuildQueueItem queueItem =
+ new ArbitraryDataBuildQueueItem(resourceId, resourceIdType, service, identifier);
+ if (ArbitraryDataBuildManager.getInstance().isInFailedBuildsList(queueItem)) {
+ return new ArbitraryResourceStatus(Status.BUILD_FAILED, this.localChunkCount, this.totalChunkCount);
}
// Firstly check the cache to see if it's already built
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(
resourceId, resourceIdType, service, identifier);
if (arbitraryDataReader.isCachedDataAvailable()) {
- return new ArbitraryResourceStatus(Status.READY);
- }
-
- // Next check if there's a build in progress
- ArbitraryDataBuildQueueItem queueItem =
- new ArbitraryDataBuildQueueItem(resourceId, resourceIdType, service, identifier);
- if (ArbitraryDataBuildManager.getInstance().isInBuildQueue(queueItem)) {
- return new ArbitraryResourceStatus(Status.BUILDING);
- }
-
- // Check if a build has failed
- if (ArbitraryDataBuildManager.getInstance().isInFailedBuildsList(queueItem)) {
- return new ArbitraryResourceStatus(Status.BUILD_FAILED);
+ return new ArbitraryResourceStatus(Status.READY, this.localChunkCount, this.totalChunkCount);
}
// Check if we have all data locally for this resource
if (!this.allFilesDownloaded()) {
if (this.isDownloading()) {
- return new ArbitraryResourceStatus(Status.DOWNLOADING);
+ return new ArbitraryResourceStatus(Status.DOWNLOADING, this.localChunkCount, this.totalChunkCount);
}
else if (this.isDataPotentiallyAvailable()) {
- return new ArbitraryResourceStatus(Status.PUBLISHED);
+ return new ArbitraryResourceStatus(Status.PUBLISHED, this.localChunkCount, this.totalChunkCount);
}
- return new ArbitraryResourceStatus(Status.MISSING_DATA);
+ return new ArbitraryResourceStatus(Status.MISSING_DATA, this.localChunkCount, this.totalChunkCount);
+ }
+
+ // Check if there's a build in progress
+ if (ArbitraryDataBuildManager.getInstance().isInBuildQueue(queueItem)) {
+ return new ArbitraryResourceStatus(Status.BUILDING, this.localChunkCount, this.totalChunkCount);
}
// We have all data locally
- return new ArbitraryResourceStatus(Status.DOWNLOADED);
+ return new ArbitraryResourceStatus(Status.DOWNLOADED, this.localChunkCount, this.totalChunkCount);
}
public boolean delete() {
@@ -116,6 +125,9 @@ public class ArbitraryDataResource {
// Also delete cached data for the entire resource
this.deleteCache();
+ // Invalidate the hosted transactions cache as we have removed an item
+ ArbitraryDataStorageManager.getInstance().invalidateHostedTransactionsCache();
+
return true;
} catch (DataException | IOException e) {
@@ -124,6 +136,13 @@ public class ArbitraryDataResource {
}
public void deleteCache() throws IOException {
+ // Don't delete anything if there's a build in progress
+ ArbitraryDataBuildQueueItem queueItem =
+ new ArbitraryDataBuildQueueItem(resourceId, resourceIdType, service, identifier);
+ if (ArbitraryDataBuildManager.getInstance().isInBuildQueue(queueItem)) {
+ return;
+ }
+
String baseDir = Settings.getInstance().getTempDataPath();
String identifier = this.identifier != null ? this.identifier : "default";
Path cachePath = Paths.get(baseDir, "reader", this.resourceIdType.toString(), this.resourceId, this.service.toString(), identifier);
@@ -136,6 +155,12 @@ public class ArbitraryDataResource {
}
private boolean allFilesDownloaded() {
+ // Use chunk counts to speed things up if we can
+ if (this.localChunkCount != null && this.totalChunkCount != null &&
+ this.localChunkCount >= this.totalChunkCount) {
+ return true;
+ }
+
try {
this.fetchTransactions();
@@ -154,6 +179,25 @@ public class ArbitraryDataResource {
}
}
+ private void calculateChunkCounts() {
+ try {
+ this.fetchTransactions();
+
+ List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
+ int localChunkCount = 0;
+ int totalChunkCount = 0;
+
+ for (ArbitraryTransactionData transactionData : transactionDataList) {
+ localChunkCount += ArbitraryTransactionUtils.ourChunkCount(transactionData);
+ totalChunkCount += ArbitraryTransactionUtils.totalChunkCount(transactionData);
+ }
+
+ this.localChunkCount = localChunkCount;
+ this.totalChunkCount = totalChunkCount;
+
+ } catch (DataException e) {}
+ }
+
private boolean isRateLimited() {
try {
this.fetchTransactions();
diff --git a/src/main/java/org/qortal/block/BlockChain.java b/src/main/java/org/qortal/block/BlockChain.java
index 7a6d6605..69779d96 100644
--- a/src/main/java/org/qortal/block/BlockChain.java
+++ b/src/main/java/org/qortal/block/BlockChain.java
@@ -68,9 +68,15 @@ public class BlockChain {
atFindNextTransactionFix,
newBlockSigHeight,
shareBinFix,
- calcChainWeightTimestamp;
+ calcChainWeightTimestamp,
+ transactionV5Timestamp;
}
+ // Custom transaction fees
+ @XmlJavaTypeAdapter(value = org.qortal.api.AmountTypeAdapter.class)
+ private long nameRegistrationUnitFee;
+ private long nameRegistrationUnitFeeTimestamp;
+
/** Map of which blockchain features are enabled when (height/timestamp) */
@XmlJavaTypeAdapter(StringLongMapXmlAdapter.class)
private Map<String, Long> featureTriggers;
@@ -141,7 +147,8 @@ public class BlockChain {
}
private List<BlockTimingByHeight> blockTimingsByHeight;
- private int minAccountLevelToMint = 1;
+ private int minAccountLevelToMint;
+ private int minAccountLevelForBlockSubmissions;
private int minAccountLevelToRewardShare;
private int maxRewardSharesPerMintingAccount;
private int founderEffectiveMintingLevel;
@@ -299,6 +306,16 @@ public class BlockChain {
return this.maxBlockSize;
}
+ // Custom transaction fees
+ public long getNameRegistrationUnitFee() {
+ return this.nameRegistrationUnitFee;
+ }
+
+ public long getNameRegistrationUnitFeeTimestamp() {
+ // FUTURE: we could use a separate structure to indicate fee adjustments for different transaction types
+ return this.nameRegistrationUnitFeeTimestamp;
+ }
+
/** Returns true if approval-needing transaction types require a txGroupId other than NO_GROUP. */
public boolean getRequireGroupForApproval() {
return this.requireGroupForApproval;
@@ -344,6 +361,10 @@ public class BlockChain {
return this.minAccountLevelToMint;
}
+ public int getMinAccountLevelForBlockSubmissions() {
+ return this.minAccountLevelForBlockSubmissions;
+ }
+
public int getMinAccountLevelToRewardShare() {
return this.minAccountLevelToRewardShare;
}
@@ -386,6 +407,10 @@ public class BlockChain {
return this.featureTriggers.get(FeatureTrigger.calcChainWeightTimestamp.name()).longValue();
}
+ public long getTransactionV5Timestamp() {
+ return this.featureTriggers.get(FeatureTrigger.transactionV5Timestamp.name()).longValue();
+ }
+
// More complex getters for aspects that change by height or timestamp
public long getRewardAtHeight(int ourHeight) {
diff --git a/src/main/java/org/qortal/controller/BlockMinter.java b/src/main/java/org/qortal/controller/BlockMinter.java
index 4eea91a9..d7d1dd48 100644
--- a/src/main/java/org/qortal/controller/BlockMinter.java
+++ b/src/main/java/org/qortal/controller/BlockMinter.java
@@ -1,6 +1,8 @@
package org.qortal.controller;
import java.math.BigInteger;
+import java.text.DecimalFormat;
+import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
@@ -16,11 +18,11 @@ import org.qortal.account.PrivateKeyAccount;
import org.qortal.block.Block;
import org.qortal.block.Block.ValidationResult;
import org.qortal.block.BlockChain;
-import org.qortal.data.account.AccountData;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.block.BlockData;
import org.qortal.data.block.BlockSummaryData;
+import org.qortal.data.block.CommonBlockData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
@@ -48,11 +50,6 @@ public class BlockMinter extends Thread {
// Recovery
public static final long INVALID_BLOCK_RECOVERY_TIMEOUT = 10 * 60 * 1000L; // ms
- // Min account level to submit blocks
- // This is an unvalidated version of Blockchain.minAccountLevelToMint
- // and exists only to reduce block candidates by default.
- private static int MIN_LEVEL_FOR_BLOCK_SUBMISSION = 3;
-
// Constructors
public BlockMinter() {
@@ -81,6 +78,10 @@ public class BlockMinter extends Thread {
BlockRepository blockRepository = repository.getBlockRepository();
BlockData previousBlockData = null;
+ // Vars to keep track of blocks that were skipped due to chain weight
+ byte[] parentSignatureForLastLowWeightBlock = null;
+ Long timeOfLastLowWeightBlock = null;
+
List<Block> newBlocks = new ArrayList<>();
// Flags for tracking change in whether minting is possible,
@@ -137,14 +138,13 @@ public class BlockMinter extends Thread {
continue;
}
- // Optional (non-validated) prevention of block submissions below a defined level
- AccountData accountData = repository.getAccountRepository().getAccount(mintingAccount.getAddress());
- if (accountData != null) {
- Integer level = accountData.getLevel();
- if (level != null && level < MIN_LEVEL_FOR_BLOCK_SUBMISSION) {
- madi.remove();
- continue;
- }
+ // Optional (non-validated) prevention of block submissions below a defined level.
+ // This is an unvalidated version of Blockchain.minAccountLevelToMint
+ // and exists only to reduce block candidates by default.
+ int level = mintingAccount.getEffectiveMintingLevel();
+ if (level < BlockChain.getInstance().getMinAccountLevelForBlockSubmissions()) {
+ madi.remove();
+ continue;
}
}
@@ -156,7 +156,7 @@ public class BlockMinter extends Thread {
// Disregard peers that don't have a recent block, but only if we're not in recovery mode.
// In that mode, we want to allow minting on top of older blocks, to recover stalled networks.
- if (Controller.getInstance().getRecoveryMode() == false)
+ if (Synchronizer.getInstance().getRecoveryMode() == false)
peers.removeIf(Controller.hasNoRecentBlock);
// Don't mint if we don't have enough up-to-date peers as where would the transactions/consensus come from?
@@ -181,7 +181,7 @@ public class BlockMinter extends Thread {
// If our latest block isn't recent then we need to synchronize instead of minting, unless we're in recovery mode.
if (!peers.isEmpty() && lastBlockData.getTimestamp() < minLatestBlockTimestamp)
- if (Controller.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false)
+ if (Synchronizer.getInstance().getRecoveryMode() == false && recoverInvalidBlock == false)
continue;
// There are enough peers with a recent block and our latest block is recent
@@ -195,6 +195,9 @@ public class BlockMinter extends Thread {
// Reduce log timeout
logTimeout = 10 * 1000L;
+
+ // Last low weight block is no longer valid
+ parentSignatureForLastLowWeightBlock = null;
}
// Discard accounts we have already built blocks with
@@ -211,6 +214,14 @@ public class BlockMinter extends Thread {
continue;
}
+ if (parentSignatureForLastLowWeightBlock != null) {
+ // The last iteration found a higher weight block in the network, so sleep for a while
+ // to allow us to sync the higher weight chain. We are sleeping here rather than when
+ // detected as we don't want to hold the blockchain lock open.
+ LOGGER.info("Sleeping for 10 seconds...");
+ Thread.sleep(10 * 1000L);
+ }
+
for (PrivateKeyAccount mintingAccount : newBlocksMintingAccounts) {
// First block does the AT heavy-lifting
if (newBlocks.isEmpty()) {
@@ -302,6 +313,44 @@ public class BlockMinter extends Thread {
}
}
+ try {
+ if (this.higherWeightChainExists(repository, bestWeight)) {
+
+ // Check if the base block has updated since the last time we were here
+ if (parentSignatureForLastLowWeightBlock == null || timeOfLastLowWeightBlock == null ||
+ !Arrays.equals(parentSignatureForLastLowWeightBlock, previousBlockData.getSignature())) {
+ // We've switched to a different chain, so reset the timer
+ timeOfLastLowWeightBlock = NTP.getTime();
+ }
+ parentSignatureForLastLowWeightBlock = previousBlockData.getSignature();
+
+ // If less than 30 seconds has passed since first detecting the higher weight chain,
+ // we should skip our block submission to give us the opportunity to sync to the better chain
+ if (NTP.getTime() - timeOfLastLowWeightBlock < 30*1000L) {
+ LOGGER.info("Higher weight chain found in peers, so not signing a block this round");
+ LOGGER.info("Time since detected: {}", NTP.getTime() - timeOfLastLowWeightBlock);
+ continue;
+ }
+ else {
+ // More than 30 seconds have passed, so we should submit our block candidate anyway.
+ LOGGER.info("More than 30 seconds passed, so proceeding to submit block candidate...");
+ }
+ }
+ else {
+ LOGGER.debug("No higher weight chain found in peers");
+ }
+ } catch (DataException e) {
+ LOGGER.debug("Unable to check for a higher weight chain. Proceeding anyway...");
+ }
+
+ // Discard any uncommitted changes as a result of the higher weight chain detection
+ repository.discardChanges();
+
+ // Clear variables that track low weight blocks
+ parentSignatureForLastLowWeightBlock = null;
+ timeOfLastLowWeightBlock = null;
+
+
// Add unconfirmed transactions
addUnconfirmedTransactions(repository, newBlock);
@@ -469,6 +518,61 @@ public class BlockMinter extends Thread {
}
}
+ private BigInteger getOurChainWeightSinceBlock(Repository repository, BlockSummaryData commonBlock, List<BlockSummaryData> peerBlockSummaries) throws DataException {
+ final int commonBlockHeight = commonBlock.getHeight();
+ final byte[] commonBlockSig = commonBlock.getSignature();
+ int mutualHeight = commonBlockHeight;
+
+ // Fetch our corresponding block summaries
+ final BlockData ourLatestBlockData = repository.getBlockRepository().getLastBlock();
+ List<BlockSummaryData> ourBlockSummaries = repository.getBlockRepository()
+ .getBlockSummaries(commonBlockHeight + 1, ourLatestBlockData.getHeight());
+ if (!ourBlockSummaries.isEmpty()) {
+ Synchronizer.getInstance().populateBlockSummariesMinterLevels(repository, ourBlockSummaries);
+ }
+
+ if (ourBlockSummaries != null && peerBlockSummaries != null) {
+ mutualHeight += Math.min(ourBlockSummaries.size(), peerBlockSummaries.size());
+ }
+ return Block.calcChainWeight(commonBlockHeight, commonBlockSig, ourBlockSummaries, mutualHeight);
+ }
+
+ private boolean higherWeightChainExists(Repository repository, BigInteger blockCandidateWeight) throws DataException {
+ if (blockCandidateWeight == null) {
+ // Can't make decisions without knowing the block candidate weight
+ return false;
+ }
+ NumberFormat formatter = new DecimalFormat("0.###E0");
+
+ List<Peer> peers = Network.getInstance().getHandshakedPeers();
+ // Loop through handshaked peers and check for any new block candidates
+ for (Peer peer : peers) {
+ if (peer.getCommonBlockData() != null && peer.getCommonBlockData().getCommonBlockSummary() != null) {
+ // This peer has common block data
+ CommonBlockData commonBlockData = peer.getCommonBlockData();
+ BlockSummaryData commonBlockSummaryData = commonBlockData.getCommonBlockSummary();
+ if (commonBlockData.getChainWeight() != null) {
+ // The synchronizer has calculated this peer's chain weight
+ BigInteger ourChainWeightSinceCommonBlock = this.getOurChainWeightSinceBlock(repository, commonBlockSummaryData, commonBlockData.getBlockSummariesAfterCommonBlock());
+ BigInteger ourChainWeight = ourChainWeightSinceCommonBlock.add(blockCandidateWeight);
+ BigInteger peerChainWeight = commonBlockData.getChainWeight();
+ if (peerChainWeight.compareTo(ourChainWeight) >= 0) {
+ // This peer has a higher weight chain than ours
+ LOGGER.debug("Peer {} is on a higher weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
+ return true;
+
+ } else {
+ LOGGER.debug("Peer {} is on a lower weight chain ({}) than ours ({})", peer, formatter.format(peerChainWeight), formatter.format(ourChainWeight));
+ }
+ } else {
+ LOGGER.debug("Peer {} has no chain weight", peer);
+ }
+ } else {
+ LOGGER.debug("Peer {} has no common block data", peer);
+ }
+ }
+ return false;
+ }
private static void moderatedLog(Runnable logFunction) {
// We only log if logging at TRACE or previous log timeout has expired
if (!LOGGER.isTraceEnabled() && lastLogTimestamp != null && lastLogTimestamp + logTimeout > System.currentTimeMillis())
diff --git a/src/main/java/org/qortal/controller/Controller.java b/src/main/java/org/qortal/controller/Controller.java
index c19c57f7..974567f4 100644
--- a/src/main/java/org/qortal/controller/Controller.java
+++ b/src/main/java/org/qortal/controller/Controller.java
@@ -7,25 +7,14 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.security.SecureRandom;
import java.security.Security;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
+import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
@@ -51,7 +40,6 @@ import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.controller.arbitrary.*;
-import org.qortal.controller.Synchronizer.SynchronizationResult;
import org.qortal.controller.repository.PruneManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.tradebot.TradeBot;
@@ -93,14 +81,22 @@ public class Controller extends Thread {
public static final String VERSION_PREFIX = "qortal-";
private static final Logger LOGGER = LogManager.getLogger(Controller.class);
- private static final long MISBEHAVIOUR_COOLOFF = 10 * 60 * 1000L; // ms
+ public static final long MISBEHAVIOUR_COOLOFF = 10 * 60 * 1000L; // ms
private static final int MAX_BLOCKCHAIN_TIP_AGE = 5; // blocks
private static final Object shutdownLock = new Object();
private static final String repositoryUrlTemplate = "jdbc:hsqldb:file:%s" + File.separator + "blockchain;create=true;hsqldb.full_log_replay=true";
private static final long NTP_PRE_SYNC_CHECK_PERIOD = 5 * 1000L; // ms
private static final long NTP_POST_SYNC_CHECK_PERIOD = 5 * 60 * 1000L; // ms
private static final long DELETE_EXPIRED_INTERVAL = 5 * 60 * 1000L; // ms
- private static final long RECOVERY_MODE_TIMEOUT = 10 * 60 * 1000L; // ms
+ private static final int MAX_INCOMING_TRANSACTIONS = 5000;
+
+ /** Minimum time before considering an invalid unconfirmed transaction as "stale" */
+ public static final long INVALID_TRANSACTION_STALE_TIMEOUT = 30 * 60 * 1000L; // ms
+ /** Minimum frequency to re-request stale unconfirmed transactions from peers, to recheck validity */
+ public static final long INVALID_TRANSACTION_RECHECK_INTERVAL = 60 * 60 * 1000L; // ms
+ /** Minimum frequency to re-request expired unconfirmed transactions from peers, to recheck validity
+ * This mainly exists to stop expired transactions from bloating the list */
+ public static final long EXPIRED_TRANSACTION_RECHECK_INTERVAL = 10 * 60 * 1000L; // ms
// To do with online accounts list
private static final long ONLINE_ACCOUNTS_TASKS_INTERVAL = 10 * 1000L; // ms
@@ -114,7 +110,6 @@ public class Controller extends Thread {
private static volatile boolean isStopping = false;
private static BlockMinter blockMinter = null;
- private static volatile boolean requestSync = false;
private static volatile boolean requestSysTrayUpdate = true;
private static Controller instance;
@@ -148,20 +143,11 @@ public class Controller extends Thread {
/** Whether we can mint new blocks, as reported by BlockMinter. */
private volatile boolean isMintingPossible = false;
- /** Synchronization object for sync variables below */
- private final Object syncLock = new Object();
- /** Whether we are attempting to synchronize. */
- private volatile boolean isSynchronizing = false;
- /** Temporary estimate of synchronization progress for SysTray use. */
- private volatile int syncPercent = 0;
+ /** List of incoming transaction that are in the import queue */
+ private List<TransactionData> incomingTransactions = Collections.synchronizedList(new ArrayList<>());
- /** Latest block signatures from other peers that we know are on inferior chains. */
- List<ByteArray> inferiorChainSignatures = new ArrayList<>();
-
- /** Recovery mode, which is used to bring back a stalled network */
- private boolean recoveryMode = false;
- private boolean peersAvailable = true; // peersAvailable must default to true
- private long timePeersLastAvailable = 0;
+ /** List of recent invalid unconfirmed transactions */
+ private Map<String, Long> invalidUnconfirmedTransactions = Collections.synchronizedMap(new HashMap<>());
/** Lock for only allowing one blockchain-modifying codepath at a time. e.g. synchronization or newly minted block. */
private final ReentrantLock blockchainLock = new ReentrantLock();
@@ -351,20 +337,6 @@ public class Controller extends Thread {
return this.isMintingPossible;
}
- public boolean isSynchronizing() {
- return this.isSynchronizing;
- }
-
- public Integer getSyncPercent() {
- synchronized (this.syncLock) {
- return this.isSynchronizing ? this.syncPercent : null;
- }
- }
-
- public boolean getRecoveryMode() {
- return this.recoveryMode;
- }
-
// Entry point
public static void main(String[] args) {
@@ -469,6 +441,9 @@ public class Controller extends Thread {
}
});
+ LOGGER.info("Starting synchronizer");
+ Synchronizer.getInstance().start();
+
LOGGER.info("Starting block minter");
blockMinter = new BlockMinter();
blockMinter.start();
@@ -479,6 +454,7 @@ public class Controller extends Thread {
// Arbitrary data controllers
LOGGER.info("Starting arbitrary-transaction controllers");
ArbitraryDataManager.getInstance().start();
+ ArbitraryDataFileManager.getInstance().start();
ArbitraryDataBuildManager.getInstance().start();
ArbitraryDataCleanupManager.getInstance().start();
ArbitraryDataStorageManager.getInstance().start();
@@ -541,7 +517,7 @@ public class Controller extends Thread {
@Override
public void run() {
- Thread.currentThread().setName("Controller");
+ Thread.currentThread().setName("Qortal");
final long repositoryBackupInterval = Settings.getInstance().getRepositoryBackupInterval();
final long repositoryCheckpointInterval = Settings.getInstance().getRepositoryCheckpointInterval();
@@ -581,10 +557,10 @@ public class Controller extends Thread {
}
}
- if (requestSync) {
- requestSync = false;
- potentiallySynchronize();
- }
+ // Process incoming transactions queue
+ processIncomingTransactionsQueue();
+ // Clean up invalid incoming transactions list
+ cleanupInvalidTransactionsList(now);
// Clean up arbitrary data request cache
ArbitraryDataManager.getInstance().cleanupRequestCache(now);
@@ -710,27 +686,6 @@ public class Controller extends Thread {
}
}
- private long getRandomRepositoryMaintenanceInterval() {
- final long minInterval = Settings.getInstance().getRepositoryMaintenanceMinInterval();
- final long maxInterval = Settings.getInstance().getRepositoryMaintenanceMaxInterval();
- if (maxInterval == 0) {
- return 0;
- }
- return (new Random().nextLong() % (maxInterval - minInterval)) + minInterval;
- }
-
- /**
- * Export current trade bot states and minting accounts.
- */
- public void exportRepositoryData() {
- try (final Repository repository = RepositoryManager.getRepository()) {
- repository.exportNodeLocalData();
-
- } catch (DataException e) {
- // Fail silently as this is an optional step
- }
- }
-
public static final Predicate<Peer> hasMisbehaved = peer -> {
final Long lastMisbehaved = peer.getPeerData().getLastMisbehaved();
return lastMisbehaved != null && lastMisbehaved > NTP.getTime() - MISBEHAVIOUR_COOLOFF;
@@ -755,7 +710,7 @@ public class Controller extends Thread {
public static final Predicate<Peer> hasInferiorChainTip = peer -> {
final PeerChainTipData peerChainTipData = peer.getChainTipData();
- final List<ByteArray> inferiorChainTips = getInstance().inferiorChainSignatures;
+ final List<ByteArray> inferiorChainTips = Synchronizer.getInstance().inferiorChainSignatures;
return peerChainTipData == null || peerChainTipData.getLastBlockSignature() == null || inferiorChainTips.contains(new ByteArray(peerChainTipData.getLastBlockSignature()));
};
@@ -764,218 +719,34 @@ public class Controller extends Thread {
return peer.isAtLeastVersion(minPeerVersion) == false;
};
- private void potentiallySynchronize() throws InterruptedException {
- // Already synchronizing via another thread?
- if (this.isSynchronizing)
- return;
-
- List<Peer> peers = Network.getInstance().getHandshakedPeers();
-
- // Disregard peers that have "misbehaved" recently
- peers.removeIf(hasMisbehaved);
-
- // Disregard peers that only have genesis block
- peers.removeIf(hasOnlyGenesisBlock);
-
- // Disregard peers that don't have a recent block
- peers.removeIf(hasNoRecentBlock);
-
- // Disregard peers that are on an old version
- peers.removeIf(hasOldVersion);
-
- checkRecoveryModeForPeers(peers);
- if (recoveryMode) {
- peers = Network.getInstance().getHandshakedPeers();
- peers.removeIf(hasOnlyGenesisBlock);
- peers.removeIf(hasMisbehaved);
- peers.removeIf(hasOldVersion);
+ private long getRandomRepositoryMaintenanceInterval() {
+ final long minInterval = Settings.getInstance().getRepositoryMaintenanceMinInterval();
+ final long maxInterval = Settings.getInstance().getRepositoryMaintenanceMaxInterval();
+ if (maxInterval == 0) {
+ return 0;
}
-
- // Check we have enough peers to potentially synchronize
- if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
- return;
-
- // Disregard peers that have no block signature or the same block signature as us
- peers.removeIf(hasNoOrSameBlock);
-
- // Disregard peers that are on the same block as last sync attempt and we didn't like their chain
- peers.removeIf(hasInferiorChainTip);
-
- final int peersBeforeComparison = peers.size();
-
- // Request recent block summaries from the remaining peers, and locate our common block with each
- Synchronizer.getInstance().findCommonBlocksWithPeers(peers);
-
- // Compare the peers against each other, and against our chain, which will return an updated list excluding those without common blocks
- peers = Synchronizer.getInstance().comparePeers(peers);
-
- // We may have added more inferior chain tips when comparing peers, so remove any peers that are currently on those chains
- peers.removeIf(hasInferiorChainTip);
-
- final int peersRemoved = peersBeforeComparison - peers.size();
- if (peersRemoved > 0 && peers.size() > 0)
- LOGGER.debug(String.format("Ignoring %d peers on inferior chains. Peers remaining: %d", peersRemoved, peers.size()));
-
- if (peers.isEmpty())
- return;
-
- if (peers.size() > 1) {
- StringBuilder finalPeersString = new StringBuilder();
- for (Peer peer : peers)
- finalPeersString = finalPeersString.length() > 0 ? finalPeersString.append(", ").append(peer) : finalPeersString.append(peer);
- LOGGER.debug(String.format("Choosing random peer from: [%s]", finalPeersString.toString()));
- }
-
- // Pick random peer to sync with
- int index = new SecureRandom().nextInt(peers.size());
- Peer peer = peers.get(index);
-
- actuallySynchronize(peer, false);
+ return (new Random().nextLong() % (maxInterval - minInterval)) + minInterval;
}
- public SynchronizationResult actuallySynchronize(Peer peer, boolean force) throws InterruptedException {
- boolean hasStatusChanged = false;
- BlockData priorChainTip = this.getChainTip();
+ /**
+ * Export current trade bot states and minting accounts.
+ */
+ public void exportRepositoryData() {
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ repository.exportNodeLocalData();
- synchronized (this.syncLock) {
- this.syncPercent = (priorChainTip.getHeight() * 100) / peer.getChainTipData().getLastHeight();
-
- // Only update SysTray if we're potentially changing height
- if (this.syncPercent < 100) {
- this.isSynchronizing = true;
- hasStatusChanged = true;
- }
- }
- peer.setSyncInProgress(true);
-
- if (hasStatusChanged)
- updateSysTray();
-
- try {
- SynchronizationResult syncResult = Synchronizer.getInstance().synchronize(peer, force);
- switch (syncResult) {
- case GENESIS_ONLY:
- case NO_COMMON_BLOCK:
- case TOO_DIVERGENT:
- case INVALID_DATA: {
- // These are more serious results that warrant a cool-off
- LOGGER.info(String.format("Failed to synchronize with peer %s (%s) - cooling off", peer, syncResult.name()));
-
- // Don't use this peer again for a while
- Network.getInstance().peerMisbehaved(peer);
- break;
- }
-
- case INFERIOR_CHAIN: {
- // Update our list of inferior chain tips
- ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
- if (!inferiorChainSignatures.contains(inferiorChainSignature))
- inferiorChainSignatures.add(inferiorChainSignature);
-
- // These are minor failure results so fine to try again
- LOGGER.debug(() -> String.format("Refused to synchronize with peer %s (%s)", peer, syncResult.name()));
-
- // Notify peer of our superior chain
- if (!peer.sendMessage(Network.getInstance().buildHeightMessage(peer, priorChainTip)))
- peer.disconnect("failed to notify peer of our superior chain");
- break;
- }
-
- case NO_REPLY:
- case NO_BLOCKCHAIN_LOCK:
- case REPOSITORY_ISSUE:
- // These are minor failure results so fine to try again
- LOGGER.debug(() -> String.format("Failed to synchronize with peer %s (%s)", peer, syncResult.name()));
- break;
-
- case SHUTTING_DOWN:
- // Just quietly exit
- break;
-
- case OK:
- // fall-through...
- case NOTHING_TO_DO: {
- // Update our list of inferior chain tips
- ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
- if (!inferiorChainSignatures.contains(inferiorChainSignature))
- inferiorChainSignatures.add(inferiorChainSignature);
-
- LOGGER.debug(() -> String.format("Synchronized with peer %s (%s)", peer, syncResult.name()));
- break;
- }
- }
-
- // Has our chain tip changed?
- BlockData newChainTip;
-
- try (final Repository repository = RepositoryManager.getRepository()) {
- newChainTip = repository.getBlockRepository().getLastBlock();
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue when trying to fetch post-synchronization chain tip: %s", e.getMessage()));
- return syncResult;
- }
-
- if (!Arrays.equals(newChainTip.getSignature(), priorChainTip.getSignature())) {
- // Reset our cache of inferior chains
- inferiorChainSignatures.clear();
-
- Network network = Network.getInstance();
- network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newChainTip));
- }
-
- return syncResult;
- } finally {
- isSynchronizing = false;
- peer.setSyncInProgress(false);
+ } catch (DataException e) {
+ // Fail silently as this is an optional step
}
}
- private boolean checkRecoveryModeForPeers(List<Peer> qualifiedPeers) {
- List<Peer> handshakedPeers = Network.getInstance().getHandshakedPeers();
-
- if (handshakedPeers.size() > 0) {
- // There is at least one handshaked peer
- if (qualifiedPeers.isEmpty()) {
- // There are no 'qualified' peers - i.e. peers that have a recent block we can sync to
- boolean werePeersAvailable = peersAvailable;
- peersAvailable = false;
-
- // If peers only just became unavailable, update our record of the time they were last available
- if (werePeersAvailable)
- timePeersLastAvailable = NTP.getTime();
-
- // If enough time has passed, enter recovery mode, which lifts some restrictions on who we can sync with and when we can mint
- if (NTP.getTime() - timePeersLastAvailable > RECOVERY_MODE_TIMEOUT) {
- if (recoveryMode == false) {
- LOGGER.info(String.format("Peers have been unavailable for %d minutes. Entering recovery mode...", RECOVERY_MODE_TIMEOUT/60/1000));
- recoveryMode = true;
- }
- }
- } else {
- // We now have at least one peer with a recent block, so we can exit recovery mode and sync normally
- peersAvailable = true;
- if (recoveryMode) {
- LOGGER.info("Peers have become available again. Exiting recovery mode...");
- recoveryMode = false;
- }
- }
- }
- return recoveryMode;
- }
-
- public void addInferiorChainSignature(byte[] inferiorSignature) {
- // Update our list of inferior chain tips
- ByteArray inferiorChainSignature = new ByteArray(inferiorSignature);
- if (!inferiorChainSignatures.contains(inferiorChainSignature))
- inferiorChainSignatures.add(inferiorChainSignature);
- }
public static class StatusChangeEvent implements Event {
public StatusChangeEvent() {
}
}
- private void updateSysTray() {
+ public void updateSysTray() {
if (NTP.getTime() == null) {
SysTray.getInstance().setToolTipText(Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_CLOCK"));
SysTray.getInstance().setTrayIcon(1);
@@ -991,13 +762,13 @@ public class Controller extends Thread {
String actionText;
- synchronized (this.syncLock) {
+ synchronized (Synchronizer.getInstance().syncLock) {
if (this.isMintingPossible) {
actionText = Translator.INSTANCE.translate("SysTray", "MINTING_ENABLED");
SysTray.getInstance().setTrayIcon(2);
}
- else if (this.isSynchronizing) {
- actionText = String.format("%s - %d%%", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN"), this.syncPercent);
+ else if (Synchronizer.getInstance().isSynchronizing()) {
+ actionText = String.format("%s - %d%%", Translator.INSTANCE.translate("SysTray", "SYNCHRONIZING_BLOCKCHAIN"), Synchronizer.getInstance().getSyncPercent());
SysTray.getInstance().setTrayIcon(3);
}
else if (numberOfPeers < Settings.getInstance().getMinBlockchainPeers()) {
@@ -1053,6 +824,121 @@ public class Controller extends Thread {
}
}
+ // Incoming transactions queue
+
+ private boolean incomingTransactionQueueContains(byte[] signature) {
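+ // Streaming over a synchronized collection is not atomic on its own, so hold its monitor while searching (assuming incomingTransactions is a Collections.synchronizedList)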
+ synchronized (incomingTransactions) {
+ return incomingTransactions.stream().anyMatch(t -> Arrays.equals(t.getSignature(), signature));
+ }
+ }
+
+ private void removeIncomingTransaction(byte[] signature) {
+ incomingTransactions.removeIf(t -> Arrays.equals(t.getSignature(), signature));
+ }
+
+ private void processIncomingTransactionsQueue() {
+ if (this.incomingTransactions.size() == 0) {
+ // Don't bother locking if there are no new transactions to process
+ return;
+ }
+
+ if (Synchronizer.getInstance().isSyncRequested() || Synchronizer.getInstance().isSynchronizing()) {
+ // Prioritize syncing, and don't attempt to lock
+ return;
+ }
+
+ ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
+ try {
+ if (!blockchainLock.tryLock(2, TimeUnit.SECONDS)) {
+ LOGGER.trace(() -> String.format("Too busy to process incoming transactions queue"));
+ return;
+ }
+ } catch (InterruptedException e) {
+ LOGGER.info("Interrupted when trying to acquire blockchain lock");
+ return;
+ }
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ LOGGER.debug("Processing incoming transactions queue (size {})...", this.incomingTransactions.size());
+
+ // Take a copy of incomingTransactions so we can release the lock
+ List<TransactionData> incomingTransactionsCopy = new ArrayList<>(this.incomingTransactions);
+
+ // Iterate through incoming transactions list
+ Iterator iterator = incomingTransactionsCopy.iterator();
+ while (iterator.hasNext()) {
+ if (isStopping) {
+ return;
+ }
+
+ if (Synchronizer.getInstance().isSyncRequestPending()) {
+ LOGGER.debug("Breaking out of transaction processing loop with {} remaining, because a sync request is pending", incomingTransactionsCopy.size());
+ return;
+ }
+
+ TransactionData transactionData = (TransactionData) iterator.next();
+ Transaction transaction = Transaction.fromData(repository, transactionData);
+
+ // Check signature
+ if (!transaction.isSignatureValid()) {
+ LOGGER.trace(() -> String.format("Ignoring %s transaction %s with invalid signature", transactionData.getType().name(), Base58.encode(transactionData.getSignature())));
+ removeIncomingTransaction(transactionData.getSignature());
+ continue;
+ }
+
+ ValidationResult validationResult = transaction.importAsUnconfirmed();
+
+ if (validationResult == ValidationResult.TRANSACTION_ALREADY_EXISTS) {
+ LOGGER.trace(() -> String.format("Ignoring existing transaction %s", Base58.encode(transactionData.getSignature())));
+ removeIncomingTransaction(transactionData.getSignature());
+ continue;
+ }
+
+ if (validationResult == ValidationResult.NO_BLOCKCHAIN_LOCK) {
+ LOGGER.trace(() -> String.format("Couldn't lock blockchain to import unconfirmed transaction", Base58.encode(transactionData.getSignature())));
+ removeIncomingTransaction(transactionData.getSignature());
+ continue;
+ }
+
+ if (validationResult != ValidationResult.OK) {
+ final String signature58 = Base58.encode(transactionData.getSignature());
+ LOGGER.trace(() -> String.format("Ignoring invalid (%s) %s transaction %s", validationResult.name(), transactionData.getType().name(), signature58));
+ Long now = NTP.getTime();
+ if (now != null && now - transactionData.getTimestamp() > INVALID_TRANSACTION_STALE_TIMEOUT) {
+ Long expiryLength = INVALID_TRANSACTION_RECHECK_INTERVAL;
+ if (validationResult == ValidationResult.TIMESTAMP_TOO_OLD) {
+ // Use shorter recheck interval for expired transactions
+ expiryLength = EXPIRED_TRANSACTION_RECHECK_INTERVAL;
+ }
+ Long expiry = now + expiryLength;
+ LOGGER.debug("Adding stale invalid transaction {} to invalidUnconfirmedTransactions...", signature58);
+ // Invalid, unconfirmed transaction has become stale - add to invalidUnconfirmedTransactions so that we don't keep requesting it
+ invalidUnconfirmedTransactions.put(signature58, expiry);
+ }
+ removeIncomingTransaction(transactionData.getSignature());
+ continue;
+ }
+
+ LOGGER.debug(() -> String.format("Imported %s transaction %s", transactionData.getType().name(), Base58.encode(transactionData.getSignature())));
+ removeIncomingTransaction(transactionData.getSignature());
+ }
+ } catch (DataException e) {
+ LOGGER.error("Repository issue while processing incoming transactions", e);
+ } finally {
+ LOGGER.debug("Finished processing incoming transactions queue");
+ blockchainLock.unlock();
+ }
+ }
+
+ private void cleanupInvalidTransactionsList(Long now) {
+ if (now == null) {
+ return;
+ }
+ // Periodically remove invalid unconfirmed transactions from the list, so that they can be fetched again
+ invalidUnconfirmedTransactions.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < now);
+ }
+
+
// Shutdown
public void shutdown() {
@@ -1060,6 +946,9 @@ public class Controller extends Thread {
if (!isStopping) {
isStopping = true;
+ LOGGER.info("Shutting down synchronizer");
+ Synchronizer.getInstance().shutdown();
+
LOGGER.info("Shutting down API");
ApiService.getInstance().stop();
@@ -1071,6 +960,7 @@ public class Controller extends Thread {
// Arbitrary data controllers
LOGGER.info("Shutting down arbitrary-transaction controllers");
ArbitraryDataManager.getInstance().shutdown();
+ ArbitraryDataFileManager.getInstance().shutdown();
ArbitraryDataBuildManager.getInstance().shutdown();
ArbitraryDataCleanupManager.getInstance().shutdown();
ArbitraryDataStorageManager.getInstance().shutdown();
@@ -1101,6 +991,17 @@ public class Controller extends Thread {
// We were interrupted while waiting for thread to join
}
+ // Make sure we're the only thread modifying the blockchain when shutting down the repository
+ ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
+ try {
+ if (!blockchainLock.tryLock(5, TimeUnit.SECONDS)) {
+ LOGGER.debug("Couldn't acquire blockchain lock even after waiting 5 seconds");
+ // Proceed anyway, as we have to shut down
+ }
+ } catch (InterruptedException e) {
+ LOGGER.info("Interrupted when waiting for blockchain lock");
+ }
+
try {
LOGGER.info("Shutting down repository");
RepositoryManager.closeRepositoryFactory();
@@ -1108,6 +1009,11 @@ public class Controller extends Thread {
LOGGER.error("Error occurred while shutting down repository", e);
}
+ // Release the lock if we acquired it
+ if (blockchainLock.isHeldByCurrentThread()) {
+ blockchainLock.unlock();
+ }
+
LOGGER.info("Shutting down NTP");
NTP.shutdownNow();
@@ -1507,50 +1413,10 @@ public class Controller extends Thread {
private void onNetworkTransactionMessage(Peer peer, Message message) {
TransactionMessage transactionMessage = (TransactionMessage) message;
TransactionData transactionData = transactionMessage.getTransactionData();
-
- /*
- * If we can't obtain blockchain lock immediately,
- * e.g. Synchronizer is active, or another transaction is taking a while to validate,
- * then we're using up a network thread for ages and clogging things up
- * so bail out early
- */
- ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
- if (!blockchainLock.tryLock()) {
- LOGGER.trace(() -> String.format("Too busy to import %s transaction %s from peer %s", transactionData.getType().name(), Base58.encode(transactionData.getSignature()), peer));
- return;
- }
-
- try (final Repository repository = RepositoryManager.getRepository()) {
- Transaction transaction = Transaction.fromData(repository, transactionData);
-
- // Check signature
- if (!transaction.isSignatureValid()) {
- LOGGER.trace(() -> String.format("Ignoring %s transaction %s with invalid signature from peer %s", transactionData.getType().name(), Base58.encode(transactionData.getSignature()), peer));
- return;
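+ // Queue the transaction for batch processing by processIncomingTransactionsQueue(), rather than importing it inline on this network thread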
+ if (this.incomingTransactions.size() < MAX_INCOMING_TRANSACTIONS) {
+ if (!this.incomingTransactions.contains(transactionData)) {
+ this.incomingTransactions.add(transactionData);
}
-
- ValidationResult validationResult = transaction.importAsUnconfirmed();
-
- if (validationResult == ValidationResult.TRANSACTION_ALREADY_EXISTS) {
- LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(transactionData.getSignature()), peer));
- return;
- }
-
- if (validationResult == ValidationResult.NO_BLOCKCHAIN_LOCK) {
- LOGGER.trace(() -> String.format("Couldn't lock blockchain to import unconfirmed transaction %s from peer %s", Base58.encode(transactionData.getSignature()), peer));
- return;
- }
-
- if (validationResult != ValidationResult.OK) {
- LOGGER.trace(() -> String.format("Ignoring invalid (%s) %s transaction %s from peer %s", validationResult.name(), transactionData.getType().name(), Base58.encode(transactionData.getSignature()), peer));
- return;
- }
-
- LOGGER.debug(() -> String.format("Imported %s transaction %s from peer %s", transactionData.getType().name(), Base58.encode(transactionData.getSignature()), peer));
- } catch (DataException e) {
- LOGGER.error(String.format("Repository issue while processing transaction %s from peer %s", Base58.encode(transactionData.getSignature()), peer), e);
- } finally {
- blockchainLock.unlock();
}
}
@@ -1702,7 +1568,7 @@ public class Controller extends Thread {
peer.setChainTipData(newChainTipData);
// Potentially synchronize
- requestSync = true;
+ Synchronizer.getInstance().requestSync();
}
private void onNetworkGetTransactionMessage(Peer peer, Message message) {
@@ -1749,6 +1615,19 @@ public class Controller extends Thread {
try (final Repository repository = RepositoryManager.getRepository()) {
for (byte[] signature : signatures) {
+ String signature58 = Base58.encode(signature);
+ if (invalidUnconfirmedTransactions.containsKey(signature58)) {
+ // Previously invalid transaction - don't keep requesting it
+ // It will be periodically removed from invalidUnconfirmedTransactions to allow for rechecks
+ continue;
+ }
+
+ // Ignore if this transaction is in the queue
+ if (incomingTransactionQueueContains(signature)) {
+ LOGGER.trace(() -> String.format("Ignoring existing queued transaction %s from peer %s", Base58.encode(signature), peer));
+ continue;
+ }
+
// Do we have it already? (Before requesting transaction data itself)
if (repository.getTransactionRepository().exists(signature)) {
LOGGER.trace(() -> String.format("Ignoring existing transaction %s from peer %s", Base58.encode(signature), peer));
@@ -1995,83 +1874,88 @@ public class Controller extends Thread {
private void sendOurOnlineAccountsInfo() {
final Long now = NTP.getTime();
- if (now == null)
- return;
+ if (now != null) {
- List<MintingAccountData> mintingAccounts;
- try (final Repository repository = RepositoryManager.getRepository()) {
- mintingAccounts = repository.getAccountRepository().getMintingAccounts();
+ List<MintingAccountData> mintingAccounts;
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ mintingAccounts = repository.getAccountRepository().getMintingAccounts();
- // We have no accounts, but don't reset timestamp
- if (mintingAccounts.isEmpty())
- return;
+ // We have no accounts, but don't reset timestamp
+ if (mintingAccounts.isEmpty())
+ return;
- // Only reward-share accounts allowed
- Iterator<MintingAccountData> iterator = mintingAccounts.iterator();
- while (iterator.hasNext()) {
- MintingAccountData mintingAccountData = iterator.next();
-
- RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
- if (rewardShareData == null) {
- // Reward-share doesn't even exist - probably not a good sign
- iterator.remove();
- continue;
- }
-
- Account mintingAccount = new Account(repository, rewardShareData.getMinter());
- if (!mintingAccount.canMint()) {
- // Minting-account component of reward-share can no longer mint - disregard
- iterator.remove();
- continue;
- }
- }
- } catch (DataException e) {
- LOGGER.warn(String.format("Repository issue trying to fetch minting accounts: %s", e.getMessage()));
- return;
- }
-
- // 'current' timestamp
- final long onlineAccountsTimestamp = Controller.toOnlineAccountTimestamp(now);
- boolean hasInfoChanged = false;
-
- byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);
- List<OnlineAccountData> ourOnlineAccounts = new ArrayList<>();
-
- MINTING_ACCOUNTS:
- for (MintingAccountData mintingAccountData : mintingAccounts) {
- PrivateKeyAccount mintingAccount = new PrivateKeyAccount(null, mintingAccountData.getPrivateKey());
-
- byte[] signature = mintingAccount.sign(timestampBytes);
- byte[] publicKey = mintingAccount.getPublicKey();
-
- // Our account is online
- OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey);
- synchronized (this.onlineAccounts) {
- Iterator<OnlineAccountData> iterator = this.onlineAccounts.iterator();
+ // Only reward-share accounts allowed
+ Iterator<MintingAccountData> iterator = mintingAccounts.iterator();
+ int i = 0;
while (iterator.hasNext()) {
- OnlineAccountData existingOnlineAccountData = iterator.next();
+ MintingAccountData mintingAccountData = iterator.next();
- if (Arrays.equals(existingOnlineAccountData.getPublicKey(), ourOnlineAccountData.getPublicKey())) {
- // If our online account is already present, with same timestamp, then move on to next mintingAccount
- if (existingOnlineAccountData.getTimestamp() == onlineAccountsTimestamp)
- continue MINTING_ACCOUNTS;
-
- // If our online account is already present, but with older timestamp, then remove it
+ RewardShareData rewardShareData = repository.getAccountRepository().getRewardShare(mintingAccountData.getPublicKey());
+ if (rewardShareData == null) {
+ // Reward-share doesn't even exist - probably not a good sign
iterator.remove();
- break;
+ continue;
+ }
+
+ Account mintingAccount = new Account(repository, rewardShareData.getMinter());
+ if (!mintingAccount.canMint()) {
+ // Minting-account component of reward-share can no longer mint - disregard
+ iterator.remove();
+ continue;
+ }
+
+ if (++i > 2) {
+ iterator.remove();
+ continue;
}
}
-
- this.onlineAccounts.add(ourOnlineAccountData);
+ } catch (DataException e) {
+ LOGGER.warn(String.format("Repository issue trying to fetch minting accounts: %s", e.getMessage()));
+ return;
}
- LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", mintingAccount.getAddress(), onlineAccountsTimestamp));
- ourOnlineAccounts.add(ourOnlineAccountData);
- hasInfoChanged = true;
- }
+ // 'current' timestamp
+ final long onlineAccountsTimestamp = Controller.toOnlineAccountTimestamp(now);
+ boolean hasInfoChanged = false;
- if (!hasInfoChanged)
- return;
+ byte[] timestampBytes = Longs.toByteArray(onlineAccountsTimestamp);
+ List<OnlineAccountData> ourOnlineAccounts = new ArrayList<>();
+
+ MINTING_ACCOUNTS:
+ for (MintingAccountData mintingAccountData : mintingAccounts) {
+ PrivateKeyAccount mintingAccount = new PrivateKeyAccount(null, mintingAccountData.getPrivateKey());
+
+ byte[] signature = mintingAccount.sign(timestampBytes);
+ byte[] publicKey = mintingAccount.getPublicKey();
+
+ // Our account is online
+ OnlineAccountData ourOnlineAccountData = new OnlineAccountData(onlineAccountsTimestamp, signature, publicKey);
+ synchronized (this.onlineAccounts) {
+ Iterator<OnlineAccountData> iterator = this.onlineAccounts.iterator();
+ while (iterator.hasNext()) {
+ OnlineAccountData existingOnlineAccountData = iterator.next();
+
+ if (Arrays.equals(existingOnlineAccountData.getPublicKey(), ourOnlineAccountData.getPublicKey())) {
+ // If our online account is already present, with same timestamp, then move on to next mintingAccount
+ if (existingOnlineAccountData.getTimestamp() == onlineAccountsTimestamp)
+ continue MINTING_ACCOUNTS;
+
+ // If our online account is already present, but with older timestamp, then remove it
+ iterator.remove();
+ break;
+ }
+ }
+
+ this.onlineAccounts.add(ourOnlineAccountData);
+ }
+
+ LOGGER.trace(() -> String.format("Added our online account %s with timestamp %d", mintingAccount.getAddress(), onlineAccountsTimestamp));
+ ourOnlineAccounts.add(ourOnlineAccountData);
+ hasInfoChanged = true;
+ }
+
+ if (!hasInfoChanged)
+ return;
Message messageV1 = new OnlineAccountsMessage(ourOnlineAccounts);
Message messageV2 = new OnlineAccountsV2Message(ourOnlineAccounts);
@@ -2080,7 +1964,8 @@ public class Controller extends Thread {
peer.getPeersVersion() >= ONLINE_ACCOUNTS_V2_PEER_VERSION ? messageV2 : messageV1
);
- LOGGER.trace(()-> String.format("Broadcasted %d online account%s with timestamp %d", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp));
+ LOGGER.trace(() -> String.format("Broadcasted %d online account%s with timestamp %d", ourOnlineAccounts.size(), (ourOnlineAccounts.size() != 1 ? "s" : ""), onlineAccountsTimestamp));
+ }
}
public static long toOnlineAccountTimestamp(long timestamp) {
diff --git a/src/main/java/org/qortal/controller/Synchronizer.java b/src/main/java/org/qortal/controller/Synchronizer.java
index d5e489c8..b98c5fa2 100644
--- a/src/main/java/org/qortal/controller/Synchronizer.java
+++ b/src/main/java/org/qortal/controller/Synchronizer.java
@@ -1,9 +1,11 @@
package org.qortal.controller;
import java.math.BigInteger;
+import java.security.SecureRandom;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.*;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
@@ -20,6 +22,7 @@ import org.qortal.data.block.CommonBlockData;
import org.qortal.data.network.PeerChainTipData;
import org.qortal.data.transaction.RewardShareTransactionData;
import org.qortal.data.transaction.TransactionData;
+import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.BlockMessage;
import org.qortal.network.message.BlockSummariesMessage;
@@ -35,11 +38,10 @@ import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;
+import org.qortal.utils.ByteArray;
import org.qortal.utils.NTP;
-import static org.qortal.network.Peer.FETCH_BLOCKS_TIMEOUT;
-
-public class Synchronizer {
+public class Synchronizer extends Thread {
private static final Logger LOGGER = LogManager.getLogger(Synchronizer.class);
@@ -57,12 +59,32 @@ public class Synchronizer {
/** Maximum number of block signatures we ask from peer in one go */
private static final int MAXIMUM_REQUEST_SIZE = 200; // XXX move to Settings?
+ private static final long RECOVERY_MODE_TIMEOUT = 10 * 60 * 1000L; // ms
+ private boolean running;
+
+ /** Latest block signatures from other peers that we know are on inferior chains. */
+ List<ByteArray> inferiorChainSignatures = new ArrayList<>();
+
+ /** Recovery mode, which is used to bring back a stalled network */
+ private boolean recoveryMode = false;
+ private boolean peersAvailable = true; // peersAvailable must default to true
+ private long timePeersLastAvailable = 0;
// Keep track of the size of the last re-org, so it can be logged
private int lastReorgSize;
+ /** Synchronization object for sync variables below */
+ public final Object syncLock = new Object();
+ /** Whether we are attempting to synchronize. */
+ private volatile boolean isSynchronizing = false;
+ /** Temporary estimate of synchronization progress for SysTray use. */
+ private volatile int syncPercent = 0;
+
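+ /** Set from other threads (e.g. Controller's network handlers) via requestSync() and polled by the run() loop, hence volatile. */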
+ private static volatile boolean requestSync = false;
+ private boolean syncRequestPending = false;
+
// Keep track of invalid blocks so that we don't keep trying to sync them
private Map invalidBlockSignatures = Collections.synchronizedMap(new HashMap<>());
public Long timeValidBlockLastReceived = null;
@@ -77,6 +99,7 @@ public class Synchronizer {
// Constructors
private Synchronizer() {
+ this.running = true;
}
public static Synchronizer getInstance() {
@@ -87,6 +110,284 @@ public class Synchronizer {
}
+ @Override
+ public void run() {
+ Thread.currentThread().setName("Synchronizer");
+
+ try {
+ while (running && !Controller.isStopping()) {
+ Thread.sleep(1000);
+
+ if (requestSync) {
+ requestSync = false;
+ boolean success = Synchronizer.getInstance().potentiallySynchronize();
+ if (!success) {
+ // Something went wrong, so try again next time
+ requestSync = true;
+ }
+ // Remember that we have a pending sync request if this attempt failed
+ syncRequestPending = !success;
+ }
+ }
+ } catch (InterruptedException e) {
+ // Clear interrupted flag so we can shutdown trim threads
+ Thread.interrupted();
+ // Fall-through to exit
+ }
+ }
+
+ public void shutdown() {
+ this.running = false;
+ this.interrupt();
+ }
+
+
+
+ public boolean isSynchronizing() {
+ return this.isSynchronizing;
+ }
+
+ public boolean isSyncRequestPending() {
+ return this.syncRequestPending;
+ }
+
+ public Integer getSyncPercent() {
+ synchronized (this.syncLock) {
+ return this.isSynchronizing ? this.syncPercent : null;
+ }
+ }
+
+ public void requestSync() {
+ requestSync = true;
+ }
+
+ public boolean isSyncRequested() {
+ return requestSync;
+ }
+
+ public boolean getRecoveryMode() {
+ return this.recoveryMode;
+ }
+
+
+ public boolean potentiallySynchronize() throws InterruptedException {
+ // Already synchronizing via another thread?
+ if (this.isSynchronizing)
+ return true;
+
+ List<Peer> peers = Network.getInstance().getHandshakedPeers();
+
+ // Disregard peers that have "misbehaved" recently
+ peers.removeIf(Controller.hasMisbehaved);
+
+ // Disregard peers that only have genesis block
+ peers.removeIf(Controller.hasOnlyGenesisBlock);
+
+ // Disregard peers that don't have a recent block
+ peers.removeIf(Controller.hasNoRecentBlock);
+
+ // Disregard peers that are on an old version
+ peers.removeIf(Controller.hasOldVersion);
+
+ checkRecoveryModeForPeers(peers);
+ if (recoveryMode) {
+ peers = Network.getInstance().getHandshakedPeers();
+ peers.removeIf(Controller.hasOnlyGenesisBlock);
+ peers.removeIf(Controller.hasMisbehaved);
+ peers.removeIf(Controller.hasOldVersion);
+ }
+
+ // Check we have enough peers to potentially synchronize
+ if (peers.size() < Settings.getInstance().getMinBlockchainPeers())
+ return true;
+
+ // Disregard peers that have no block signature or the same block signature as us
+ peers.removeIf(Controller.hasNoOrSameBlock);
+
+ // Disregard peers that are on the same block as last sync attempt and we didn't like their chain
+ peers.removeIf(Controller.hasInferiorChainTip);
+
+ final int peersBeforeComparison = peers.size();
+
+ // Request recent block summaries from the remaining peers, and locate our common block with each
+ Synchronizer.getInstance().findCommonBlocksWithPeers(peers);
+
+ // Compare the peers against each other, and against our chain, which will return an updated list excluding those without common blocks
+ peers = Synchronizer.getInstance().comparePeers(peers);
+
+ // We may have added more inferior chain tips when comparing peers, so remove any peers that are currently on those chains
+ peers.removeIf(Controller.hasInferiorChainTip);
+
+ final int peersRemoved = peersBeforeComparison - peers.size();
+ if (peersRemoved > 0 && peers.size() > 0)
+ LOGGER.debug(String.format("Ignoring %d peers on inferior chains. Peers remaining: %d", peersRemoved, peers.size()));
+
+ if (peers.isEmpty())
+ return true;
+
+ if (peers.size() > 1) {
+ StringBuilder finalPeersString = new StringBuilder();
+ for (Peer peer : peers)
+ finalPeersString = finalPeersString.length() > 0 ? finalPeersString.append(", ").append(peer) : finalPeersString.append(peer);
+ LOGGER.debug(String.format("Choosing random peer from: [%s]", finalPeersString.toString()));
+ }
+
+ // Pick random peer to sync with
+ int index = new SecureRandom().nextInt(peers.size());
+ Peer peer = peers.get(index);
+
+ SynchronizationResult syncResult = actuallySynchronize(peer, false);
+ if (syncResult == SynchronizationResult.NO_BLOCKCHAIN_LOCK) {
+ // No blockchain lock - force a retry by returning false
+ return false;
+ }
+
+ return true;
+ }
+
+ public SynchronizationResult actuallySynchronize(Peer peer, boolean force) throws InterruptedException {
+ boolean hasStatusChanged = false;
+ BlockData priorChainTip = Controller.getInstance().getChainTip();
+
+ synchronized (this.syncLock) {
+ this.syncPercent = (priorChainTip.getHeight() * 100) / peer.getChainTipData().getLastHeight();
+
+ // Only update SysTray if we're potentially changing height
+ if (this.syncPercent < 100) {
+ this.isSynchronizing = true;
+ hasStatusChanged = true;
+ }
+ }
+ peer.setSyncInProgress(true);
+
+ if (hasStatusChanged)
+ Controller.getInstance().updateSysTray();
+
+ try {
+ SynchronizationResult syncResult = Synchronizer.getInstance().synchronize(peer, force);
+ switch (syncResult) {
+ case GENESIS_ONLY:
+ case NO_COMMON_BLOCK:
+ case TOO_DIVERGENT:
+ case INVALID_DATA: {
+ // These are more serious results that warrant a cool-off
+ LOGGER.info(String.format("Failed to synchronize with peer %s (%s) - cooling off", peer, syncResult.name()));
+
+ // Don't use this peer again for a while
+ Network.getInstance().peerMisbehaved(peer);
+ break;
+ }
+
+ case INFERIOR_CHAIN: {
+ // Update our list of inferior chain tips
+ ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
+ if (!inferiorChainSignatures.contains(inferiorChainSignature))
+ inferiorChainSignatures.add(inferiorChainSignature);
+
+ // These are minor failure results so fine to try again
+ LOGGER.debug(() -> String.format("Refused to synchronize with peer %s (%s)", peer, syncResult.name()));
+
+ // Notify peer of our superior chain
+ if (!peer.sendMessage(Network.getInstance().buildHeightMessage(peer, priorChainTip)))
+ peer.disconnect("failed to notify peer of our superior chain");
+ break;
+ }
+
+ case NO_REPLY:
+ case NO_BLOCKCHAIN_LOCK:
+ case REPOSITORY_ISSUE:
+ // These are minor failure results so fine to try again
+ LOGGER.debug(() -> String.format("Failed to synchronize with peer %s (%s)", peer, syncResult.name()));
+ break;
+
+ case SHUTTING_DOWN:
+ // Just quietly exit
+ break;
+
+ case OK:
+ // fall-through...
+ case NOTHING_TO_DO: {
+ // Update our list of inferior chain tips
+ ByteArray inferiorChainSignature = new ByteArray(peer.getChainTipData().getLastBlockSignature());
+ if (!inferiorChainSignatures.contains(inferiorChainSignature))
+ inferiorChainSignatures.add(inferiorChainSignature);
+
+ LOGGER.debug(() -> String.format("Synchronized with peer %s (%s)", peer, syncResult.name()));
+ break;
+ }
+ }
+
+ if (!running) {
+ // We've stopped
+ return SynchronizationResult.SHUTTING_DOWN;
+ }
+
+ // Has our chain tip changed?
+ BlockData newChainTip;
+
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ newChainTip = repository.getBlockRepository().getLastBlock();
+ } catch (DataException e) {
+ LOGGER.warn(String.format("Repository issue when trying to fetch post-synchronization chain tip: %s", e.getMessage()));
+ return syncResult;
+ }
+
+ if (!Arrays.equals(newChainTip.getSignature(), priorChainTip.getSignature())) {
+ // Reset our cache of inferior chains
+ inferiorChainSignatures.clear();
+
+ Network network = Network.getInstance();
+ network.broadcast(broadcastPeer -> network.buildHeightMessage(broadcastPeer, newChainTip));
+ }
+
+ return syncResult;
+ } finally {
+ this.isSynchronizing = false;
+ peer.setSyncInProgress(false);
+ }
+ }
+
+ private boolean checkRecoveryModeForPeers(List<Peer> qualifiedPeers) {
+ List<Peer> handshakedPeers = Network.getInstance().getHandshakedPeers();
+
+ if (handshakedPeers.size() > 0) {
+ // There is at least one handshaked peer
+ if (qualifiedPeers.isEmpty()) {
+ // There are no 'qualified' peers - i.e. peers that have a recent block we can sync to
+ boolean werePeersAvailable = peersAvailable;
+ peersAvailable = false;
+
+ // If peers only just became unavailable, update our record of the time they were last available
+ if (werePeersAvailable)
+ timePeersLastAvailable = NTP.getTime();
+
+ // If enough time has passed, enter recovery mode, which lifts some restrictions on who we can sync with and when we can mint
+ if (NTP.getTime() - timePeersLastAvailable > RECOVERY_MODE_TIMEOUT) {
+ if (recoveryMode == false) {
+ LOGGER.info(String.format("Peers have been unavailable for %d minutes. Entering recovery mode...", RECOVERY_MODE_TIMEOUT/60/1000));
+ recoveryMode = true;
+ }
+ }
+ } else {
+ // We now have at least one peer with a recent block, so we can exit recovery mode and sync normally
+ peersAvailable = true;
+ if (recoveryMode) {
+ LOGGER.info("Peers have become available again. Exiting recovery mode...");
+ recoveryMode = false;
+ }
+ }
+ }
+ return recoveryMode;
+ }
+
+ public void addInferiorChainSignature(byte[] inferiorSignature) {
+ // Update our list of inferior chain tips
+ ByteArray inferiorChainSignature = new ByteArray(inferiorSignature);
+ if (!inferiorChainSignatures.contains(inferiorChainSignature))
+ inferiorChainSignatures.add(inferiorChainSignature);
+ }
+
+
/**
* Iterate through a list of supplied peers, and attempt to find our common block with each.
* If a common block is found, its summary will be retained in the peer's commonBlockSummary property, for processing later.
@@ -259,6 +560,8 @@ public class Synchronizer {
// Create a placeholder to track of common blocks that we can discard due to being inferior chains
int dropPeersAfterCommonBlockHeight = 0;
+ NumberFormat accurateFormatter = new DecimalFormat("0.################E0");
+
// Remove peers with no common block data
Iterator<Peer> iterator = peers.iterator();
while (iterator.hasNext()) {
@@ -279,7 +582,7 @@ public class Synchronizer {
// We have already determined that the correct chain diverged from a lower height. We are safe to skip these peers.
for (Peer peer : peersSharingCommonBlock) {
LOGGER.debug(String.format("Peer %s has common block at height %d but the superior chain is at height %d. Removing it from this round.", peer, commonBlockSummary.getHeight(), dropPeersAfterCommonBlockHeight));
- Controller.getInstance().addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
+ this.addInferiorChainSignature(peer.getChainTipData().getLastBlockSignature());
}
continue;
}
@@ -381,9 +684,7 @@ public class Synchronizer {
if (ourBlockSummaries.size() > 0)
ourChainWeight = Block.calcChainWeight(commonBlockSummary.getHeight(), commonBlockSummary.getSignature(), ourBlockSummaries, maxHeightForChainWeightComparisons);
- NumberFormat formatter = new DecimalFormat("0.###E0");
- NumberFormat accurateFormatter = new DecimalFormat("0.################E0");
- LOGGER.debug(String.format("Our chain weight based on %d blocks is %s", (usingSameLengthChainWeight ? minChainLength : ourBlockSummaries.size()), formatter.format(ourChainWeight)));
+ LOGGER.debug(String.format("Our chain weight based on %d blocks is %s", (usingSameLengthChainWeight ? minChainLength : ourBlockSummaries.size()), accurateFormatter.format(ourChainWeight)));
LOGGER.debug(String.format("Listing peers with common block %.8s...", Base58.encode(commonBlockSummary.getSignature())));
for (Peer peer : peersSharingCommonBlock) {
@@ -405,7 +706,7 @@ public class Synchronizer {
LOGGER.debug(String.format("About to calculate chain weight based on %d blocks for peer %s with common block %.8s (peer has %d blocks after common block)", (usingSameLengthChainWeight ? minChainLength : peerBlockSummariesAfterCommonBlock.size()), peer, Base58.encode(commonBlockSummary.getSignature()), peerAdditionalBlocksAfterCommonBlock));
BigInteger peerChainWeight = Block.calcChainWeight(commonBlockSummary.getHeight(), commonBlockSummary.getSignature(), peerBlockSummariesAfterCommonBlock, maxHeightForChainWeightComparisons);
peer.getCommonBlockData().setChainWeight(peerChainWeight);
- LOGGER.debug(String.format("Chain weight of peer %s based on %d blocks (%d - %d) is %s", peer, (usingSameLengthChainWeight ? minChainLength : peerBlockSummariesAfterCommonBlock.size()), peerBlockSummariesAfterCommonBlock.get(0).getHeight(), peerBlockSummariesAfterCommonBlock.get(peerBlockSummariesAfterCommonBlock.size()-1).getHeight(), formatter.format(peerChainWeight)));
+ LOGGER.debug(String.format("Chain weight of peer %s based on %d blocks (%d - %d) is %s", peer, (usingSameLengthChainWeight ? minChainLength : peerBlockSummariesAfterCommonBlock.size()), peerBlockSummariesAfterCommonBlock.get(0).getHeight(), peerBlockSummariesAfterCommonBlock.get(peerBlockSummariesAfterCommonBlock.size()-1).getHeight(), accurateFormatter.format(peerChainWeight)));
// Compare against our chain - if our blockchain has greater weight then don't synchronize with peer (or any others in this group)
if (ourChainWeight.compareTo(peerChainWeight) > 0) {
@@ -571,9 +872,11 @@ public class Synchronizer {
// Make sure we're the only thread modifying the blockchain
// If we're already synchronizing with another peer then this will also return fast
ReentrantLock blockchainLock = Controller.getInstance().getBlockchainLock();
- if (!blockchainLock.tryLock())
+ if (!blockchainLock.tryLock(3, TimeUnit.SECONDS)) {
// Wasn't peer's fault we couldn't sync
+ LOGGER.info("Synchronizer couldn't acquire blockchain lock");
return SynchronizationResult.NO_BLOCKCHAIN_LOCK;
+ }
try {
try (final Repository repository = RepositoryManager.getRepository()) {
@@ -793,7 +1096,7 @@ public class Synchronizer {
return SynchronizationResult.REPOSITORY_ISSUE;
if (ourLatestBlockData.getTimestamp() < minLatestBlockTimestamp) {
- LOGGER.info(String.format("Ditching our chain after height %d as our latest block is very old", commonBlockHeight));
+ LOGGER.info(String.format("Ditching our chain after height %d", commonBlockHeight));
} else {
// Compare chain weights
@@ -853,8 +1156,9 @@ public class Synchronizer {
BigInteger ourChainWeight = Block.calcChainWeight(commonBlockHeight, commonBlockSig, ourBlockSummaries, mutualHeight);
BigInteger peerChainWeight = Block.calcChainWeight(commonBlockHeight, commonBlockSig, peerBlockSummaries, mutualHeight);
- NumberFormat formatter = new DecimalFormat("0.###E0");
- LOGGER.debug(String.format("Our chain weight: %s, peer's chain weight: %s (higher is better)", formatter.format(ourChainWeight), formatter.format(peerChainWeight)));
+ NumberFormat accurateFormatter = new DecimalFormat("0.################E0");
+ LOGGER.debug(String.format("commonBlockHeight: %d, commonBlockSig: %.8s, ourBlockSummaries.size(): %d, peerBlockSummaries.size(): %d", commonBlockHeight, Base58.encode(commonBlockSig), ourBlockSummaries.size(), peerBlockSummaries.size()));
+ LOGGER.debug(String.format("Our chain weight: %s, peer's chain weight: %s (higher is better)", accurateFormatter.format(ourChainWeight), accurateFormatter.format(peerChainWeight)));
// If our blockchain has greater weight then don't synchronize with peer
if (ourChainWeight.compareTo(peerChainWeight) >= 0) {
@@ -1222,7 +1526,7 @@ public class Synchronizer {
return new Block(repository, blockMessage.getBlockData(), blockMessage.getTransactions(), blockMessage.getAtStates());
}
- private void populateBlockSummariesMinterLevels(Repository repository, List<BlockSummaryData> blockSummaries) throws DataException {
+ public void populateBlockSummariesMinterLevels(Repository repository, List<BlockSummaryData> blockSummaries) throws DataException {
final int firstBlockHeight = blockSummaries.get(0).getHeight();
for (int i = 0; i < blockSummaries.size(); ++i) {
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuildManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuildManager.java
index 3df82d66..ebff6913 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuildManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuildManager.java
@@ -37,11 +37,16 @@ public class ArbitraryDataBuildManager extends Thread {
@Override
public void run() {
+ Thread.currentThread().setName("Arbitrary Data Build Manager");
+
try {
// Use a fixed thread pool to execute the arbitrary data build actions (currently just a single thread)
// This can be expanded to have multiple threads processing the build queue when needed
- ExecutorService arbitraryDataBuildExecutor = Executors.newFixedThreadPool(1);
- arbitraryDataBuildExecutor.execute(new ArbitraryDataBuilderThread());
+ int threadCount = 5;
+ ExecutorService arbitraryDataBuildExecutor = Executors.newFixedThreadPool(threadCount);
+ for (int i = 0; i < threadCount; i++) {
+ arbitraryDataBuildExecutor.execute(new ArbitraryDataBuilderThread());
+ }
while (!isStopping) {
// Nothing to do yet
@@ -101,7 +106,7 @@ public class ArbitraryDataBuildManager extends Thread {
return true;
}
- LOGGER.info("Added {} to build queue", queueItem);
+ log(queueItem, String.format("Added %s to build queue", queueItem));
// Added to queue
return true;
@@ -149,7 +154,7 @@ public class ArbitraryDataBuildManager extends Thread {
return true;
}
- LOGGER.info("Added {} to failed builds list", queueItem);
+ log(queueItem, String.format("Added %s to failed builds list", queueItem));
// Added to queue
return true;
@@ -182,4 +187,17 @@ public class ArbitraryDataBuildManager extends Thread {
public boolean getBuildInProgress() {
return this.buildInProgress;
}
+
+ private void log(ArbitraryDataBuildQueueItem queueItem, String message) {
+ if (queueItem == null) {
+ return;
+ }
+
+ if (queueItem.isHighPriority()) {
+ LOGGER.info(message);
+ }
+ else {
+ LOGGER.debug(message);
+ }
+ }
}
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java
index da7c7293..0fb685a3 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataBuilderThread.java
@@ -9,6 +9,7 @@ import org.qortal.repository.DataException;
import org.qortal.utils.NTP;
import java.io.IOException;
+import java.util.Comparator;
import java.util.Map;
@@ -20,13 +21,14 @@ public class ArbitraryDataBuilderThread implements Runnable {
}
+ @Override
public void run() {
- Thread.currentThread().setName("Arbitrary Data Build Manager");
+ Thread.currentThread().setName("Arbitrary Data Builder Thread");
ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance();
while (!Controller.isStopping()) {
try {
- Thread.sleep(1000);
+ Thread.sleep(100);
if (buildManager.arbitraryDataBuildQueue == null) {
continue;
@@ -35,48 +37,57 @@ public class ArbitraryDataBuilderThread implements Runnable {
continue;
}
- // Find resources that are queued for building
- Map.Entry<String, ArbitraryDataBuildQueueItem> next = buildManager.arbitraryDataBuildQueue
- .entrySet().stream()
- .filter(e -> e.getValue().isQueued())
- .findFirst().get();
-
- if (next == null) {
- continue;
- }
-
Long now = NTP.getTime();
if (now == null) {
continue;
}
- ArbitraryDataBuildQueueItem queueItem = next.getValue();
+ ArbitraryDataBuildQueueItem queueItem = null;
- if (queueItem == null) {
- this.removeFromQueue(queueItem);
+ // Find resources that are queued for building (sorted by highest priority first)
+ synchronized (buildManager.arbitraryDataBuildQueue) {
+ Map.Entry<String, ArbitraryDataBuildQueueItem> next = buildManager.arbitraryDataBuildQueue
+ .entrySet().stream()
+ .filter(e -> e.getValue().isQueued())
+ .sorted(Comparator.comparing(item -> item.getValue().getPriority()))
+ .reduce((first, second) -> second).orElse(null);
+
+ if (next == null) {
+ continue;
+ }
+
+ queueItem = next.getValue();
+
+ if (queueItem == null) {
+ this.removeFromQueue(queueItem);
+ continue;
+ }
+
+ // Ignore builds that have failed recently
+ if (buildManager.isInFailedBuildsList(queueItem)) {
+ this.removeFromQueue(queueItem);
+ continue;
+ }
+
+ // Set the start timestamp, to prevent other threads from building it at the same time
+ queueItem.prepareForBuild();
}
- // Ignore builds that have failed recently
- if (buildManager.isInFailedBuildsList(queueItem)) {
- continue;
- }
-
-
try {
// Perform the build
- LOGGER.info("Building {}...", queueItem);
+ log(queueItem, String.format("Building %s... priority: %d", queueItem, queueItem.getPriority()));
queueItem.build();
this.removeFromQueue(queueItem);
- LOGGER.info("Finished building {}", queueItem);
+ log(queueItem, String.format("Finished building %s", queueItem));
} catch (MissingDataException e) {
- LOGGER.info("Missing data for {}: {}", queueItem, e.getMessage());
+ log(queueItem, String.format("Missing data for %s: %s", queueItem, e.getMessage()));
queueItem.setFailed(true);
this.removeFromQueue(queueItem);
// Don't add to the failed builds list, as we may want to retry sooner
} catch (IOException | DataException | RuntimeException e) {
- LOGGER.info("Error building {}: {}", queueItem, e.getMessage());
+ log(queueItem, String.format("Error building %s: %s", queueItem, e.getMessage()));
// Something went wrong - so remove it from the queue, and add to failed builds list
queueItem.setFailed(true);
buildManager.addToFailedBuildsList(queueItem);
@@ -95,4 +106,17 @@ public class ArbitraryDataBuilderThread implements Runnable {
}
ArbitraryDataBuildManager.getInstance().arbitraryDataBuildQueue.remove(queueItem.getUniqueKey());
}
+
+ private void log(ArbitraryDataBuildQueueItem queueItem, String message) {
+ if (queueItem == null) {
+ return;
+ }
+
+ if (queueItem.isHighPriority()) {
+ LOGGER.info(message);
+ }
+ else {
+ LOGGER.debug(message);
+ }
+ }
}
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java
index 8c263568..64916df5 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataCleanupManager.java
@@ -108,6 +108,10 @@ public class ArbitraryDataCleanupManager extends Thread {
try (final Repository repository = RepositoryManager.getRepository()) {
List signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true);
// LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
+ if (isStopping) {
+ return;
+ }
+
if (signatures == null || signatures.isEmpty()) {
offset = 0;
continue;
@@ -117,6 +121,10 @@ public class ArbitraryDataCleanupManager extends Thread {
// Loop through the signatures in this batch
for (int i=0; i expiredPaths = this.findPathsWithNoAssociatedTransaction(repository);
for (Path expiredPath : expiredPaths) {
+ if (isStopping) {
+ return;
+ }
LOGGER.info("Found path with no associated transaction: {}", expiredPath.toString());
this.safeDeleteDirectory(expiredPath.toFile(), "no matching transaction");
}
@@ -300,6 +317,9 @@ public class ArbitraryDataCleanupManager extends Thread {
// when they reach their storage limit
Path dataPath = Paths.get(Settings.getInstance().getDataPath());
for (int i=0; i handshakedPeers = Network.getInstance().getHandshakedPeers();
- LOGGER.debug(String.format("Sending data file list request for signature %s to %d peers...", signature58, handshakedPeers.size()));
+ List<byte[]> missingHashes = null;
+
+ // Find hashes that we are missing
+ try {
+ ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
+ arbitraryDataFile.setMetadataHash(metadataHash);
+ missingHashes = arbitraryDataFile.missingHashes();
+ } catch (DataException e) {
+ // Leave missingHashes as null, so that all hashes are requested
+ }
+ int hashCount = missingHashes != null ? missingHashes.size() : 0;
+
+ LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to %d peers...", signature58, hashCount, handshakedPeers.size()));
// Build request
- Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, now, 0);
+ Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, missingHashes, now, 0);
// Save our request into requests map
Triple<String, Peer, Long> requestEntry = new Triple<>(signature58, null, NTP.getTime());
@@ -304,6 +319,64 @@ public class ArbitraryDataFileListManager {
return true;
}
+ public boolean fetchArbitraryDataFileList(Peer peer, byte[] signature) {
+ String signature58 = Base58.encode(signature);
+
+ // Require an NTP sync
+ Long now = NTP.getTime();
+ if (now == null) {
+ return false;
+ }
+
+ int hashCount = 0;
+ LOGGER.debug(String.format("Sending data file list request for signature %s with %d hashes to peer %s...", signature58, hashCount, peer));
+
+ // Build request
+ // Use a time in the past, so that the recipient peer doesn't try and relay it
+ // Also, set hashes to null since it's easier to request all hashes than it is to determine which ones we need
+ // This could be optimized in the future
+ long timestamp = now - 60000L;
+ List<byte[]> hashes = null;
+ Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, hashes, timestamp, 0);
+
+ // Save our request into requests map
+ Triple<String, Peer, Long> requestEntry = new Triple<>(signature58, null, NTP.getTime());
+
+ // Assign random ID to this message
+ int id;
+ do {
+ id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1;
+
+ // Put queue into map (keyed by message ID) so we can poll for a response
+ // If putIfAbsent() doesn't return null, then this ID is already taken
+ } while (arbitraryDataFileListRequests.put(id, requestEntry) != null);
+ getArbitraryDataFileListMessage.setId(id);
+
+ // Send the request
+ peer.sendMessage(getArbitraryDataFileListMessage);
+
+ // Poll to see if data has arrived
+ final long singleWait = 100;
+ long totalWait = 0;
+ while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) {
+ try {
+ Thread.sleep(singleWait);
+ } catch (InterruptedException e) {
+ break;
+ }
+
+ requestEntry = arbitraryDataFileListRequests.get(id);
+ if (requestEntry == null)
+ return false;
+
+ if (requestEntry.getA() == null)
+ break;
+
+ totalWait += singleWait;
+ }
+ return true;
+ }
+
public void deleteFileListRequestsForSignature(byte[] signature) {
String signature58 = Base58.encode(signature);
for (Iterator<Map.Entry<Integer, Triple<String, Peer, Long>>> it = arbitraryDataFileListRequests.entrySet().iterator(); it.hasNext();) {
@@ -377,6 +450,14 @@ public class ArbitraryDataFileListManager {
// }
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
+ // Keep track of the hashes this peer reports to have access to
+ Long now = NTP.getTime();
+ for (byte[] hash : hashes) {
+ String hash58 = Base58.encode(hash);
+ String sig58 = Base58.encode(signature);
+ ArbitraryDataFileManager.getInstance().arbitraryDataFileHashResponses.put(hash58, new Triple<>(peer, sig58, now));
+ }
+
// Go and fetch the actual data, since this isn't a relay request
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, hashes);
}
@@ -395,10 +476,8 @@ public class ArbitraryDataFileListManager {
Long now = NTP.getTime();
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
- Triple<String, Peer, Long> value = new Triple<>(signature58, peer, now);
- if (arbitraryDataFileManager.arbitraryRelayMap.putIfAbsent(hash58, value) == null) {
- LOGGER.debug("Added {} to relay map: {}, {}, {}", hash58, signature58, peer, now);
- }
+ ArbitraryRelayInfo relayMap = new ArbitraryRelayInfo(hash58, signature58, peer, now);
+ ArbitraryDataFileManager.getInstance().addToRelayMap(relayMap);
}
// Forward to requesting peer
@@ -422,6 +501,7 @@ public class ArbitraryDataFileListManager {
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
byte[] signature = getArbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
+ List<byte[]> requestedHashes = getArbitraryDataFileListMessage.getHashes();
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
@@ -451,36 +531,37 @@ public class ArbitraryDataFileListManager {
// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
- if (metadataHash != null) {
- arbitraryDataFile.setMetadataHash(metadataHash);
+ arbitraryDataFile.setMetadataHash(metadataHash);
- // Assume all chunks exists, unless one can't be found below
- allChunksExist = true;
+ // If the peer didn't supply a hash list, we need to return all hashes for this transaction
+ if (requestedHashes == null || requestedHashes.isEmpty()) {
+ requestedHashes = new ArrayList<>();
- // If we have the metadata file, add its hash
- if (arbitraryDataFile.getMetadataFile().exists()) {
- hashes.add(arbitraryDataFile.getMetadataHash());
+ // Add the metadata file
+ if (arbitraryDataFile.getMetadataHash() != null) {
+ requestedHashes.add(arbitraryDataFile.getMetadataHash());
}
+
+ // Add the chunk hashes
+ if (arbitraryDataFile.getChunkHashes().size() > 0) {
+ requestedHashes.addAll(arbitraryDataFile.getChunkHashes());
+ }
+ // Add complete file if there are no hashes
else {
- allChunksExist = false;
+ requestedHashes.add(arbitraryDataFile.getHash());
}
+ }
- for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) {
- if (chunk.exists()) {
- hashes.add(chunk.getHash());
- //LOGGER.trace("Added hash {}", chunk.getHash58());
- } else {
- LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
- allChunksExist = false;
- }
- }
- } else {
- // This transaction has no chunks, so include the complete file if we have it
- if (arbitraryDataFile.exists()) {
- hashes.add(arbitraryDataFile.getHash());
- allChunksExist = true;
- }
- else {
+ // Assume all chunks exist, unless one can't be found below
+ allChunksExist = true;
+
+ for (byte[] requestedHash : requestedHashes) {
+ ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(requestedHash, signature);
+ if (chunk.exists()) {
+ hashes.add(chunk.getHash());
+ //LOGGER.trace("Added hash {}", chunk.getHash58());
+ } else {
+ LOGGER.trace("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
allChunksExist = false;
}
}
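
The handler above now records, for every hash a peer reports holding, which peer announced it and when, so the request threads introduced later in this patch can fetch files from peers that are known to have them; stale entries are dropped after the relay timeout. Below is a minimal, self-contained sketch of that bookkeeping under the same hash58 -> (peer, signature58, timestamp) shape. The Response type and all names here are illustrative stand-ins, not part of the patch.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    class HashResponseSketch {
        // Stand-in for Triple<Peer, String, Long>; the peer is simplified to a String here
        static class Response {
            final String peer;
            final String signature58;
            final long timestamp;
            Response(String peer, String signature58, long timestamp) {
                this.peer = peer; this.signature58 = signature58; this.timestamp = timestamp;
            }
        }

        // Same shape as arbitraryDataFileHashResponses: hash58 -> announcing peer details
        private final Map<String, Response> responses = Collections.synchronizedMap(new HashMap<>());

        void record(String hash58, String peer, String signature58, long now) {
            responses.put(hash58, new Response(peer, signature58, now));
        }

        // Mirrors cleanupRequestCache(): drop entries older than the relay timeout
        void expire(long now, long relayTimeoutMs) {
            responses.entrySet().removeIf(e -> now - e.getValue().timestamp >= relayTimeoutMs);
        }
    }
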
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileManager.java
index 8eeda508..6d352b79 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileManager.java
@@ -4,10 +4,10 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.controller.Controller;
+import org.qortal.data.arbitrary.ArbitraryRelayInfo;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.data.network.PeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
-import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.*;
@@ -22,13 +22,16 @@ import org.qortal.utils.Triple;
import java.security.SecureRandom;
import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.util.stream.Collectors;
-public class ArbitraryDataFileManager {
+public class ArbitraryDataFileManager extends Thread {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileManager.class);
private static ArbitraryDataFileManager instance;
+ private volatile boolean isStopping = false;
/**
@@ -37,10 +40,16 @@ public class ArbitraryDataFileManager {
private Map<String, Long> arbitraryDataFileRequests = Collections.synchronizedMap(new HashMap<>());
/**
- * Map to keep track of hashes that we might need to relay, keyed by the hash of the file (base58 encoded).
- * Value is comprised of the base58-encoded signature, the peer that is hosting it, and the timestamp that it was added
+ * Map to keep track of hashes that we might need to relay
*/
- public Map<String, Triple<String, Peer, Long>> arbitraryRelayMap = Collections.synchronizedMap(new HashMap<>());
+ public List<ArbitraryRelayInfo> arbitraryRelayMap = Collections.synchronizedList(new ArrayList<>());
+
+ /**
+ * Map to keep track of any arbitrary data file hash responses
+ * Key: string - the hash encoded in base58
+ * Value: Triple<Peer, String, Long>
+ */
+ public Map<String, Triple<Peer, String, Long>> arbitraryDataFileHashResponses = Collections.synchronizedMap(new HashMap<>());
private ArbitraryDataFileManager() {
@@ -53,6 +62,32 @@ public class ArbitraryDataFileManager {
return instance;
}
+ @Override
+ public void run() {
+ Thread.currentThread().setName("Arbitrary Data File Manager");
+
+ try {
+ // Use a fixed thread pool to execute the arbitrary data file requests
+ int threadCount = 10;
+ ExecutorService arbitraryDataFileRequestExecutor = Executors.newFixedThreadPool(threadCount);
+ for (int i = 0; i < threadCount; i++) {
+ arbitraryDataFileRequestExecutor.execute(new ArbitraryDataFileRequestThread());
+ }
+
+ while (!isStopping) {
+ // Nothing to do yet
+ Thread.sleep(1000);
+ }
+ } catch (InterruptedException e) {
+ // Fall-through to exit thread...
+ }
+ }
+
+ public void shutdown() {
+ isStopping = true;
+ this.interrupt();
+ }
+
public void cleanupRequestCache(Long now) {
if (now == null) {
@@ -62,29 +97,14 @@ public class ArbitraryDataFileManager {
arbitraryDataFileRequests.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < requestMinimumTimestamp);
final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT;
- arbitraryRelayMap.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < relayMinimumTimestamp);
+ arbitraryRelayMap.removeIf(entry -> entry == null || entry.getTimestamp() == null || entry.getTimestamp() < relayMinimumTimestamp);
+ arbitraryDataFileHashResponses.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < relayMinimumTimestamp);
}
// Fetch data files by hash
- public boolean fetchAllArbitraryDataFiles(Repository repository, Peer peer, byte[] signature) {
- try {
- TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
- if (!(transactionData instanceof ArbitraryTransactionData))
- return false;
-
- ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
-
- // We use null to represent all hashes associated with this transaction
- return this.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, null);
-
- } catch (DataException e) {}
-
- return false;
- }
-
public boolean fetchArbitraryDataFiles(Repository repository,
Peer peer,
byte[] signature,
@@ -95,43 +115,46 @@ public class ArbitraryDataFileManager {
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
arbitraryDataFile.setMetadataHash(metadataHash);
-
- // If hashes are null, we will treat this to mean all data hashes associated with this file
- if (hashes == null) {
- if (metadataHash == null) {
- // This transaction has no metadata/chunks, so use the main file hash
- hashes = Arrays.asList(arbitraryDataFile.getHash());
- }
- else if (!arbitraryDataFile.getMetadataFile().exists()) {
- // We don't have the metadata file yet, so request it
- hashes = Arrays.asList(arbitraryDataFile.getMetadataFile().getHash());
- }
- else {
- // Add the chunk hashes
- hashes = arbitraryDataFile.getChunkHashes();
- }
- }
-
boolean receivedAtLeastOneFile = false;
// Now fetch actual data from this peer
for (byte[] hash : hashes) {
+ if (isStopping) {
+ return false;
+ }
+ String hash58 = Base58.encode(hash);
if (!arbitraryDataFile.chunkExists(hash)) {
// Only request the file if we aren't already requesting it from someone else
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
+ LOGGER.debug("Requesting data file {} from peer {}", hash58, peer);
+ Long startTime = NTP.getTime();
ArbitraryDataFileMessage receivedArbitraryDataFileMessage = fetchArbitraryDataFile(peer, null, signature, hash, null);
+ Long endTime = NTP.getTime();
if (receivedArbitraryDataFileMessage != null) {
- LOGGER.debug("Received data file {} from peer {}", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer);
+ LOGGER.debug("Received data file {} from peer {}. Time taken: {} ms", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer, (endTime-startTime));
receivedAtLeastOneFile = true;
+
+ // Remove this hash from arbitraryDataFileHashResponses now that we have received it
+ arbitraryDataFileHashResponses.remove(hash58);
}
else {
- LOGGER.debug("Peer {} didn't respond with data file {} for signature {}", peer, Base58.encode(hash), Base58.encode(signature));
+ LOGGER.debug("Peer {} didn't respond with data file {} for signature {}. Time taken: {} ms", peer, Base58.encode(hash), Base58.encode(signature), (endTime-startTime));
+
+ // Remove this hash from arbitraryDataFileHashResponses now that we have failed to receive it
+ arbitraryDataFileHashResponses.remove(hash58);
+
+ // Stop asking for files from this peer
+ break;
}
}
else {
LOGGER.trace("Already requesting data file {} for signature {}", arbitraryDataFile, Base58.encode(signature));
}
}
+ else {
+ // Remove this hash from arbitraryDataFileHashResponses because we have a local copy
+ arbitraryDataFileHashResponses.remove(hash58);
+ }
}
if (receivedAtLeastOneFile) {
@@ -147,22 +170,23 @@ public class ArbitraryDataFileManager {
// Invalidate the hosted transactions cache as we are now hosting something new
ArbitraryDataStorageManager.getInstance().invalidateHostedTransactionsCache();
- }
- // Check if we have all the files we need for this transaction
- if (arbitraryDataFile.allFilesExist()) {
+ // Check if we have all the files we need for this transaction
+ if (arbitraryDataFile.allFilesExist()) {
- // We have all the chunks for this transaction, so we should invalidate the transaction's name's
- // data cache so that it is rebuilt the next time we serve it
- ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData);
+ // We have all the chunks for this transaction, so we should invalidate the transaction's name's
+ // data cache so that it is rebuilt the next time we serve it
+ ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData);
- // We may also need to broadcast to the network that we are now hosting files for this transaction,
- // but only if these files are in accordance with our storage policy
- if (ArbitraryDataStorageManager.getInstance().canStoreData(arbitraryTransactionData)) {
- // Use a null peer address to indicate our own
- Message newArbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, 0, Arrays.asList(signature));
- Network.getInstance().broadcast(broadcastPeer -> newArbitrarySignatureMessage);
+ // We may also need to broadcast to the network that we are now hosting files for this transaction,
+ // but only if these files are in accordance with our storage policy
+ if (ArbitraryDataStorageManager.getInstance().canStoreData(arbitraryTransactionData)) {
+ // Use a null peer address to indicate our own
+ Message newArbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, 0, Arrays.asList(signature));
+ Network.getInstance().broadcast(broadcastPeer -> newArbitrarySignatureMessage);
+ }
}
+
}
return receivedAtLeastOneFile;
@@ -171,11 +195,11 @@ public class ArbitraryDataFileManager {
private ArbitraryDataFileMessage fetchArbitraryDataFile(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
boolean fileAlreadyExists = existingFile.exists();
+ String hash58 = Base58.encode(hash);
Message message = null;
// Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) {
- String hash58 = Base58.encode(hash);
LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
@@ -191,9 +215,17 @@ public class ArbitraryDataFileManager {
// We may need to remove the file list request, if we have all the files for this transaction
this.handleFileListRequests(signature);
- if (message == null || message.getType() != Message.MessageType.ARBITRARY_DATA_FILE) {
+ if (message == null) {
+ LOGGER.debug("Received null message from peer {}", peer);
return null;
}
+ if (message.getType() != Message.MessageType.ARBITRARY_DATA_FILE) {
+ LOGGER.debug("Received message with invalid type: {} from peer {}", message.getType(), peer);
+ return null;
+ }
+ }
+ else {
+ LOGGER.debug(String.format("File hash %s already exists, so skipping the request", hash58));
}
ArbitraryDataFileMessage arbitraryDataFileMessage = (ArbitraryDataFileMessage) message;
@@ -208,16 +240,7 @@ public class ArbitraryDataFileManager {
ArbitraryDataFile dataFile = arbitraryDataFileMessage.getArbitraryDataFile();
// Keep trying to delete the data until it is deleted, or we reach 10 attempts
- for (int i=0; i<10; i++) {
- if (dataFile.delete()) {
- break;
- }
- try {
- Thread.sleep(1000L);
- } catch (InterruptedException e) {
- // Fall through to exit method
- }
- }
+ dataFile.delete(10);
}
}
@@ -359,6 +382,48 @@ public class ArbitraryDataFileManager {
}
+ // Relays
+
+ private List<ArbitraryRelayInfo> getRelayInfoListForHash(String hash58) {
+ synchronized (arbitraryRelayMap) {
+ return arbitraryRelayMap.stream()
+ .filter(relayInfo -> Objects.equals(relayInfo.getHash58(), hash58))
+ .collect(Collectors.toList());
+ }
+ }
+
+ private ArbitraryRelayInfo getRandomRelayInfoEntryForHash(String hash58) {
+ LOGGER.trace("Fetching random relay info for hash: {}", hash58);
+ List<ArbitraryRelayInfo> relayInfoList = this.getRelayInfoListForHash(hash58);
+ if (relayInfoList != null && !relayInfoList.isEmpty()) {
+
+ // Pick random item
+ int index = new SecureRandom().nextInt(relayInfoList.size());
+ LOGGER.trace("Returning random relay info for hash: {} (index {})", hash58, index);
+ return relayInfoList.get(index);
+ }
+ LOGGER.trace("No relay info exists for hash: {}", hash58);
+ return null;
+ }
+
+ public void addToRelayMap(ArbitraryRelayInfo newEntry) {
+ if (newEntry == null || !newEntry.isValid()) {
+ return;
+ }
+
+ // Remove existing entry for this peer if it exists, to renew the timestamp
+ this.removeFromRelayMap(newEntry);
+
+ // Re-add
+ arbitraryRelayMap.add(newEntry);
+ LOGGER.debug("Added entry to relay map: {}", newEntry);
+ }
+
+ private void removeFromRelayMap(ArbitraryRelayInfo entry) {
+ arbitraryRelayMap.removeIf(relayInfo -> relayInfo.equals(entry));
+ }
+
+
// Network handlers
public void onNetworkGetArbitraryDataFileMessage(Peer peer, Message message) {
@@ -377,7 +442,7 @@ public class ArbitraryDataFileManager {
try {
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
- Triple<String, Peer, Long> relayInfo = this.arbitraryRelayMap.get(hash58);
+ ArbitraryRelayInfo relayInfo = this.getRandomRelayInfoEntryForHash(hash58);
if (arbitraryDataFile.exists()) {
LOGGER.trace("Hash {} exists", hash58);
@@ -394,15 +459,12 @@ public class ArbitraryDataFileManager {
else if (relayInfo != null) {
LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
// We need to ask this peer for the file
- Peer peerToAsk = relayInfo.getB();
+ Peer peerToAsk = relayInfo.getPeer();
if (peerToAsk != null) {
// Forward the message to this peer
LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58);
this.fetchArbitraryDataFile(peerToAsk, peer, signature, hash, message);
-
- // Remove from the map regardless of outcome, as the relay attempt is now considered complete
- arbitraryRelayMap.remove(hash58);
}
else {
LOGGER.debug("Peer {} not found in relay info", peer);
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileRequestThread.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileRequestThread.java
new file mode 100644
index 00000000..0c2834d0
--- /dev/null
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataFileRequestThread.java
@@ -0,0 +1,117 @@
+package org.qortal.controller.arbitrary;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.qortal.controller.Controller;
+import org.qortal.data.transaction.ArbitraryTransactionData;
+import org.qortal.network.Peer;
+import org.qortal.repository.DataException;
+import org.qortal.repository.Repository;
+import org.qortal.repository.RepositoryManager;
+import org.qortal.utils.ArbitraryTransactionUtils;
+import org.qortal.utils.Base58;
+import org.qortal.utils.NTP;
+import org.qortal.utils.Triple;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+
+public class ArbitraryDataFileRequestThread implements Runnable {
+
+ private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileRequestThread.class);
+
+ public ArbitraryDataFileRequestThread() {
+
+ }
+
+ @Override
+ public void run() {
+ Thread.currentThread().setName("Arbitrary Data File Request Thread");
+
+ try {
+ while (!Controller.isStopping()) {
+ Long now = NTP.getTime();
+ this.processFileHashes(now);
+ }
+ } catch (InterruptedException e) {
+ // Fall-through to exit thread...
+ }
+ }
+
+ private void processFileHashes(Long now) throws InterruptedException {
+ if (Controller.isStopping()) {
+ return;
+ }
+
+ ArbitraryDataFileManager arbitraryDataFileManager = ArbitraryDataFileManager.getInstance();
+ String signature58 = null;
+ String hash58 = null;
+ Peer peer = null;
+ boolean shouldProcess = false;
+
+ synchronized (arbitraryDataFileManager.arbitraryDataFileHashResponses) {
+ Iterator iterator = arbitraryDataFileManager.arbitraryDataFileHashResponses.entrySet().iterator();
+ while (iterator.hasNext()) {
+ if (Controller.isStopping()) {
+ return;
+ }
+
+ Map.Entry entry = (Map.Entry) iterator.next();
+ if (entry == null || entry.getKey() == null || entry.getValue() == null) {
+ iterator.remove();
+ continue;
+ }
+
+ hash58 = (String) entry.getKey();
+ Triple<Peer, String, Long> value = (Triple<Peer, String, Long>) entry.getValue();
+ if (value == null) {
+ iterator.remove();
+ continue;
+ }
+
+ peer = value.getA();
+ signature58 = value.getB();
+ Long timestamp = value.getC();
+
+ if (now - timestamp >= ArbitraryDataManager.ARBITRARY_RELAY_TIMEOUT || signature58 == null || peer == null) {
+ // Ignore - to be deleted
+ iterator.remove();
+ continue;
+ }
+
+ // We want to process this file
+ shouldProcess = true;
+ iterator.remove();
+ break;
+ }
+ }
+
+ if (!shouldProcess) {
+ // Nothing to do
+ Thread.sleep(1000L);
+ return;
+ }
+
+ byte[] hash = Base58.decode(hash58);
+ byte[] signature = Base58.decode(signature58);
+
+ // Fetch the transaction data
+ try (final Repository repository = RepositoryManager.getRepository()) {
+ ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
+ if (arbitraryTransactionData == null) {
+ return;
+ }
+
+ if (signature == null || hash == null || peer == null || arbitraryTransactionData == null) {
+ return;
+ }
+
+ LOGGER.debug("Fetching file {} from peer {} via request thread...", hash58, peer);
+ arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, Arrays.asList(hash));
+
+ } catch (DataException e) {
+ LOGGER.debug("Unable to process file hashes: {}", e.getMessage());
+ }
+ }
+}
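
ArbitraryDataFileManager.run() (above) starts ten of these request threads; each one repeatedly claims a queued hash response and fetches the corresponding file from the peer that announced it. The standalone sketch below shows the same worker-pool pattern with a plain BlockingQueue standing in for the synchronized arbitraryDataFileHashResponses map; names, sizes and the queue type are illustrative only.

    import java.util.concurrent.*;

    class RequestPoolSketch {
        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<String> pendingHashes = new LinkedBlockingQueue<>();
            pendingHashes.add("exampleHash58");

            // Fixed pool of workers, mirroring the ten ArbitraryDataFileRequestThread instances
            ExecutorService pool = Executors.newFixedThreadPool(10);
            for (int i = 0; i < 10; i++) {
                pool.execute(() -> {
                    try {
                        while (!Thread.currentThread().isInterrupted()) {
                            // Take one pending hash; the real thread would now call
                            // fetchArbitraryDataFiles() against the peer that announced it
                            String hash58 = pendingHashes.poll(1, TimeUnit.SECONDS);
                            if (hash58 != null)
                                System.out.println("Would fetch " + hash58);
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                });
            }

            Thread.sleep(2000L);   // let the workers drain the queue
            pool.shutdownNow();    // interrupt the workers, as shutdown() does for the manager
        }
    }
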
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataManager.java
index 20b4885a..acde16eb 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataManager.java
@@ -38,10 +38,10 @@ public class ArbitraryDataManager extends Thread {
private int powDifficulty = 14; // Must not be final, as unit tests need to reduce this value
/** Request timeout when transferring arbitrary data */
- public static final long ARBITRARY_REQUEST_TIMEOUT = 10 * 1000L; // ms
+ public static final long ARBITRARY_REQUEST_TIMEOUT = 12 * 1000L; // ms
/** Maximum time to hold information about an in-progress relay */
- public static final long ARBITRARY_RELAY_TIMEOUT = 30 * 1000L; // ms
+ public static final long ARBITRARY_RELAY_TIMEOUT = 60 * 1000L; // ms
/** Maximum number of hops that an arbitrary signatures request is allowed to make */
private static int ARBITRARY_SIGNATURES_REQUEST_MAX_HOPS = 3;
@@ -80,6 +80,9 @@ public class ArbitraryDataManager extends Thread {
Thread.currentThread().setName("Arbitrary Data Manager");
try {
+ // Wait for node to finish starting up and making connections
+ Thread.sleep(2 * 60 * 1000L);
+
while (!isStopping) {
Thread.sleep(2000);
diff --git a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java
index 483ab92f..2844cef8 100644
--- a/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java
+++ b/src/main/java/org/qortal/controller/arbitrary/ArbitraryDataRenderManager.java
@@ -32,7 +32,7 @@ public class ArbitraryDataRenderManager extends Thread {
@Override
public void run() {
- Thread.currentThread().setName("Arbitrary Data Manager");
+ Thread.currentThread().setName("Arbitrary Data Render Manager");
try {
while (!isStopping) {
diff --git a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
index 3b92db51..54fba699 100644
--- a/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
+++ b/src/main/java/org/qortal/controller/repository/AtStatesPruner.java
@@ -3,6 +3,7 @@ package org.qortal.controller.repository;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.data.block.BlockData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
@@ -47,7 +48,7 @@ public class AtStatesPruner implements Runnable {
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Controller.getInstance().isSynchronizing())
+ if (Synchronizer.getInstance().isSynchronizing())
continue;
// Prune AT states for all blocks up until our latest minus pruneBlockLimit
diff --git a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
index 98a1a889..d3bdc345 100644
--- a/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
+++ b/src/main/java/org/qortal/controller/repository/AtStatesTrimmer.java
@@ -3,6 +3,7 @@ package org.qortal.controller.repository;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.data.block.BlockData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
@@ -34,7 +35,7 @@ public class AtStatesTrimmer implements Runnable {
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Controller.getInstance().isSynchronizing())
+ if (Synchronizer.getInstance().isSynchronizing())
continue;
long currentTrimmableTimestamp = NTP.getTime() - Settings.getInstance().getAtStatesMaxLifetime();
diff --git a/src/main/java/org/qortal/controller/repository/BlockArchiver.java b/src/main/java/org/qortal/controller/repository/BlockArchiver.java
index a329e912..ef26610c 100644
--- a/src/main/java/org/qortal/controller/repository/BlockArchiver.java
+++ b/src/main/java/org/qortal/controller/repository/BlockArchiver.java
@@ -3,6 +3,7 @@ package org.qortal.controller.repository;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.data.block.BlockData;
import org.qortal.repository.*;
import org.qortal.settings.Settings;
@@ -51,7 +52,7 @@ public class BlockArchiver implements Runnable {
}
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Controller.getInstance().isSynchronizing()) {
+ if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
diff --git a/src/main/java/org/qortal/controller/repository/BlockPruner.java b/src/main/java/org/qortal/controller/repository/BlockPruner.java
index 1258ee38..03fb38b9 100644
--- a/src/main/java/org/qortal/controller/repository/BlockPruner.java
+++ b/src/main/java/org/qortal/controller/repository/BlockPruner.java
@@ -3,6 +3,7 @@ package org.qortal.controller.repository;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.data.block.BlockData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
@@ -51,7 +52,7 @@ public class BlockPruner implements Runnable {
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Controller.getInstance().isSynchronizing()) {
+ if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
diff --git a/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java b/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java
index 0b941c0c..5e54b905 100644
--- a/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java
+++ b/src/main/java/org/qortal/controller/repository/NamesDatabaseIntegrityCheck.java
@@ -268,42 +268,6 @@ public class NamesDatabaseIntegrityCheck {
return registerNameTransactions;
}
- private List<UpdateNameTransactionData> fetchUpdateNameTransactions() {
- List<UpdateNameTransactionData> updateNameTransactions = new ArrayList<>();
-
- for (TransactionData transactionData : this.nameTransactions) {
- if (transactionData.getType() == TransactionType.UPDATE_NAME) {
- UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData;
- updateNameTransactions.add(updateNameTransactionData);
- }
- }
- return updateNameTransactions;
- }
-
- private List<SellNameTransactionData> fetchSellNameTransactions() {
- List<SellNameTransactionData> sellNameTransactions = new ArrayList<>();
-
- for (TransactionData transactionData : this.nameTransactions) {
- if (transactionData.getType() == TransactionType.SELL_NAME) {
- SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData;
- sellNameTransactions.add(sellNameTransactionData);
- }
- }
- return sellNameTransactions;
- }
-
- private List<BuyNameTransactionData> fetchBuyNameTransactions() {
- List<BuyNameTransactionData> buyNameTransactions = new ArrayList<>();
-
- for (TransactionData transactionData : this.nameTransactions) {
- if (transactionData.getType() == TransactionType.BUY_NAME) {
- BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData;
- buyNameTransactions.add(buyNameTransactionData);
- }
- }
- return buyNameTransactions;
- }
-
private void fetchAllNameTransactions(Repository repository) throws DataException {
List<TransactionData> nameTransactions = new ArrayList<>();
@@ -319,41 +283,34 @@ public class NamesDatabaseIntegrityCheck {
this.nameTransactions = nameTransactions;
}
- private List<TransactionData> fetchAllTransactionsInvolvingName(String name, Repository repository) throws DataException {
- List<TransactionData> transactions = new ArrayList<>();
+ public List<TransactionData> fetchAllTransactionsInvolvingName(String name, Repository repository) throws DataException {
+ List<byte[]> signatures = new ArrayList<>();
String reducedName = Unicode.sanitize(name);
- // Fetch all the confirmed name-modification transactions
- if (this.nameTransactions.isEmpty()) {
- this.fetchAllNameTransactions(repository);
- }
+ List<byte[]> registerNameTransactions = repository.getTransactionRepository().getSignaturesMatchingCustomCriteria(
+ TransactionType.REGISTER_NAME, Arrays.asList("(name = ? OR reduced_name = ?)"), Arrays.asList(name, reducedName));
+ signatures.addAll(registerNameTransactions);
- for (TransactionData transactionData : this.nameTransactions) {
+ List<byte[]> updateNameTransactions = repository.getTransactionRepository().getSignaturesMatchingCustomCriteria(
+ TransactionType.UPDATE_NAME,
+ Arrays.asList("(name = ? OR new_name = ? OR (reduced_new_name != '' AND reduced_new_name = ?))"),
+ Arrays.asList(name, name, reducedName));
+ signatures.addAll(updateNameTransactions);
- if ((transactionData instanceof RegisterNameTransactionData)) {
- RegisterNameTransactionData registerNameTransactionData = (RegisterNameTransactionData) transactionData;
- if (Objects.equals(registerNameTransactionData.getReducedName(), reducedName)) {
- transactions.add(transactionData);
- }
- }
- if ((transactionData instanceof UpdateNameTransactionData)) {
- UpdateNameTransactionData updateNameTransactionData = (UpdateNameTransactionData) transactionData;
- if (Objects.equals(updateNameTransactionData.getName(), name) ||
- Objects.equals(updateNameTransactionData.getReducedNewName(), reducedName)) {
- transactions.add(transactionData);
- }
- }
- if ((transactionData instanceof BuyNameTransactionData)) {
- BuyNameTransactionData buyNameTransactionData = (BuyNameTransactionData) transactionData;
- if (Objects.equals(buyNameTransactionData.getName(), name)) {
- transactions.add(transactionData);
- }
- }
- if ((transactionData instanceof SellNameTransactionData)) {
- SellNameTransactionData sellNameTransactionData = (SellNameTransactionData) transactionData;
- if (Objects.equals(sellNameTransactionData.getName(), name)) {
- transactions.add(transactionData);
- }
+ List<byte[]> sellNameTransactions = repository.getTransactionRepository().getSignaturesMatchingCustomCriteria(
+ TransactionType.SELL_NAME, Arrays.asList("name = ?"), Arrays.asList(name));
+ signatures.addAll(sellNameTransactions);
+
+ List<byte[]> buyNameTransactions = repository.getTransactionRepository().getSignaturesMatchingCustomCriteria(
+ TransactionType.BUY_NAME, Arrays.asList("name = ?"), Arrays.asList(name));
+ signatures.addAll(buyNameTransactions);
+
+ List<TransactionData> transactions = new ArrayList<>();
+ for (byte[] signature : signatures) {
+ TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
+ // Filter out any unconfirmed transactions
+ if (transactionData.getBlockHeight() != null && transactionData.getBlockHeight() > 0) {
+ transactions.add(transactionData);
}
}
return transactions;
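
The method is now public and queries the repository directly per transaction type instead of scanning a pre-fetched in-memory list, then filters out anything unconfirmed. Below is an illustrative caller, assuming the repository access pattern used elsewhere in this patch; the name string is made up and the helper class is hypothetical.

    import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
    import org.qortal.data.transaction.TransactionData;
    import org.qortal.repository.DataException;
    import org.qortal.repository.Repository;
    import org.qortal.repository.RepositoryManager;

    import java.util.List;

    class NameHistorySketch {
        static void printNameHistory() {
            try (final Repository repository = RepositoryManager.getRepository()) {
                NamesDatabaseIntegrityCheck integrityCheck = new NamesDatabaseIntegrityCheck();
                List<TransactionData> history =
                        integrityCheck.fetchAllTransactionsInvolvingName("example-name", repository);
                System.out.println("Confirmed name-related transactions: " + history.size());
            } catch (DataException e) {
                // Repository unavailable - nothing to report
            }
        }
    }
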
diff --git a/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
index c7f248d5..dfd9d45e 100644
--- a/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
+++ b/src/main/java/org/qortal/controller/repository/OnlineAccountsSignaturesTrimmer.java
@@ -4,6 +4,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.block.BlockChain;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.data.block.BlockData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
@@ -36,7 +37,7 @@ public class OnlineAccountsSignaturesTrimmer implements Runnable {
continue;
// Don't even attempt if we're mid-sync as our repository requests will be delayed for ages
- if (Controller.getInstance().isSynchronizing())
+ if (Synchronizer.getInstance().isSynchronizing())
continue;
// Trim blockchain by removing 'old' online accounts signatures
diff --git a/src/main/java/org/qortal/crosschain/Bitcoiny.java b/src/main/java/org/qortal/crosschain/Bitcoiny.java
index 3665f4ba..05d3aaa9 100644
--- a/src/main/java/org/qortal/crosschain/Bitcoiny.java
+++ b/src/main/java/org/qortal/crosschain/Bitcoiny.java
@@ -1,12 +1,6 @@
package org.qortal.crosschain;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
@@ -39,6 +33,7 @@ import org.qortal.utils.Amounts;
import org.qortal.utils.BitTwiddling;
import com.google.common.hash.HashCode;
+import org.qortal.utils.NTP;
/** Bitcoin-like (Bitcoin, Litecoin, etc.) support */
public abstract class Bitcoiny implements ForeignBlockchain {
@@ -53,6 +48,12 @@ public abstract class Bitcoiny implements ForeignBlockchain {
protected final NetworkParameters params;
+ /** Cache recent transactions to speed up subsequent lookups */
+ protected List<SimpleTransaction> transactionsCache;
+ protected Long transactionsCacheTimestamp;
+ protected String transactionsCacheXpub;
+ protected static long TRANSACTIONS_CACHE_TIMEOUT = 2 * 60 * 1000L; // 2 minutes
+
/** Keys that have been previously marked as fully spent,
* i.e. keys with transactions but with no unspent outputs. */
protected final Set spentKeys = Collections.synchronizedSet(new HashSet<>());
@@ -228,6 +229,25 @@ public abstract class Bitcoiny implements ForeignBlockchain {
return transaction.getOutputs();
}
+ /**
+ * Returns transactions for passed script
+ *
+ * @throws ForeignBlockchainException if error occurs
+ */
+ public List<TransactionHash> getAddressTransactions(byte[] scriptPubKey, boolean includeUnconfirmed) throws ForeignBlockchainException {
+ int retries = 0;
+ ForeignBlockchainException e2 = null;
+ while (retries <= 3) {
+ try {
+ return this.blockchain.getAddressTransactions(scriptPubKey, includeUnconfirmed);
+ } catch (ForeignBlockchainException e) {
+ e2 = e;
+ retries++;
+ }
+ }
+ throw(e2);
+ }
+
/**
* Returns list of transaction hashes pertaining to passed address.
*
@@ -262,7 +282,17 @@ public abstract class Bitcoiny implements ForeignBlockchain {
* @throws ForeignBlockchainException if error occurs
*/
public BitcoinyTransaction getTransaction(String txHash) throws ForeignBlockchainException {
- return this.blockchain.getTransaction(txHash);
+ int retries = 0;
+ ForeignBlockchainException e2 = null;
+ while (retries <= 3) {
+ try {
+ return this.blockchain.getTransaction(txHash);
+ } catch (ForeignBlockchainException e) {
+ e2 = e;
+ retries++;
+ }
+ }
+ throw(e2);
}
/**
@@ -337,70 +367,99 @@ public abstract class Bitcoiny implements ForeignBlockchain {
return balance.value;
}
+ public Long getWalletBalanceFromTransactions(String key58) throws ForeignBlockchainException {
+ long balance = 0;
+ Comparator<SimpleTransaction> oldestTimestampFirstComparator = Comparator.comparingInt(SimpleTransaction::getTimestamp);
+ List<SimpleTransaction> transactions = getWalletTransactions(key58).stream().sorted(oldestTimestampFirstComparator).collect(Collectors.toList());
+ for (SimpleTransaction transaction : transactions) {
+ balance += transaction.getTotalAmount();
+ }
+ return balance;
+ }
+
public List<SimpleTransaction> getWalletTransactions(String key58) throws ForeignBlockchainException {
- Context.propagate(bitcoinjContext);
-
- Wallet wallet = walletFromDeterministicKey58(key58);
- DeterministicKeyChain keyChain = wallet.getActiveKeyChain();
-
- keyChain.setLookaheadSize(Bitcoiny.WALLET_KEY_LOOKAHEAD_INCREMENT);
- keyChain.maybeLookAhead();
-
- List<DeterministicKey> keys = new ArrayList<>(keyChain.getLeafKeys());
-
- Set<BitcoinyTransaction> walletTransactions = new HashSet<>();
- Set<String> keySet = new HashSet<>();
-
- // Set the number of consecutive empty batches required before giving up
- final int numberOfAdditionalBatchesToSearch = 5;
-
- int unusedCounter = 0;
- int ki = 0;
- do {
- boolean areAllKeysUnused = true;
-
- for (; ki < keys.size(); ++ki) {
- DeterministicKey dKey = keys.get(ki);
-
- // Check for transactions
- Address address = Address.fromKey(this.params, dKey, ScriptType.P2PKH);
- keySet.add(address.toString());
- byte[] script = ScriptBuilder.createOutputScript(address).getProgram();
-
- // Ask for transaction history - if it's empty then key has never been used
- List<TransactionHash> historicTransactionHashes = this.blockchain.getAddressTransactions(script, false);
-
- if (!historicTransactionHashes.isEmpty()) {
- areAllKeysUnused = false;
-
- for (TransactionHash transactionHash : historicTransactionHashes)
- walletTransactions.add(this.getTransaction(transactionHash.txHash));
+ synchronized (this) {
+ // Serve from the cache if it's recent, and matches this xpub
+ if (Objects.equals(transactionsCacheXpub, key58)) {
+ if (transactionsCache != null && transactionsCacheTimestamp != null) {
+ Long now = NTP.getTime();
+ boolean isCacheStale = (now != null && now - transactionsCacheTimestamp >= TRANSACTIONS_CACHE_TIMEOUT);
+ if (!isCacheStale) {
+ return transactionsCache;
+ }
}
}
- if (areAllKeysUnused) {
- // No transactions
- if (unusedCounter >= numberOfAdditionalBatchesToSearch) {
- // ... and we've hit our search limit
- break;
+ Context.propagate(bitcoinjContext);
+
+ Wallet wallet = walletFromDeterministicKey58(key58);
+ DeterministicKeyChain keyChain = wallet.getActiveKeyChain();
+
+ keyChain.setLookaheadSize(Bitcoiny.WALLET_KEY_LOOKAHEAD_INCREMENT);
+ keyChain.maybeLookAhead();
+
+ List<DeterministicKey> keys = new ArrayList<>(keyChain.getLeafKeys());
+
+ Set<BitcoinyTransaction> walletTransactions = new HashSet<>();
+ Set<String> keySet = new HashSet<>();
+
+ // Set the number of consecutive empty batches required before giving up
+ final int numberOfAdditionalBatchesToSearch = 7;
+
+ int unusedCounter = 0;
+ int ki = 0;
+ do {
+ boolean areAllKeysUnused = true;
+
+ for (; ki < keys.size(); ++ki) {
+ DeterministicKey dKey = keys.get(ki);
+
+ // Check for transactions
+ Address address = Address.fromKey(this.params, dKey, ScriptType.P2PKH);
+ keySet.add(address.toString());
+ byte[] script = ScriptBuilder.createOutputScript(address).getProgram();
+
+ // Ask for transaction history - if it's empty then key has never been used
+ List<TransactionHash> historicTransactionHashes = this.getAddressTransactions(script, false);
+
+ if (!historicTransactionHashes.isEmpty()) {
+ areAllKeysUnused = false;
+
+ for (TransactionHash transactionHash : historicTransactionHashes)
+ walletTransactions.add(this.getTransaction(transactionHash.txHash));
+ }
}
- // We haven't hit our search limit yet so increment the counter and keep looking
- unusedCounter++;
- }
- else {
- // Some keys in this batch were used, so reset the counter
- unusedCounter = 0;
- }
- // Generate some more keys
- keys.addAll(generateMoreKeys(keyChain));
+ if (areAllKeysUnused) {
+ // No transactions
+ if (unusedCounter >= numberOfAdditionalBatchesToSearch) {
+ // ... and we've hit our search limit
+ break;
+ }
+ // We haven't hit our search limit yet so increment the counter and keep looking
+ unusedCounter++;
+ } else {
+ // Some keys in this batch were used, so reset the counter
+ unusedCounter = 0;
+ }
- // Process new keys
- } while (true);
+ // Generate some more keys
+ keys.addAll(generateMoreKeys(keyChain));
- Comparator<SimpleTransaction> newestTimestampFirstComparator = Comparator.comparingInt(SimpleTransaction::getTimestamp).reversed();
+ // Process new keys
+ } while (true);
- return walletTransactions.stream().map(t -> convertToSimpleTransaction(t, keySet)).sorted(newestTimestampFirstComparator).collect(Collectors.toList());
+ Comparator<SimpleTransaction> newestTimestampFirstComparator = Comparator.comparingInt(SimpleTransaction::getTimestamp).reversed();
+
+ // Update cache and return
+ transactionsCacheTimestamp = NTP.getTime();
+ transactionsCacheXpub = key58;
+ transactionsCache = walletTransactions.stream()
+ .map(t -> convertToSimpleTransaction(t, keySet))
+ .sorted(newestTimestampFirstComparator).collect(Collectors.toList());
+
+ return transactionsCache;
+ }
}
protected SimpleTransaction convertToSimpleTransaction(BitcoinyTransaction t, Set<String> keySet) {
@@ -411,19 +470,23 @@ public abstract class Bitcoiny implements ForeignBlockchain {
List<SimpleTransaction.Input> inputs = new ArrayList<>();
List<SimpleTransaction.Output> outputs = new ArrayList<>();
+ boolean anyOutputAddressInWallet = false;
+
for (BitcoinyTransaction.Input input : t.inputs) {
try {
BitcoinyTransaction t2 = getTransaction(input.outputTxHash);
List<String> senders = t2.outputs.get(input.outputVout).addresses;
long inputAmount = t2.outputs.get(input.outputVout).value;
totalInputAmount += inputAmount;
- for (String sender : senders) {
- boolean addressInWallet = false;
- if (keySet.contains(sender)) {
- total += inputAmount;
- addressInWallet = true;
+ if (senders != null) {
+ for (String sender : senders) {
+ boolean addressInWallet = false;
+ if (keySet.contains(sender)) {
+ total += inputAmount;
+ addressInWallet = true;
+ }
+ inputs.add(new SimpleTransaction.Input(sender, inputAmount, addressInWallet));
}
- inputs.add(new SimpleTransaction.Input(sender, inputAmount, addressInWallet));
}
} catch (ForeignBlockchainException e) {
LOGGER.trace("Failed to retrieve transaction information {}", input.outputTxHash);
@@ -431,22 +494,32 @@ public abstract class Bitcoiny implements ForeignBlockchain {
}
if (t.outputs != null && !t.outputs.isEmpty()) {
for (BitcoinyTransaction.Output output : t.outputs) {
- for (String address : output.addresses) {
- boolean addressInWallet = false;
- if (keySet.contains(address)) {
- if (total > 0L) {
- amount -= (total - output.value);
- } else {
- amount += output.value;
+ if (output.addresses != null) {
+ for (String address : output.addresses) {
+ boolean addressInWallet = false;
+ if (keySet.contains(address)) {
+ if (total > 0L) {
+ amount -= (total - output.value);
+ } else {
+ amount += output.value;
+ }
+ addressInWallet = true;
+ anyOutputAddressInWallet = true;
}
- addressInWallet = true;
+ outputs.add(new SimpleTransaction.Output(address, output.value, addressInWallet));
}
- outputs.add(new SimpleTransaction.Output(address, output.value, addressInWallet));
}
totalOutputAmount += output.value;
}
}
long fee = totalInputAmount - totalOutputAmount;
+
+ if (!anyOutputAddressInWallet) {
+ // No outputs relate to this wallet - check if any inputs did (which is signified by a positive total)
+ if (total > 0) {
+ amount = total * -1;
+ }
+ }
return new SimpleTransaction(t.txHash, t.timestamp, amount, fee, inputs, outputs);
}
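
getWalletTransactions() now keeps a short-lived per-xpub cache, so repeated wallet lookups within two minutes are served without hitting ElectrumX again, and the retry wrappers above smooth over transient server errors. The small helper below reproduces the cache-freshness test in isolation, including the behaviour of still serving the cache when no NTP time is available; it is a sketch only, not part of the patch.

    import java.util.List;
    import java.util.Objects;

    class WalletCacheSketch {
        // Mirrors the checks at the top of getWalletTransactions(): the cache is only
        // reused when it belongs to the same xpub and is younger than the timeout.
        static boolean canServeFromCache(String requestedXpub, String cachedXpub,
                                         List<?> cache, Long cachedAt, Long now, long timeoutMs) {
            if (!Objects.equals(cachedXpub, requestedXpub))
                return false;                          // cache belongs to a different wallet
            if (cache == null || cachedAt == null)
                return false;                          // nothing cached yet
            if (now == null)
                return true;                           // no NTP time: cache is not treated as stale
            return now - cachedAt < timeoutMs;         // fresh enough to reuse
        }

        public static void main(String[] args) {
            long twoMinutes = 2 * 60 * 1000L;
            long cachedAt = System.currentTimeMillis() - 30_000L; // cached 30 seconds ago
            boolean reuse = canServeFromCache("xpub-demo", "xpub-demo",
                    List.of(), cachedAt, System.currentTimeMillis(), twoMinutes);
            System.out.println("Serve from cache? " + reuse);
        }
    }
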
diff --git a/src/main/java/org/qortal/crosschain/Dogecoin.java b/src/main/java/org/qortal/crosschain/Dogecoin.java
index 4acd95aa..6a70bb00 100644
--- a/src/main/java/org/qortal/crosschain/Dogecoin.java
+++ b/src/main/java/org/qortal/crosschain/Dogecoin.java
@@ -19,12 +19,13 @@ public class Dogecoin extends Bitcoiny {
public static final String CURRENCY_CODE = "DOGE";
- private static final Coin DEFAULT_FEE_PER_KB = Coin.valueOf(500000000); // 5 DOGE per 1000 bytes
+ private static final Coin DEFAULT_FEE_PER_KB = Coin.valueOf(1000000); // 0.01 DOGE per 1000 bytes
- private static final long MINIMUM_ORDER_AMOUNT = 300000000L; // 3 DOGE minimum order. The RPC dust threshold is around 2 DOGE
+ private static final long MINIMUM_ORDER_AMOUNT = 100000000L; // 1 DOGE minimum order. See recommendations:
+ // https://github.com/dogecoin/dogecoin/blob/master/doc/fee-recommendation.md
// Temporary values until a dynamic fee system is written.
- private static final long MAINNET_FEE = 110000000L;
+ private static final long MAINNET_FEE = 100000L;
private static final long NON_MAINNET_FEE = 10000L; // TODO: calibrate this
private static final Map<ConnectionType, Integer> DEFAULT_ELECTRUMX_PORTS = new EnumMap<>(ConnectionType.class);
diff --git a/src/main/java/org/qortal/crosschain/ElectrumX.java b/src/main/java/org/qortal/crosschain/ElectrumX.java
index 4ab7e0b1..7f1eb4c4 100644
--- a/src/main/java/org/qortal/crosschain/ElectrumX.java
+++ b/src/main/java/org/qortal/crosschain/ElectrumX.java
@@ -5,19 +5,7 @@ import java.math.BigDecimal;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Random;
-import java.util.Scanner;
-import java.util.Set;
+import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -50,6 +38,9 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
/** Error message sent by some ElectrumX servers when they don't support returning verbose transactions. */
private static final String VERBOSE_TRANSACTIONS_UNSUPPORTED_MESSAGE = "verbose transactions are currently unsupported";
+ private static final int RESPONSE_TIME_READINGS = 5;
+ private static final long MAX_AVG_RESPONSE_TIME = 500L; // ms
+
public static class Server {
String hostname;
@@ -57,6 +48,7 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
ConnectionType connectionType;
int port;
+ private List<Long> responseTimes = new ArrayList<>();
public Server(String hostname, ConnectionType connectionType, int port) {
this.hostname = hostname;
@@ -64,6 +56,25 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
this.port = port;
}
+ public void addResponseTime(long responseTime) {
+ while (this.responseTimes.size() > RESPONSE_TIME_READINGS) {
+ this.responseTimes.remove(0);
+ }
+ this.responseTimes.add(responseTime);
+ }
+
+ public long averageResponseTime() {
+ if (this.responseTimes.size() < RESPONSE_TIME_READINGS) {
+ // Not enough readings yet
+ return 0L;
+ }
+ OptionalDouble average = this.responseTimes.stream().mapToDouble(a -> a).average();
+ if (average.isPresent()) {
+ return Double.valueOf(average.getAsDouble()).longValue();
+ }
+ return 0L;
+ }
+
@Override
public boolean equals(Object other) {
if (other == this)
@@ -103,7 +114,7 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
private Scanner scanner;
private int nextId = 1;
- private static final int TX_CACHE_SIZE = 200;
+ private static final int TX_CACHE_SIZE = 1000;
@SuppressWarnings("serial")
private final Map<String, BitcoinyTransaction> transactionCache = Collections.synchronizedMap(new LinkedHashMap<>(TX_CACHE_SIZE + 1, 0.75F, true) {
// This method is called just after a new entry has been added
@@ -539,6 +550,17 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
while (haveConnection()) {
Object response = connectedRpc(method, params);
+
+ // If we have more servers and this one replied slowly, try another
+ if (!this.remainingServers.isEmpty()) {
+ long averageResponseTime = this.currentServer.averageResponseTime();
+ if (averageResponseTime > MAX_AVG_RESPONSE_TIME) {
+ LOGGER.info("Slow average response time {}ms from {} - trying another server...", averageResponseTime, this.currentServer.hostname);
+ this.closeServer();
+ break;
+ }
+ }
+
if (response != null)
return response;
@@ -628,6 +650,7 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
String request = requestJson.toJSONString() + "\n";
LOGGER.trace(() -> String.format("Request: %s", request));
+ long startTime = System.currentTimeMillis();
final String response;
try {
@@ -638,7 +661,11 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
return null;
}
+ long endTime = System.currentTimeMillis();
+ long responseTime = endTime-startTime;
+
LOGGER.trace(() -> String.format("Response: %s", response));
+ LOGGER.trace(() -> String.format("Time taken: %dms", endTime-startTime));
if (response.isEmpty())
// Empty response - try another server?
@@ -649,6 +676,11 @@ public class ElectrumX extends BitcoinyBlockchainProvider {
// Unexpected response - try another server?
return null;
+ // Keep track of response times
+ if (this.currentServer != null) {
+ this.currentServer.addResponseTime(responseTime);
+ }
+
JSONObject responseJson = (JSONObject) responseObj;
Object errorObj = responseJson.get("error");
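
Each ElectrumX server now tracks its recent response times, and once the rolling average of the last few readings exceeds MAX_AVG_RESPONSE_TIME (500 ms) the client closes the connection and tries another server. Below is a self-contained illustration of that rolling-average logic with made-up readings; the class is a sketch mirroring the Server changes, not the Server class itself.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.OptionalDouble;

    class ResponseTimeSketch {
        static final int READINGS = 5;          // mirrors RESPONSE_TIME_READINGS
        static final long MAX_AVG_MS = 500L;    // mirrors MAX_AVG_RESPONSE_TIME

        private final List<Long> responseTimes = new ArrayList<>();

        void addResponseTime(long responseTime) {
            while (responseTimes.size() > READINGS)
                responseTimes.remove(0);        // keep only the most recent readings
            responseTimes.add(responseTime);
        }

        long averageResponseTime() {
            if (responseTimes.size() < READINGS)
                return 0L;                      // not enough data yet
            OptionalDouble average = responseTimes.stream().mapToDouble(a -> a).average();
            return average.isPresent() ? (long) average.getAsDouble() : 0L;
        }

        public static void main(String[] args) {
            ResponseTimeSketch server = new ResponseTimeSketch();
            for (long ms : new long[] { 120, 340, 560, 610, 700, 820 })
                server.addResponseTime(ms);
            boolean tooSlow = server.averageResponseTime() > MAX_AVG_MS;
            System.out.println("average=" + server.averageResponseTime() + "ms, switch server? " + tooSlow);
        }
    }
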
diff --git a/src/main/java/org/qortal/crosschain/Litecoin.java b/src/main/java/org/qortal/crosschain/Litecoin.java
index 42ee70de..21ecd1db 100644
--- a/src/main/java/org/qortal/crosschain/Litecoin.java
+++ b/src/main/java/org/qortal/crosschain/Litecoin.java
@@ -50,8 +50,12 @@ public class Litecoin extends Bitcoiny {
new Server("electrum.ltc.xurious.com", Server.ConnectionType.TCP, 50001),
new Server("electrum.ltc.xurious.com", Server.ConnectionType.SSL, 50002),
new Server("electrum-ltc.bysh.me", Server.ConnectionType.SSL, 50002),
- new Server("ltc.rentonisk.com", Server.ConnectionType.TCP, 50001),
- new Server("ltc.rentonisk.com", Server.ConnectionType.SSL, 50002),
+ new Server("electrum2.cipig.net", Server.ConnectionType.SSL, 20063),
+ new Server("electrum3.cipig.net", Server.ConnectionType.SSL, 20063),
+ new Server("electrum3.cipig.net", ConnectionType.TCP, 10063),
+ new Server("electrum2.cipig.net", Server.ConnectionType.TCP, 10063),
+ new Server("electrum1.cipig.net", Server.ConnectionType.SSL, 20063),
+ new Server("electrum1.cipig.net", Server.ConnectionType.TCP, 10063),
new Server("electrum-ltc.petrkr.net", Server.ConnectionType.SSL, 60002),
new Server("ltc.litepay.ch", Server.ConnectionType.SSL, 50022),
new Server("electrum-ltc-bysh.me", Server.ConnectionType.TCP, 50002),
diff --git a/src/main/java/org/qortal/data/arbitrary/ArbitraryRelayInfo.java b/src/main/java/org/qortal/data/arbitrary/ArbitraryRelayInfo.java
new file mode 100644
index 00000000..94f41d18
--- /dev/null
+++ b/src/main/java/org/qortal/data/arbitrary/ArbitraryRelayInfo.java
@@ -0,0 +1,60 @@
+package org.qortal.data.arbitrary;
+
+import org.qortal.network.Peer;
+import java.util.Objects;
+
+public class ArbitraryRelayInfo {
+
+ private final String hash58;
+ private final String signature58;
+ private final Peer peer;
+ private final Long timestamp;
+
+ public ArbitraryRelayInfo(String hash58, String signature58, Peer peer, Long timestamp) {
+ this.hash58 = hash58;
+ this.signature58 = signature58;
+ this.peer = peer;
+ this.timestamp = timestamp;
+ }
+
+ public boolean isValid() {
+ return this.getHash58() != null && this.getSignature58() != null
+ && this.getPeer() != null && this.getTimestamp() != null;
+ }
+
+ public String getHash58() {
+ return this.hash58;
+ }
+
+ public String getSignature58() {
+ return signature58;
+ }
+
+ public Peer getPeer() {
+ return peer;
+ }
+
+ public Long getTimestamp() {
+ return timestamp;
+ }
+
+ @Override
+ public String toString() {
+ return String.format("%s = %s, %s, %d", this.hash58, this.signature58, this.peer, this.timestamp);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == this)
+ return true;
+
+ if (!(other instanceof ArbitraryRelayInfo))
+ return false;
+
+ ArbitraryRelayInfo otherRelayInfo = (ArbitraryRelayInfo) other;
+
+ return this.peer == otherRelayInfo.getPeer()
+ && Objects.equals(this.hash58, otherRelayInfo.getHash58())
+ && Objects.equals(this.signature58, otherRelayInfo.getSignature58());
+ }
+}
diff --git a/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceStatus.java b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceStatus.java
index 35b83507..5e6ac055 100644
--- a/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceStatus.java
+++ b/src/main/java/org/qortal/data/arbitrary/ArbitraryResourceStatus.java
@@ -30,13 +30,21 @@ public class ArbitraryResourceStatus {
private String title;
private String description;
+ private Integer localChunkCount;
+ private Integer totalChunkCount;
+
public ArbitraryResourceStatus() {
}
- public ArbitraryResourceStatus(Status status) {
+ public ArbitraryResourceStatus(Status status, Integer localChunkCount, Integer totalChunkCount) {
this.id = status.toString();
this.title = status.title;
this.description = status.description;
+ this.localChunkCount = localChunkCount;
+ this.totalChunkCount = totalChunkCount;
}
+ public ArbitraryResourceStatus(Status status) {
+ this(status, null, null);
+ }
}
diff --git a/src/main/java/org/qortal/list/ResourceList.java b/src/main/java/org/qortal/list/ResourceList.java
index fbdc8470..099aa168 100644
--- a/src/main/java/org/qortal/list/ResourceList.java
+++ b/src/main/java/org/qortal/list/ResourceList.java
@@ -13,6 +13,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
public class ResourceList {
@@ -20,7 +21,7 @@ public class ResourceList {
private static final Logger LOGGER = LogManager.getLogger(ResourceList.class);
private String name;
- private List<String> list = new ArrayList<>();
+ private List<String> list = Collections.synchronizedList(new ArrayList<>());
/**
* ResourceList
diff --git a/src/main/java/org/qortal/list/ResourceListManager.java b/src/main/java/org/qortal/list/ResourceListManager.java
index 4d4d559d..4182f87c 100644
--- a/src/main/java/org/qortal/list/ResourceListManager.java
+++ b/src/main/java/org/qortal/list/ResourceListManager.java
@@ -5,6 +5,7 @@ import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Objects;
@@ -13,7 +14,7 @@ public class ResourceListManager {
private static final Logger LOGGER = LogManager.getLogger(ResourceListManager.class);
private static ResourceListManager instance;
- private List<ResourceList> lists = new ArrayList<>();
+ private List<ResourceList> lists = Collections.synchronizedList(new ArrayList<>());
public ResourceListManager() {
diff --git a/src/main/java/org/qortal/network/Handshake.java b/src/main/java/org/qortal/network/Handshake.java
index d88654cf..cdcff1d7 100644
--- a/src/main/java/org/qortal/network/Handshake.java
+++ b/src/main/java/org/qortal/network/Handshake.java
@@ -74,6 +74,12 @@ public enum Handshake {
peer.setPeersConnectionTimestamp(peersConnectionTimestamp);
peer.setPeersVersion(versionString, version);
+ // Ensure the peer is running at least the version specified in MIN_PEER_VERSION
+ if (peer.isAtLeastVersion(MIN_PEER_VERSION) == false) {
+ LOGGER.debug(String.format("Ignoring peer %s because it is on an old version (%s)", peer, versionString));
+ return null;
+ }
+
if (Settings.getInstance().getAllowConnectionsWithOlderPeerVersions() == false) {
// Ensure the peer is running at least the minimum version allowed for connections
final String minPeerVersion = Settings.getInstance().getMinPeerVersion();
@@ -258,6 +264,9 @@ public enum Handshake {
private static final long PEER_VERSION_131 = 0x0100030001L;
+ /** Minimum peer version that we are allowed to communicate with */
+ private static final String MIN_PEER_VERSION = "3.1.0";
+
private static final int POW_BUFFER_SIZE_PRE_131 = 8 * 1024 * 1024; // bytes
private static final int POW_DIFFICULTY_PRE_131 = 8; // leading zero bits
// Can always be made harder in the future...
diff --git a/src/main/java/org/qortal/network/Network.java b/src/main/java/org/qortal/network/Network.java
index dde82112..c9ae3b7a 100644
--- a/src/main/java/org/qortal/network/Network.java
+++ b/src/main/java/org/qortal/network/Network.java
@@ -6,7 +6,7 @@ import org.bouncycastle.crypto.params.Ed25519PrivateKeyParameters;
import org.bouncycastle.crypto.params.Ed25519PublicKeyParameters;
import org.qortal.block.BlockChain;
import org.qortal.controller.Controller;
-import org.qortal.controller.arbitrary.ArbitraryDataFileManager;
+import org.qortal.controller.arbitrary.ArbitraryDataFileListManager;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.crypto.Crypto;
import org.qortal.data.block.BlockData;
@@ -307,12 +307,7 @@ public class Network {
return false;
}
- try (final Repository repository = RepositoryManager.getRepository()) {
- return ArbitraryDataFileManager.getInstance().fetchAllArbitraryDataFiles(repository, connectedPeer, signature);
- } catch (DataException e) {
- LOGGER.info("Unable to fetch arbitrary data files");
- }
- return false;
+ return ArbitraryDataFileListManager.getInstance().fetchArbitraryDataFileList(connectedPeer, signature);
}
/**
@@ -1169,11 +1164,13 @@ public class Network {
if (consecutiveReadings >= consecutiveReadingsRequired) {
// Last 10 readings were the same - i.e. more than one peer agreed on the new IP address...
String ip = ipAddressHistory.get(size - 1);
- if (!Objects.equals(ip, this.ourExternalIpAddress)) {
- // ... and the readings were different to our current recorded value, so
- // update our external IP address value
- this.ourExternalIpAddress = ip;
- this.onExternalIpUpdate(ip);
+ if (ip != null && !Objects.equals(ip, "null")) {
+ if (!Objects.equals(ip, this.ourExternalIpAddress)) {
+ // ... and the readings were different to our current recorded value, so
+ // update our external IP address value
+ this.ourExternalIpAddress = ip;
+ this.onExternalIpUpdate(ip);
+ }
}
}
}
@@ -1181,7 +1178,7 @@ public class Network {
public void onExternalIpUpdate(String ipAddress) {
LOGGER.info("External IP address updated to {}", ipAddress);
- ArbitraryDataManager.getInstance().broadcastHostedSignatureList();
+ //ArbitraryDataManager.getInstance().broadcastHostedSignatureList();
}
diff --git a/src/main/java/org/qortal/network/message/ArbitraryDataFileMessage.java b/src/main/java/org/qortal/network/message/ArbitraryDataFileMessage.java
index d87e9685..b9f24e29 100644
--- a/src/main/java/org/qortal/network/message/ArbitraryDataFileMessage.java
+++ b/src/main/java/org/qortal/network/message/ArbitraryDataFileMessage.java
@@ -1,6 +1,8 @@
package org.qortal.network.message;
import com.google.common.primitives.Ints;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.repository.DataException;
import org.qortal.transform.Transformer;
@@ -12,6 +14,8 @@ import java.nio.ByteBuffer;
public class ArbitraryDataFileMessage extends Message {
+ private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileMessage.class);
+
private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH;
private final byte[] signature;
@@ -52,6 +56,7 @@ public class ArbitraryDataFileMessage extends Message {
return new ArbitraryDataFileMessage(id, signature, arbitraryDataFile);
}
catch (DataException e) {
+ LOGGER.info("Unable to process received file: {}", e.getMessage());
return null;
}
}
diff --git a/src/main/java/org/qortal/network/message/GetArbitraryDataFileListMessage.java b/src/main/java/org/qortal/network/message/GetArbitraryDataFileListMessage.java
index e19bbb25..af19eec1 100644
--- a/src/main/java/org/qortal/network/message/GetArbitraryDataFileListMessage.java
+++ b/src/main/java/org/qortal/network/message/GetArbitraryDataFileListMessage.java
@@ -3,11 +3,14 @@ package org.qortal.network.message;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import org.qortal.transform.Transformer;
+import org.qortal.transform.transaction.TransactionTransformer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
import static org.qortal.transform.Transformer.INT_LENGTH;
import static org.qortal.transform.Transformer.LONG_LENGTH;
@@ -15,19 +18,22 @@ import static org.qortal.transform.Transformer.LONG_LENGTH;
public class GetArbitraryDataFileListMessage extends Message {
private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH;
+ private static final int HASH_LENGTH = TransactionTransformer.SHA256_LENGTH;
private final byte[] signature;
+ private List<byte[]> hashes;
private final long requestTime;
private int requestHops;
- public GetArbitraryDataFileListMessage(byte[] signature, long requestTime, int requestHops) {
- this(-1, signature, requestTime, requestHops);
+ public GetArbitraryDataFileListMessage(byte[] signature, List<byte[]> hashes, long requestTime, int requestHops) {
+ this(-1, signature, hashes, requestTime, requestHops);
}
- private GetArbitraryDataFileListMessage(int id, byte[] signature, long requestTime, int requestHops) {
+ private GetArbitraryDataFileListMessage(int id, byte[] signature, List<byte[]> hashes, long requestTime, int requestHops) {
super(id, MessageType.GET_ARBITRARY_DATA_FILE_LIST);
this.signature = signature;
+ this.hashes = hashes;
this.requestTime = requestTime;
this.requestHops = requestHops;
}
@@ -36,10 +42,11 @@ public class GetArbitraryDataFileListMessage extends Message {
return this.signature;
}
- public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException {
- if (bytes.remaining() != SIGNATURE_LENGTH + LONG_LENGTH + INT_LENGTH)
- return null;
+ public List<byte[]> getHashes() {
+ return this.hashes;
+ }
+ public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException {
byte[] signature = new byte[SIGNATURE_LENGTH];
bytes.get(signature);
@@ -48,7 +55,23 @@ public class GetArbitraryDataFileListMessage extends Message {
int requestHops = bytes.getInt();
- return new GetArbitraryDataFileListMessage(id, signature, requestTime, requestHops);
+ List<byte[]> hashes = null;
+ if (bytes.hasRemaining()) {
+ int hashCount = bytes.getInt();
+
+ if (bytes.remaining() != hashCount * HASH_LENGTH) {
+ return null;
+ }
+
+ hashes = new ArrayList<>();
+ for (int i = 0; i < hashCount; ++i) {
+ byte[] hash = new byte[HASH_LENGTH];
+ bytes.get(hash);
+ hashes.add(hash);
+ }
+ }
+
+ return new GetArbitraryDataFileListMessage(id, signature, hashes, requestTime, requestHops);
}
@Override
@@ -62,6 +85,14 @@ public class GetArbitraryDataFileListMessage extends Message {
bytes.write(Ints.toByteArray(this.requestHops));
+ if (this.hashes != null) {
+ bytes.write(Ints.toByteArray(this.hashes.size()));
+
+ for (byte[] hash : this.hashes) {
+ bytes.write(hash);
+ }
+ }
+
return bytes.toByteArray();
} catch (IOException e) {
return null;
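Two things change in this message class: the hard length check in fromByteBuffer() is dropped so that a new optional suffix can be parsed when present, and that suffix carries a list of specific chunk hashes of interest. Older peers simply omit the suffix, which is why bytes.hasRemaining() decides whether to read it. A sketch of the resulting wire layout, assuming the 64-byte signature and 32-byte SHA-256 hash lengths used elsewhere in the codebase (the helper class below is illustrative only):

    // Layout of the GET_ARBITRARY_DATA_FILE_LIST payload:
    //   signature      64 bytes
    //   requestTime     8 bytes (long)
    //   requestHops     4 bytes (int)
    //   -- optional suffix, absent in messages from older peers --
    //   hashCount       4 bytes (int)
    //   hashes          hashCount * 32 bytes
    public class FileListRequestLayout {
        private static final int SIGNATURE_LENGTH = 64;
        private static final int HASH_LENGTH = 32;
        private static final int LONG_LENGTH = 8;
        private static final int INT_LENGTH = 4;

        /** Expected payload size for a request carrying hashCount hashes; pass 0 for the legacy, suffix-free format. */
        public static int expectedLength(int hashCount) {
            int length = SIGNATURE_LENGTH + LONG_LENGTH + INT_LENGTH;
            if (hashCount > 0)
                length += INT_LENGTH + hashCount * HASH_LENGTH;
            return length;
        }
    }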
diff --git a/src/main/java/org/qortal/repository/BlockArchiveReader.java b/src/main/java/org/qortal/repository/BlockArchiveReader.java
index cff272a8..b6d7cdd6 100644
--- a/src/main/java/org/qortal/repository/BlockArchiveReader.java
+++ b/src/main/java/org/qortal/repository/BlockArchiveReader.java
@@ -145,20 +145,22 @@ public class BlockArchiveReader {
}
private String getFilenameForHeight(int height) {
- Iterator it = this.fileListCache.entrySet().iterator();
- while (it.hasNext()) {
- Map.Entry pair = (Map.Entry)it.next();
- if (pair == null && pair.getKey() == null && pair.getValue() == null) {
- continue;
- }
- Triple heightInfo = (Triple) pair.getValue();
- Integer startHeight = heightInfo.getA();
- Integer endHeight = heightInfo.getB();
+ synchronized (this.fileListCache) {
+ Iterator it = this.fileListCache.entrySet().iterator();
+ while (it.hasNext()) {
+ Map.Entry pair = (Map.Entry) it.next();
+ if (pair == null && pair.getKey() == null && pair.getValue() == null) {
+ continue;
+ }
+ Triple heightInfo = (Triple) pair.getValue();
+ Integer startHeight = heightInfo.getA();
+ Integer endHeight = heightInfo.getB();
- if (height >= startHeight && height <= endHeight) {
- // Found the correct file
- String filename = (String) pair.getKey();
- return filename;
+ if (height >= startHeight && height <= endHeight) {
+ // Found the correct file
+ String filename = (String) pair.getKey();
+ return filename;
+ }
}
}
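The change above wraps the whole filename lookup in synchronized (this.fileListCache) so the cache cannot be modified mid-iteration. One point worth flagging: the defensive skip carried over from the old code uses &&, which can never trigger (and would throw if the entry really were null); a stand-alone version of the lookup would normally use ||, as in the sketch below (the cache type and class name are simplified stand-ins for the real Triple-based cache):

    import java.util.HashMap;
    import java.util.Map;

    public class ArchiveFileIndex {
        // filename -> { startHeight, endHeight }
        private final Map<String, int[]> fileListCache = new HashMap<>();

        /** Return the archive filename whose height range contains the given height, or null if none. */
        public String getFilenameForHeight(int height) {
            synchronized (this.fileListCache) {
                for (Map.Entry<String, int[]> entry : this.fileListCache.entrySet()) {
                    // Skip incomplete entries; note || rather than && so the guard actually works
                    if (entry == null || entry.getKey() == null || entry.getValue() == null)
                        continue;

                    int startHeight = entry.getValue()[0];
                    int endHeight = entry.getValue()[1];

                    if (height >= startHeight && height <= endHeight)
                        return entry.getKey(); // found the file covering this height
                }
            }
            return null;
        }
    }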
diff --git a/src/main/java/org/qortal/repository/BlockArchiveWriter.java b/src/main/java/org/qortal/repository/BlockArchiveWriter.java
index 39c28fd6..5127bf9b 100644
--- a/src/main/java/org/qortal/repository/BlockArchiveWriter.java
+++ b/src/main/java/org/qortal/repository/BlockArchiveWriter.java
@@ -5,6 +5,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.block.Block;
import org.qortal.controller.Controller;
+import org.qortal.controller.Synchronizer;
import org.qortal.data.block.BlockArchiveData;
import org.qortal.data.block.BlockData;
import org.qortal.settings.Settings;
@@ -100,7 +101,7 @@ public class BlockArchiveWriter {
if (Controller.isStopping()) {
return BlockArchiveWriteResult.STOPPING;
}
- if (Controller.getInstance().isSynchronizing()) {
+ if (Synchronizer.getInstance().isSynchronizing()) {
continue;
}
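The single functional change in this hunk is that the archive writer now asks Synchronizer, rather than Controller, whether a sync is in progress. For context, a hedged sketch of the pause-while-syncing pattern the surrounding loop follows; every name below is a stand-in rather than the real Qortal API, and the back-off sleep is added purely for illustration:

    public class ArchiveWriteLoop {

        enum Result { OK, STOPPING }

        interface StatusSource {
            boolean isStopping();
            boolean isSynchronizing();
        }

        /** Wait until the node is neither stopping nor synchronizing before doing archive work. */
        static Result waitUntilIdle(StatusSource status) throws InterruptedException {
            while (true) {
                if (status.isStopping())
                    return Result.STOPPING;   // shut down promptly

                if (status.isSynchronizing()) {
                    Thread.sleep(1000L);      // back off while a block sync is running
                    continue;
                }

                return Result.OK;             // safe to write to the archive
            }
        }
    }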
diff --git a/src/main/java/org/qortal/repository/TransactionRepository.java b/src/main/java/org/qortal/repository/TransactionRepository.java
index b0e3a864..20096eb8 100644
--- a/src/main/java/org/qortal/repository/TransactionRepository.java
+++ b/src/main/java/org/qortal/repository/TransactionRepository.java
@@ -108,6 +108,23 @@ public interface TransactionRepository {
public List<byte[]> getSignaturesMatchingCriteria(TransactionType txType, byte[] publicKey,
Integer minBlockHeight, Integer maxBlockHeight) throws DataException;
+ /**
+ * Returns signatures for transactions that match search criteria.
+ *
+ * Alternate version that allows for custom where clauses and bind params.
+ * Only use for very specific use cases, such as the names integrity check.
+ * Not advised to be used otherwise, given that it could be possible for
+ * unsanitized inputs to be passed in if not careful.
+ *
+ * @param txType
+ * @param whereClauses
+ * @param bindParams
+ * @return
+ * @throws DataException
+ */
+ public List<byte[]> getSignaturesMatchingCustomCriteria(TransactionType txType, List<String> whereClauses,
+ List