Merge remote-tracking branch 'qortal-data/master' into qdn

CalDescent 2022-01-08 12:29:48 +00:00
commit 0ced712974
151 changed files with 16274 additions and 794 deletions

3
.gitignore vendored
View File

@ -28,3 +28,6 @@
/WindowsInstaller/Install Files/qortal.jar
/*.7z
/tmp
/data*
/src/test/resources/arbitrary/*/.qortal/cache
apikey.txt

View File

@ -1,4 +1,10 @@
# Qortal Project - Official Repo
# Qortal Data Node
## Important
This code is unfinished, and the official genesis block for the data chain has not been created yet.
Therefore it is only possible to use this code if you first create your own test chain. I would
highly recommend waiting until the code is in a more complete state before trying to run it.
## Build / run

View File

@ -61,7 +61,7 @@ appender.rolling.type = RollingFile
appender.rolling.name = FILE
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
appender.rolling.filePattern = ${dirname:-}${filename}.%i
appender.rolling.filePattern = ./${filename}.%i
appender.rolling.policy.type = SizeBasedTriggeringPolicy
appender.rolling.policy.size = 4MB
# Set the immediate flush to true (default)

20
pom.xml
View File

@ -16,18 +16,21 @@
<commons-text.version>1.8</commons-text.version>
<commons-io.version>2.6</commons-io.version>
<commons-compress.version>1.21</commons-compress.version>
<commons-lang3.version>3.12.0</commons-lang3.version>
<xz.version>1.9</xz.version>
<dagger.version>1.2.2</dagger.version>
<guava.version>28.1-jre</guava.version>
<hsqldb.version>2.5.1</hsqldb.version>
<jersey.version>2.29.1</jersey.version>
<jetty.version>9.4.29.v20200521</jetty.version>
<log4j.version>2.12.1</log4j.version>
<log4j.version>2.17.1</log4j.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<slf4j.version>1.7.12</slf4j.version>
<swagger-api.version>2.0.9</swagger-api.version>
<swagger-ui.version>3.23.8</swagger-ui.version>
<package-info-maven-plugin.version>1.1.0</package-info-maven-plugin.version>
<jsoup.version>1.13.1</jsoup.version>
<java-diff-utils.version>4.10</java-diff-utils.version>
</properties>
<build>
<sourceDirectory>src/main/java</sourceDirectory>
@ -462,6 +465,11 @@
<artifactId>commons-compress</artifactId>
<version>${commons-compress.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>${commons-lang3.version}</version>
</dependency>
<dependency>
<groupId>org.tukaani</groupId>
<artifactId>xz</artifactId>
@ -667,5 +675,15 @@
<artifactId>bctls-jdk15on</artifactId>
<version>${bouncycastle.version}</version>
</dependency>
<dependency>
<groupId>org.jsoup</groupId>
<artifactId>jsoup</artifactId>
<version>${jsoup.version}</version>
</dependency>
<dependency>
<groupId>io.github.java-diff-utils</groupId>
<artifactId>java-diff-utils</artifactId>
<version>${java-diff-utils.version}</version>
</dependency>
</dependencies>
</project>

View File

@ -132,7 +132,11 @@ public enum ApiError {
FOREIGN_BLOCKCHAIN_TOO_SOON(1203, 408),
// Trade portal
ORDER_SIZE_TOO_SMALL(1300, 402);
ORDER_SIZE_TOO_SMALL(1300, 402),
// Data
FILE_NOT_FOUND(1401, 404),
NO_REPLY(1402, 404);
private static final Map<Integer, ApiError> map = stream(ApiError.values()).collect(toMap(apiError -> apiError.code, apiError -> apiError));

View File

@ -0,0 +1,98 @@
package org.qortal.api;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.SecureRandom;
public class ApiKey {
private String apiKey;
public ApiKey() throws IOException {
this.load();
}
public void generate() throws IOException {
byte[] apiKey = new byte[16];
new SecureRandom().nextBytes(apiKey);
this.apiKey = Base58.encode(apiKey);
this.save();
}
/* Filesystem */
private Path getFilePath() {
return Paths.get(Settings.getInstance().getApiKeyPath(), "apikey.txt");
}
private boolean load() throws IOException {
Path path = this.getFilePath();
File apiKeyFile = new File(path.toString());
if (!apiKeyFile.exists()) {
// Try settings - to allow legacy API keys to be supported
return this.loadLegacyApiKey();
}
try {
this.apiKey = new String(Files.readAllBytes(path));
} catch (IOException e) {
throw new IOException(String.format("Couldn't read contents from file %s", path.toString()));
}
return true;
}
private boolean loadLegacyApiKey() {
String legacyApiKey = Settings.getInstance().getApiKey();
if (legacyApiKey != null && !legacyApiKey.isEmpty()) {
this.apiKey = Settings.getInstance().getApiKey();
try {
// Save it to the apikey file
this.save();
} catch (IOException e) {
// Ignore failures as it will be reloaded from settings next time
}
return true;
}
return false;
}
public void save() throws IOException {
if (this.apiKey == null || this.apiKey.isEmpty()) {
throw new IllegalStateException("Unable to save a blank API key");
}
Path filePath = this.getFilePath();
BufferedWriter writer = new BufferedWriter(new FileWriter(filePath.toString()));
writer.write(this.apiKey);
writer.close();
}
public boolean generated() {
return (this.apiKey != null);
}
public boolean exists() {
return this.getFilePath().toFile().exists();
}
@Override
public String toString() {
return this.apiKey;
}
}
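
Below is a minimal usage sketch for the ApiKey class above. It assumes only that Settings.getInstance().getApiKeyPath() resolves to a writable directory; everything else follows the methods shown in the diff: the constructor loads apikey.txt (or falls back to a legacy key from settings), generate() creates and persists a new random Base58 key, and toString() exposes it.

import java.io.IOException;
import org.qortal.api.ApiKey;

public class ApiKeyExample {
    public static void main(String[] args) throws IOException {
        // Loads apikey.txt if present, otherwise falls back to any legacy key in settings
        ApiKey apiKey = new ApiKey();
        if (!apiKey.generated()) {
            // No key on disk and no legacy key in settings, so create and save one
            apiKey.generate();
        }
        // The Base58-encoded key is what callers supply in the X-API-KEY header
        System.out.println("Current API key: " + apiKey);
    }
}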

View File

@ -14,8 +14,7 @@ import java.security.SecureRandom;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.checkerframework.checker.units.qual.A;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.rewrite.handler.RedirectPatternRule;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
@ -52,12 +51,11 @@ import org.qortal.settings.Settings;
public class ApiService {
private static final Logger LOGGER = LogManager.getLogger(ApiService.class);
private static ApiService instance;
private final ResourceConfig config;
private Server server;
private ApiKey apiKey;
private ApiService() {
this.config = new ResourceConfig();
@ -78,6 +76,15 @@ public class ApiService {
return this.config.getClasses();
}
public void setApiKey(ApiKey apiKey) {
this.apiKey = apiKey;
}
public ApiKey getApiKey() {
return this.apiKey;
}
public void start() {
try {
// Create API server
@ -207,9 +214,6 @@ public class ApiService {
context.addServlet(TradeBotWebSocket.class, "/websockets/crosschain/tradebot");
context.addServlet(PresenceWebSocket.class, "/websockets/presence");
// Warn about API security if needed
this.checkApiSecurity();
// Start server
this.server.start();
} catch (Exception e) {
@ -229,23 +233,4 @@ public class ApiService {
this.server = null;
}
private void checkApiSecurity() {
// Warn about API security if needed
boolean allConnectionsAllowed = false;
if (Settings.getInstance().isApiKeyDisabled()) {
for (String pattern : Settings.getInstance().getApiWhitelist()) {
if (pattern.startsWith("0.0.0.0/") || pattern.startsWith("::/") || pattern.endsWith("/0")) {
allConnectionsAllowed = true;
}
}
if (allConnectionsAllowed) {
LOGGER.warn("Warning: API key validation is currently disabled, and the API whitelist " +
"is allowing all connections. This can be a security risk.");
LOGGER.warn("To fix, set the apiKeyDisabled setting to false, or allow only specific local " +
"IP addresses using the apiWhitelist setting.");
}
}
}
}
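
A hypothetical wiring sketch (not part of this commit) showing how node startup code might use the new setApiKey()/getApiKey() accessors, assuming the usual ApiService.getInstance() singleton accessor: the key is loaded once and attached to the service before the Jetty server is started, so that Security.getApiKey() can find it later.

import java.io.IOException;
import org.qortal.api.ApiKey;
import org.qortal.api.ApiService;

public class ApiStartupExample {
    public static void main(String[] args) throws IOException {
        ApiService apiService = ApiService.getInstance();
        // Load (or fall back to a legacy) key and make it available to Security.getApiKey()
        apiService.setApiKey(new ApiKey());
        // Start the Jetty-based API server on the configured bind address and port
        apiService.start();
    }
}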

View File

@ -0,0 +1,171 @@
package org.qortal.api;
import io.swagger.v3.jaxrs2.integration.resources.OpenApiResource;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
import org.eclipse.jetty.rewrite.handler.RewritePatternRule;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.ErrorHandler;
import org.eclipse.jetty.server.handler.InetAccessHandler;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.servlets.CrossOriginFilter;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.settings.Settings;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.security.SecureRandom;
public class DomainMapService {
private static DomainMapService instance;
private final ResourceConfig config;
private Server server;
private DomainMapService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.domainmap.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
}
public static DomainMapService getInstance() {
if (instance == null)
instance = new DomainMapService();
return instance;
}
public Iterable<Class<?>> getResources() {
return this.config.getClasses();
}
public void start() {
try {
// Create API server
// SSL support if requested
String keystorePathname = Settings.getInstance().getSslKeystorePathname();
String keystorePassword = Settings.getInstance().getSslKeystorePassword();
if (keystorePathname != null && keystorePassword != null) {
// SSL version
if (!Files.isReadable(Path.of(keystorePathname)))
throw new RuntimeException("Failed to start SSL API due to broken keystore");
// BouncyCastle-specific SSLContext build
SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE");
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC");
try (InputStream keystoreStream = Files.newInputStream(Paths.get(keystorePathname))) {
keyStore.load(keystoreStream, keystorePassword.toCharArray());
}
keyManagerFactory.init(keyStore, keystorePassword.toCharArray());
sslContext.init(keyManagerFactory.getKeyManagers(), null, new SecureRandom());
SslContextFactory.Server sslContextFactory = new SslContextFactory.Server();
sslContextFactory.setSslContext(sslContext);
this.server = new Server();
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSecureScheme("https");
httpConfig.setSecurePort(Settings.getInstance().getDomainMapPort());
SecureRequestCustomizer src = new SecureRequestCustomizer();
httpConfig.addCustomizer(src);
HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfig);
SslConnectionFactory sslConnectionFactory = new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString());
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getDomainMapPort());
this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getDomainMapPort());
this.server = new Server(endpoint);
}
// Error handler
ErrorHandler errorHandler = new ApiErrorHandler();
this.server.setErrorHandler(errorHandler);
// Request logging
if (Settings.getInstance().isDomainMapLoggingEnabled()) {
RequestLogWriter logWriter = new RequestLogWriter("domainmap-requests.log");
logWriter.setAppend(true);
logWriter.setTimeZone("UTC");
RequestLog requestLog = new CustomRequestLog(logWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT);
this.server.setRequestLog(requestLog);
}
// Access handler (currently no whitelist is used)
InetAccessHandler accessHandler = new InetAccessHandler();
this.server.setHandler(accessHandler);
// URL rewriting
RewriteHandler rewriteHandler = new RewriteHandler();
accessHandler.setHandler(rewriteHandler);
// Context
ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SESSIONS);
context.setContextPath("/");
rewriteHandler.setHandler(context);
// Cross-origin resource sharing
FilterHolder corsFilterHolder = new FilterHolder(CrossOriginFilter.class);
corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, "*");
corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, "GET, POST, DELETE");
corsFilterHolder.setInitParameter(CrossOriginFilter.CHAIN_PREFLIGHT_PARAM, "false");
context.addFilter(corsFilterHolder, "/*", null);
// API servlet
ServletContainer container = new ServletContainer(this.config);
ServletHolder apiServlet = new ServletHolder(container);
apiServlet.setInitOrder(1);
context.addServlet(apiServlet, "/*");
// Start server
this.server.start();
} catch (Exception e) {
// Failed to start
throw new RuntimeException("Failed to start API", e);
}
}
public void stop() {
try {
// Stop server
this.server.stop();
} catch (Exception e) {
// Failed to stop
}
this.server = null;
}
}

View File

@ -0,0 +1,170 @@
package org.qortal.api;
import io.swagger.v3.jaxrs2.integration.resources.OpenApiResource;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.rewrite.handler.RewriteHandler;
import org.eclipse.jetty.server.*;
import org.eclipse.jetty.server.handler.ErrorHandler;
import org.eclipse.jetty.server.handler.InetAccessHandler;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.servlets.CrossOriginFilter;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.qortal.api.resource.AnnotationPostProcessor;
import org.qortal.api.resource.ApiDefinition;
import org.qortal.settings.Settings;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.security.SecureRandom;
public class GatewayService {
private static GatewayService instance;
private final ResourceConfig config;
private Server server;
private GatewayService() {
this.config = new ResourceConfig();
this.config.packages("org.qortal.api.gateway.resource");
this.config.register(OpenApiResource.class);
this.config.register(ApiDefinition.class);
this.config.register(AnnotationPostProcessor.class);
}
public static GatewayService getInstance() {
if (instance == null)
instance = new GatewayService();
return instance;
}
public Iterable<Class<?>> getResources() {
return this.config.getClasses();
}
public void start() {
try {
// Create API server
// SSL support if requested
String keystorePathname = Settings.getInstance().getSslKeystorePathname();
String keystorePassword = Settings.getInstance().getSslKeystorePassword();
if (keystorePathname != null && keystorePassword != null) {
// SSL version
if (!Files.isReadable(Path.of(keystorePathname)))
throw new RuntimeException("Failed to start SSL API due to broken keystore");
// BouncyCastle-specific SSLContext build
SSLContext sslContext = SSLContext.getInstance("TLS", "BCJSSE");
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX", "BCJSSE");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType(), "BC");
try (InputStream keystoreStream = Files.newInputStream(Paths.get(keystorePathname))) {
keyStore.load(keystoreStream, keystorePassword.toCharArray());
}
keyManagerFactory.init(keyStore, keystorePassword.toCharArray());
sslContext.init(keyManagerFactory.getKeyManagers(), null, new SecureRandom());
SslContextFactory.Server sslContextFactory = new SslContextFactory.Server();
sslContextFactory.setSslContext(sslContext);
this.server = new Server();
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSecureScheme("https");
httpConfig.setSecurePort(Settings.getInstance().getGatewayPort());
SecureRequestCustomizer src = new SecureRequestCustomizer();
httpConfig.addCustomizer(src);
HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfig);
SslConnectionFactory sslConnectionFactory = new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString());
ServerConnector portUnifiedConnector = new ServerConnector(this.server,
new DetectorConnectionFactory(sslConnectionFactory),
httpConnectionFactory);
portUnifiedConnector.setHost(Settings.getInstance().getBindAddress());
portUnifiedConnector.setPort(Settings.getInstance().getGatewayPort());
this.server.addConnector(portUnifiedConnector);
} else {
// Non-SSL
InetAddress bindAddr = InetAddress.getByName(Settings.getInstance().getBindAddress());
InetSocketAddress endpoint = new InetSocketAddress(bindAddr, Settings.getInstance().getGatewayPort());
this.server = new Server(endpoint);
}
// Error handler
ErrorHandler errorHandler = new ApiErrorHandler();
this.server.setErrorHandler(errorHandler);
// Request logging
if (Settings.getInstance().isGatewayLoggingEnabled()) {
RequestLogWriter logWriter = new RequestLogWriter("gateway-requests.log");
logWriter.setAppend(true);
logWriter.setTimeZone("UTC");
RequestLog requestLog = new CustomRequestLog(logWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT);
this.server.setRequestLog(requestLog);
}
// Access handler (currently no whitelist is used)
InetAccessHandler accessHandler = new InetAccessHandler();
this.server.setHandler(accessHandler);
// URL rewriting
RewriteHandler rewriteHandler = new RewriteHandler();
accessHandler.setHandler(rewriteHandler);
// Context
ServletContextHandler context = new ServletContextHandler(ServletContextHandler.NO_SESSIONS);
context.setContextPath("/");
rewriteHandler.setHandler(context);
// Cross-origin resource sharing
FilterHolder corsFilterHolder = new FilterHolder(CrossOriginFilter.class);
corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, "*");
corsFilterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, "GET, POST, DELETE");
corsFilterHolder.setInitParameter(CrossOriginFilter.CHAIN_PREFLIGHT_PARAM, "false");
context.addFilter(corsFilterHolder, "/*", null);
// API servlet
ServletContainer container = new ServletContainer(this.config);
ServletHolder apiServlet = new ServletHolder(container);
apiServlet.setInitOrder(1);
context.addServlet(apiServlet, "/*");
// Start server
this.server.start();
} catch (Exception e) {
// Failed to start
throw new RuntimeException("Failed to start API", e);
}
}
public void stop() {
try {
// Stop server
this.server.stop();
} catch (Exception e) {
// Failed to stop
}
this.server = null;
}
}

View File

@ -0,0 +1,45 @@
package org.qortal.api;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
public class HTMLParser {
private static final Logger LOGGER = LogManager.getLogger(HTMLParser.class);
private String linkPrefix;
private byte[] data;
public HTMLParser(String resourceId, String inPath, String prefix, boolean usePrefix, byte[] data) {
String inPathWithoutFilename = inPath.substring(0, inPath.lastIndexOf('/'));
this.linkPrefix = usePrefix ? String.format("%s/%s%s", prefix, resourceId, inPathWithoutFilename) : "";
this.data = data;
}
public void setDocumentBaseUrl() {
String fileContents = new String(data);
Document document = Jsoup.parse(fileContents);
String baseUrl = this.linkPrefix + "/";
Elements head = document.getElementsByTag("head");
if (!head.isEmpty()) {
String baseElement = String.format("<base href=\"%s\">", baseUrl);
head.get(0).prepend(baseElement);
}
String html = document.html();
this.data = html.getBytes();
}
public static boolean isHtmlFile(String path) {
if (path.endsWith(".html") || path.endsWith(".htm")) {
return true;
}
return false;
}
public byte[] getData() {
return this.data;
}
}
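
An illustrative sketch of the base-tag injection above, using hypothetical example values ("MyWebsite", "/index.html", the "/site" prefix): for HTML served under a gateway-style prefix, setDocumentBaseUrl() prepends a <base> element to <head> so that relative links resolve against the rendered resource rather than the node root.

import java.nio.charset.StandardCharsets;
import org.qortal.api.HTMLParser;

public class HTMLParserExample {
    public static void main(String[] args) {
        byte[] html = "<html><head><title>Home</title></head><body><a href=\"page2.html\">Next</a></body></html>"
                .getBytes(StandardCharsets.UTF_8);
        // resourceId, inPath, prefix and usePrefix are hypothetical example values
        HTMLParser parser = new HTMLParser("MyWebsite", "/index.html", "/site", true, html);
        parser.setDocumentBaseUrl();
        // The <head> now begins with: <base href="/site/MyWebsite/">
        System.out.println(new String(parser.getData(), StandardCharsets.UTF_8));
    }
}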

View File

@ -1,38 +1,98 @@
package org.qortal.api;
import org.qortal.arbitrary.ArbitraryDataResource;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataRenderManager;
import org.qortal.settings.Settings;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import javax.servlet.http.HttpServletRequest;
import org.qortal.settings.Settings;
public abstract class Security {
public static final String API_KEY_HEADER = "X-API-KEY";
public static void checkApiCallAllowed(HttpServletRequest request) {
// If API key checking has been disabled, we will allow the request in all cases
boolean isApiKeyDisabled = Settings.getInstance().isApiKeyDisabled();
if (isApiKeyDisabled)
return;
// We may want to allow automatic authentication for local requests, if enabled in settings
boolean localAuthBypassEnabled = Settings.getInstance().isLocalAuthBypassEnabled();
if (localAuthBypassEnabled) {
try {
InetAddress remoteAddr = InetAddress.getByName(request.getRemoteAddr());
if (remoteAddr.isLoopbackAddress()) {
// Request originates from loopback address, so allow it
return;
}
} catch (UnknownHostException e) {
// Ignore failure, and fallback to API key authentication
}
}
String expectedApiKey = Settings.getInstance().getApiKey();
// Retrieve the API key
ApiKey apiKey = Security.getApiKey(request);
if (!apiKey.generated()) {
// No API key has been generated yet, so disallow sensitive API calls
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "API key not generated");
}
// We require an API key to be passed
String passedApiKey = request.getHeader(API_KEY_HEADER);
if (passedApiKey == null) {
// Try query string - this is needed to avoid a CORS preflight. See: https://stackoverflow.com/a/43881141
passedApiKey = request.getParameter("apiKey");
}
if (passedApiKey == null) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Missing 'X-API-KEY' header");
}
if ((expectedApiKey != null && !expectedApiKey.equals(passedApiKey)) ||
(passedApiKey != null && !passedApiKey.equals(expectedApiKey)))
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED);
// The API keys must match
if (!apiKey.toString().equals(passedApiKey)) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "API key invalid");
}
}
InetAddress remoteAddr;
public static void disallowLoopbackRequests(HttpServletRequest request) {
try {
remoteAddr = InetAddress.getByName(request.getRemoteAddr());
InetAddress remoteAddr = InetAddress.getByName(request.getRemoteAddr());
if (remoteAddr.isLoopbackAddress()) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Local requests not allowed");
}
} catch (UnknownHostException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED);
}
}
if (!remoteAddr.isLoopbackAddress())
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED);
public static void requirePriorAuthorization(HttpServletRequest request, String resourceId, Service service, String identifier) {
ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, service, identifier);
if (!ArbitraryDataRenderManager.getInstance().isAuthorized(resource)) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Call /render/authorize first");
}
}
public static void requirePriorAuthorizationOrApiKey(HttpServletRequest request, String resourceId, Service service, String identifier) {
try {
Security.checkApiCallAllowed(request);
} catch (ApiException e) {
// API call wasn't allowed, but maybe it was pre-authorized
Security.requirePriorAuthorization(request, resourceId, service, identifier);
}
}
public static ApiKey getApiKey(HttpServletRequest request) {
ApiKey apiKey = ApiService.getInstance().getApiKey();
if (apiKey == null) {
try {
apiKey = new ApiKey();
} catch (IOException e) {
// Couldn't load API key - so we need to treat it as not generated, and therefore unauthorized
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.UNAUTHORIZED);
}
ApiService.getInstance().setApiKey(apiKey);
}
return apiKey;
}
}
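
A hedged client-side sketch of what checkApiCallAllowed() expects: the key can be supplied either in the X-API-KEY header or, to avoid a CORS preflight, as an apiKey query-string parameter. The port (12393), the /peers/summary path and the key value are illustrative assumptions, not values defined by this commit.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ApiKeyClientExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String apiKey = "2mRGzeaH5SRoqTqZnhzL8zk"; // placeholder key for illustration only

        // Option 1: pass the key in the X-API-KEY header
        HttpRequest withHeader = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12393/peers/summary"))
                .header("X-API-KEY", apiKey)
                .GET()
                .build();

        // Option 2: pass the key as a query parameter, which avoids a CORS preflight
        HttpRequest withQueryParam = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12393/peers/summary?apiKey=" + apiKey))
                .GET()
                .build();

        System.out.println(client.send(withHeader, HttpResponse.BodyHandlers.ofString()).body());
        System.out.println(client.send(withQueryParam, HttpResponse.BodyHandlers.ofString()).body());
    }
}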

View File

@ -0,0 +1,58 @@
package org.qortal.api.domainmap.resource;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.ArbitraryDataRenderer;
import org.qortal.arbitrary.misc.Service;
import org.qortal.settings.Settings;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Context;
import java.util.Map;
@Path("/")
@Tag(name = "Gateway")
public class DomainMapResource {
@Context HttpServletRequest request;
@Context HttpServletResponse response;
@Context ServletContext context;
@GET
public HttpServletResponse getIndexByDomainMap() {
return this.getDomainMap("/");
}
@GET
@Path("{path:.*}")
public HttpServletResponse getPathByDomainMap(@PathParam("path") String inPath) {
return this.getDomainMap(inPath);
}
private HttpServletResponse getDomainMap(String inPath) {
Map<String, String> domainMap = Settings.getInstance().getSimpleDomainMap();
if (domainMap != null && domainMap.containsKey(request.getServerName())) {
// Build synchronously, so that we don't need to make the summary API endpoints available over
// the domain map server. This means that there will be no loading screen, but this is potentially
// preferred in this situation anyway (e.g. to avoid confusing search engine robots).
return this.get(domainMap.get(request.getServerName()), ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", false, false);
}
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async) {
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
return renderer.render();
}
}

View File

@ -0,0 +1,126 @@
package org.qortal.api.gateway.resource;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.api.Security;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.ArbitraryDataReader;
import org.qortal.arbitrary.ArbitraryDataRenderer;
import org.qortal.arbitrary.ArbitraryDataResource;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
@Path("/")
@Tag(name = "Gateway")
public class GatewayResource {
@Context HttpServletRequest request;
@Context HttpServletResponse response;
@Context ServletContext context;
/**
* We need to allow resource status checking (and building) via the gateway, as the node's API port
* may not be forwarded and will almost certainly not be authenticated. Since gateways allow for
* all resources to be loaded except those that are blocked, there is no need for authentication.
*/
@GET
@Path("/arbitrary/resource/status/{service}/{name}")
public ArbitraryResourceStatus getDefaultResourceStatus(@PathParam("service") Service service,
@PathParam("name") String name,
@QueryParam("build") Boolean build) {
return this.getStatus(service, name, null, build);
}
@GET
@Path("/arbitrary/resource/status/{service}/{name}/{identifier}")
public ArbitraryResourceStatus getResourceStatus(@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("identifier") String identifier,
@QueryParam("build") Boolean build) {
return this.getStatus(service, name, identifier, build);
}
private ArbitraryResourceStatus getStatus(Service service, String name, String identifier, Boolean build) {
// If "build=true" has been specified in the query string, build the resource before returning its status
if (build != null && build == true) {
ArbitraryDataReader reader = new ArbitraryDataReader(name, ArbitraryDataFile.ResourceIdType.NAME, service, null);
try {
if (!reader.isBuilding()) {
reader.loadSynchronously(false);
}
} catch (Exception e) {
// No need to handle exception, as it will be reflected in the status
}
}
ArbitraryDataResource resource = new ArbitraryDataResource(name, ResourceIdType.NAME, service, identifier);
return resource.getStatus();
}
@GET
public HttpServletResponse getRoot() {
return ArbitraryDataRenderer.getResponse(response, 200, "");
}
@GET
@Path("{name}/{path:.*}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathByName(@PathParam("name") String name,
@PathParam("path") String inPath) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "", true, true);
}
@GET
@Path("{name}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByName(@PathParam("name") String name) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "", true, true);
}
// Optional /site alternative for backwards support
@GET
@Path("/site/{name}/{path:.*}")
public HttpServletResponse getSitePathByName(@PathParam("name") String name,
@PathParam("path") String inPath) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, inPath, null, "/site", true, true);
}
@GET
@Path("/site/{name}")
public HttpServletResponse getSiteIndexByName(@PathParam("name") String name) {
// Block requests from localhost, to prevent websites/apps from running javascript that fetches unvetted data
Security.disallowLoopbackRequests(request);
return this.get(name, ResourceIdType.NAME, Service.WEBSITE, "/", null, "/site", true, true);
}
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async) {
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
return renderer.render();
}
}
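
A brief hedged sketch of the unauthenticated status/build flow described in the comment at the top of this class: requesting the status endpoint with build=true asks the gateway to build the resource synchronously (if it is not already building) before reporting its status. The gateway port, service and name below are assumed example values.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class GatewayStatusExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // No API key is required on the gateway; "WEBSITE", "MyWebsite" and port 80 are assumptions
        HttpRequest statusRequest = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:80/arbitrary/resource/status/WEBSITE/MyWebsite?build=true"))
                .GET()
                .build();
        // The response is an ArbitraryResourceStatus JSON document describing build progress
        System.out.println(client.send(statusRequest, HttpResponse.BodyHandlers.ofString()).body());
    }
}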

View File

@ -0,0 +1,15 @@
package org.qortal.api.model;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class PeersSummary {
public int inboundConnections;
public int outboundConnections;
public PeersSummary() {
}
}

View File

@ -39,12 +39,10 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.RollingFileAppender;
import org.checkerframework.checker.units.qual.A;
import org.qortal.account.Account;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.*;
import org.qortal.api.model.ActivitySummary;
import org.qortal.api.model.NodeInfo;
import org.qortal.api.model.NodeStatus;
@ -80,7 +78,8 @@ public class AdminResource {
@Path("/unused")
@Parameter(in = ParameterIn.PATH, name = "assetid", description = "Asset ID, 0 is native coin", schema = @Schema(type = "integer"))
@Parameter(in = ParameterIn.PATH, name = "otherassetid", description = "Asset ID, 0 is native coin", schema = @Schema(type = "integer"))
@Parameter(in = ParameterIn.PATH, name = "address", description = "an account address", example = "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v")
@Parameter(in = ParameterIn.PATH, name = "address", description = "An account address", example = "QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v")
@Parameter(in = ParameterIn.PATH, name = "path", description = "Local path to folder containing the files", schema = @Schema(type = "String", defaultValue = "/Users/user/Documents/MyStaticWebsite"))
@Parameter(in = ParameterIn.QUERY, name = "count", description = "Maximum number of entries to return, 0 means none", schema = @Schema(type = "integer", defaultValue = "20"))
@Parameter(in = ParameterIn.QUERY, name = "limit", description = "Maximum number of entries to return, 0 means unlimited", schema = @Schema(type = "integer", defaultValue = "20"))
@Parameter(in = ParameterIn.QUERY, name = "offset", description = "Starting entry in results, 0 is first entry", schema = @Schema(type = "integer"))
@ -716,4 +715,40 @@ public class AdminResource {
}
}
@POST
@Path("/apikey/generate")
@Operation(
summary = "Generate an API key",
description = "This request is unauthenticated if no API key has been generated yet. " +
"If an API key already exists, it needs to be passed as a header and this endpoint " +
"will then generate a new key which replaces the existing one.",
responses = {
@ApiResponse(
description = "API key string",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "string"))
)
}
)
@SecurityRequirement(name = "apiKey")
public String generateApiKey() {
ApiKey apiKey = Security.getApiKey(request);
// If the API key is already generated, we need to authenticate this request
if (apiKey.generated() && apiKey.exists()) {
Security.checkApiCallAllowed(request);
}
// Not generated yet - so we are safe to generate one
// FUTURE: we may want to restrict this to local/loopback only?
try {
apiKey.generate();
} catch (IOException e) {
throw ApiExceptionFactory.INSTANCE.createCustomException(request, ApiError.UNAUTHORIZED, "Unable to generate API key");
}
return apiKey.toString();
}
}
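
A hedged sketch of the generate/regenerate flow described in the @Operation annotation above, assuming the endpoint sits under the node's admin base path at /admin/apikey/generate on port 12393 (both assumptions): the first call needs no key, while a regeneration request must present the current key in the X-API-KEY header.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class GenerateApiKeyExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        URI endpoint = URI.create("http://localhost:12393/admin/apikey/generate"); // assumed base path and port

        // First call: no key exists yet, so the request is allowed and a new key is returned
        HttpRequest firstCall = HttpRequest.newBuilder(endpoint)
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        String newKey = client.send(firstCall, HttpResponse.BodyHandlers.ofString()).body();

        // Later calls: the existing key must be supplied, and a replacement key is returned
        HttpRequest regenerate = HttpRequest.newBuilder(endpoint)
                .header("X-API-KEY", newKey)
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        String replacementKey = client.send(regenerate, HttpResponse.BodyHandlers.ofString()).body();
        System.out.println("Replaced " + newKey + " with " + replacementKey);
    }
}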

File diff suppressed because it is too large

View File

@ -4,6 +4,7 @@ import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@ -43,6 +44,7 @@ public class BootstrapResource {
)
}
)
@SecurityRequirement(name = "apiKey")
public String createBootstrap() {
Security.checkApiCallAllowed(request);
@ -77,6 +79,7 @@ public class BootstrapResource {
)
}
)
@SecurityRequirement(name = "apiKey")
public boolean validateBootstrap() {
Security.checkApiCallAllowed(request);

View File

@ -5,6 +5,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.util.Arrays;
@ -79,6 +80,7 @@ public class CrossChainBitcoinACCTv1Resource {
}
)
@ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_DATA, ApiError.INVALID_REFERENCE, ApiError.TRANSFORMATION_ERROR, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String buildTrade(CrossChainBuildRequest tradeRequest) {
Security.checkApiCallAllowed(request);
@ -174,6 +176,7 @@ public class CrossChainBitcoinACCTv1Resource {
}
)
@ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String buildTradeMessage(CrossChainTradeRequest tradeRequest) {
Security.checkApiCallAllowed(request);
@ -257,6 +260,7 @@ public class CrossChainBitcoinACCTv1Resource {
}
)
@ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_DATA, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String buildRedeemMessage(CrossChainDualSecretRequest secretRequest) {
Security.checkApiCallAllowed(request);
@ -360,4 +364,4 @@ public class CrossChainBitcoinACCTv1Resource {
}
}
}
}

View File

@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.util.List;
@ -56,6 +57,7 @@ public class CrossChainBitcoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getBitcoinWalletBalance(String key58) {
Security.checkApiCallAllowed(request);
@ -94,6 +96,7 @@ public class CrossChainBitcoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public List<SimpleTransaction> getBitcoinWalletTransactions(String key58) {
Security.checkApiCallAllowed(request);
@ -130,6 +133,7 @@ public class CrossChainBitcoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String sendBitcoin(BitcoinSendRequest bitcoinSendRequest) {
Security.checkApiCallAllowed(request);
@ -164,4 +168,4 @@ public class CrossChainBitcoinResource {
return spendTransaction.getTxId().toString();
}
}
}

View File

@ -5,6 +5,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.api.ApiError;
@ -67,6 +68,7 @@ public class CrossChainDogecoinACCTv1Resource {
}
)
@ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_DATA, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public boolean buildRedeemMessage(CrossChainSecretRequest secretRequest) {
Security.checkApiCallAllowed(request);

View File

@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.bitcoinj.core.Transaction;
import org.qortal.api.ApiError;
@ -54,6 +55,7 @@ public class CrossChainDogecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getDogecoinWalletBalance(String key58) {
Security.checkApiCallAllowed(request);
@ -92,6 +94,7 @@ public class CrossChainDogecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public List<SimpleTransaction> getDogecoinWalletTransactions(String key58) {
Security.checkApiCallAllowed(request);
@ -128,6 +131,7 @@ public class CrossChainDogecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String sendBitcoin(DogecoinSendRequest dogecoinSendRequest) {
Security.checkApiCallAllowed(request);

View File

@ -4,6 +4,7 @@ import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.math.BigDecimal;
@ -105,6 +106,7 @@ public class CrossChainHtlcResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN})
@SecurityRequirement(name = "apiKey")
public CrossChainBitcoinyHTLCStatus checkHtlcStatus(@PathParam("blockchain") String blockchainName,
@PathParam("refundPKH") String refundPKH,
@PathParam("locktime") int lockTime,
@ -188,6 +190,7 @@ public class CrossChainHtlcResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN})
@SecurityRequirement(name = "apiKey")
public boolean redeemHtlc(@PathParam("ataddress") String atAddress) {
Security.checkApiCallAllowed(request);
@ -246,6 +249,7 @@ public class CrossChainHtlcResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN})
@SecurityRequirement(name = "apiKey")
public boolean redeemAllHtlc() {
Security.checkApiCallAllowed(request);
boolean success = false;
@ -430,6 +434,7 @@ public class CrossChainHtlcResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN})
@SecurityRequirement(name = "apiKey")
public boolean refundHtlc(@PathParam("ataddress") String atAddress) {
Security.checkApiCallAllowed(request);
@ -478,6 +483,7 @@ public class CrossChainHtlcResource {
}
)
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN})
@SecurityRequirement(name = "apiKey")
public boolean refundAllHtlc() {
Security.checkApiCallAllowed(request);
boolean success = false;

View File

@ -5,6 +5,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.api.ApiError;
@ -72,6 +73,7 @@ public class CrossChainLitecoinACCTv1Resource {
}
)
@ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_DATA, ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public boolean buildRedeemMessage(CrossChainSecretRequest secretRequest) {
Security.checkApiCallAllowed(request);

View File

@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.util.List;
@ -56,6 +57,7 @@ public class CrossChainLitecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String getLitecoinWalletBalance(String key58) {
Security.checkApiCallAllowed(request);
@ -94,6 +96,7 @@ public class CrossChainLitecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public List<SimpleTransaction> getLitecoinWalletTransactions(String key58) {
Security.checkApiCallAllowed(request);
@ -130,6 +133,7 @@ public class CrossChainLitecoinResource {
}
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_CRITERIA, ApiError.INVALID_ADDRESS, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE})
@SecurityRequirement(name = "apiKey")
public String sendBitcoin(LitecoinSendRequest litecoinSendRequest) {
Security.checkApiCallAllowed(request);
@ -164,4 +168,4 @@ public class CrossChainLitecoinResource {
return spendTransaction.getTxId().toString();
}
}
}

View File

@ -7,6 +7,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import java.util.List;
@ -69,6 +70,7 @@ public class CrossChainTradeBotResource {
}
)
@ApiErrors({ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public List<TradeBotData> getTradeBotStates(
@Parameter(
description = "Limit to specific blockchain",
@ -110,6 +112,7 @@ public class CrossChainTradeBotResource {
)
@ApiErrors({ApiError.INVALID_PUBLIC_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_CRITERIA, ApiError.INSUFFICIENT_BALANCE, ApiError.REPOSITORY_ISSUE, ApiError.ORDER_SIZE_TOO_SMALL})
@SuppressWarnings("deprecation")
@SecurityRequirement(name = "apiKey")
public String tradeBotCreator(TradeBotCreateRequest tradeBotCreateRequest) {
Security.checkApiCallAllowed(request);
@ -179,6 +182,7 @@ public class CrossChainTradeBotResource {
)
@ApiErrors({ApiError.INVALID_PRIVATE_KEY, ApiError.INVALID_ADDRESS, ApiError.INVALID_CRITERIA, ApiError.FOREIGN_BLOCKCHAIN_BALANCE_ISSUE, ApiError.FOREIGN_BLOCKCHAIN_NETWORK_ISSUE, ApiError.REPOSITORY_ISSUE})
@SuppressWarnings("deprecation")
@SecurityRequirement(name = "apiKey")
public String tradeBotResponder(TradeBotRespondRequest tradeBotRespondRequest) {
Security.checkApiCallAllowed(request);
@ -260,6 +264,7 @@ public class CrossChainTradeBotResource {
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String tradeBotDelete(String tradePrivateKey58) {
Security.checkApiCallAllowed(request);

View File

@ -6,6 +6,7 @@ import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.qortal.api.*;
@ -31,12 +32,10 @@ public class ListsResource {
HttpServletRequest request;
/* Address blacklist */
@POST
@Path("/blacklist/addresses")
@Path("/{listName}")
@Operation(
summary = "Add one or more QORT addresses to the local blacklist",
summary = "Add items to a new or existing list",
requestBody = @RequestBody(
required = true,
content = @Content(
@ -48,17 +47,23 @@ public class ListsResource {
),
responses = {
@ApiResponse(
description = "Returns true if all addresses were processed, false if any couldn't be " +
description = "Returns true if all items were processed, false if any couldn't be " +
"processed, or an exception on failure. If false or an exception is returned, " +
"the list will not be updated, and the request will need to be re-issued.",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
public String addAddressesToBlacklist(ListRequest listRequest) {
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String addItemstoList(@PathParam("listName") String listName,
ListRequest listRequest) {
Security.checkApiCallAllowed(request);
if (listName == null) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
}
if (listRequest == null || listRequest.items == null) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_CRITERIA);
}
@ -66,51 +71,33 @@ public class ListsResource {
int successCount = 0;
int errorCount = 0;
try (final Repository repository = RepositoryManager.getRepository()) {
for (String item : listRequest.items) {
for (String address : listRequest.items) {
if (!Crypto.isValidAddress(address)) {
errorCount++;
continue;
}
AccountData accountData = repository.getAccountRepository().getAccount(address);
// Not found?
if (accountData == null) {
errorCount++;
continue;
}
// Valid address, so go ahead and blacklist it
boolean success = ResourceListManager.getInstance().addToList("blacklist", "addresses", address, false);
if (success) {
successCount++;
}
else {
errorCount++;
}
boolean success = ResourceListManager.getInstance().addToList(listName, item, false);
if (success) {
successCount++;
}
else {
errorCount++;
}
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
if (successCount > 0 && errorCount == 0) {
// All were successful, so save the blacklist
ResourceListManager.getInstance().saveList("blacklist", "addresses");
// All were successful, so save the list
ResourceListManager.getInstance().saveList(listName);
return "true";
}
else {
// Something went wrong, so revert
ResourceListManager.getInstance().revertList("blacklist", "addresses");
ResourceListManager.getInstance().revertList(listName);
return "false";
}
}
@DELETE
@Path("/blacklist/addresses")
@Path("/{listName}")
@Operation(
summary = "Remove one or more QORT addresses from the local blacklist",
summary = "Remove one or more items from a list",
requestBody = @RequestBody(
required = true,
content = @Content(
@ -122,15 +109,17 @@ public class ListsResource {
),
responses = {
@ApiResponse(
description = "Returns true if all addresses were processed, false if any couldn't be " +
description = "Returns true if all items were processed, false if any couldn't be " +
"processed, or an exception on failure. If false or an exception is returned, " +
"the list will not be updated, and the request will need to be re-issued.",
content = @Content(mediaType = MediaType.TEXT_PLAIN, schema = @Schema(type = "boolean"))
)
}
)
@ApiErrors({ApiError.INVALID_ADDRESS, ApiError.ADDRESS_UNKNOWN, ApiError.REPOSITORY_ISSUE})
public String removeAddressesFromBlacklist(ListRequest listRequest) {
@ApiErrors({ApiError.INVALID_CRITERIA, ApiError.REPOSITORY_ISSUE})
@SecurityRequirement(name = "apiKey")
public String removeItemsFromList(@PathParam("listName") String listName,
ListRequest listRequest) {
Security.checkApiCallAllowed(request);
if (listRequest == null || listRequest.items == null) {
@ -140,62 +129,46 @@ public class ListsResource {
int successCount = 0;
int errorCount = 0;
try (final Repository repository = RepositoryManager.getRepository()) {
for (String address : listRequest.items) {
for (String address : listRequest.items) {
if (!Crypto.isValidAddress(address)) {
errorCount++;
continue;
}
AccountData accountData = repository.getAccountRepository().getAccount(address);
// Not found?
if (accountData == null) {
errorCount++;
continue;
}
// Valid address, so go ahead and blacklist it
// Don't save as we will do this at the end of the process
boolean success = ResourceListManager.getInstance().removeFromList("blacklist", "addresses", address, false);
if (success) {
successCount++;
}
else {
errorCount++;
}
// Attempt to remove the item
// Don't save as we will do this at the end of the process
boolean success = ResourceListManager.getInstance().removeFromList(listName, address, false);
if (success) {
successCount++;
}
else {
errorCount++;
}
} catch (DataException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE, e);
}
if (successCount > 0 && errorCount == 0) {
// All were successful, so save the blacklist
ResourceListManager.getInstance().saveList("blacklist", "addresses");
// All were successful, so save the list
ResourceListManager.getInstance().saveList(listName);
return "true";
}
else {
// Something went wrong, so revert
ResourceListManager.getInstance().revertList("blacklist", "addresses");
ResourceListManager.getInstance().revertList(listName);
return "false";
}
}
@GET
@Path("/blacklist/addresses")
@Path("/{listName}")
@Operation(
summary = "Fetch the list of blacklisted addresses",
summary = "Fetch all items in a list",
responses = {
@ApiResponse(
description = "A JSON array of addresses",
description = "A JSON array of items",
content = @Content(mediaType = MediaType.APPLICATION_JSON, array = @ArraySchema(schema = @Schema(implementation = String.class)))
)
}
)
public String getAddressBlacklist() {
@SecurityRequirement(name = "apiKey")
public String getItemsInList(@PathParam("listName") String listName) {
Security.checkApiCallAllowed(request);
return ResourceListManager.getInstance().getJSONStringForList("blacklist", "addresses");
return ResourceListManager.getInstance().getJSONStringForList(listName);
}
}
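
A hedged example of the generalised list API above: the list name comes from the path and the entries are posted in the request body's items field. The /lists base path, port, list name, JSON shape of ListRequest and the API key are assumed example values; the address is the example address used elsewhere in this diff.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ListsExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String apiKey = "2mRGzeaH5SRoqTqZnhzL8zk"; // placeholder key for illustration only

        // Add an address to a list named "blockedAddresses" (assumed name and base path)
        String body = "{\"items\": [\"QgV4s3xnzLhVBEJxcYui4u4q11yhUHsd9v\"]}";
        HttpRequest addItems = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12393/lists/blockedAddresses"))
                .header("Content-Type", "application/json")
                .header("X-API-KEY", apiKey)
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        // Returns "true" only if every item was processed; otherwise the list is reverted
        System.out.println(client.send(addItems, HttpResponse.BodyHandlers.ofString()).body());
    }
}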

View File

@ -23,12 +23,9 @@ import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.qortal.api.ApiError;
import org.qortal.api.ApiErrors;
import org.qortal.api.ApiException;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.api.*;
import org.qortal.api.model.ConnectedPeer;
import org.qortal.api.model.PeersSummary;
import org.qortal.controller.Controller;
import org.qortal.controller.Synchronizer;
import org.qortal.controller.Synchronizer.SynchronizationResult;
@ -338,4 +335,39 @@ public class PeersResource {
}
}
@GET
@Path("/summary")
@Operation(
summary = "Returns total inbound and outbound connections for connected peers",
responses = {
@ApiResponse(
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
array = @ArraySchema(
schema = @Schema(
implementation = PeersSummary.class
)
)
)
)
}
)
@SecurityRequirement(name = "apiKey")
public PeersSummary peersSummary() {
Security.checkApiCallAllowed(request);
PeersSummary peersSummary = new PeersSummary();
List<Peer> connectedPeers = Network.getInstance().getConnectedPeers().stream().collect(Collectors.toList());
for (Peer peer : connectedPeers) {
if (peer.isOutbound()) {
peersSummary.outboundConnections++;
}
else {
peersSummary.inboundConnections++;
}
}
return peersSummary;
}
}

View File

@ -0,0 +1,195 @@
package org.qortal.api.resource;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import java.io.*;
import java.nio.file.Paths;
import java.util.Map;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.parameters.RequestBody;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.ApiError;
import org.qortal.api.ApiExceptionFactory;
import org.qortal.api.Security;
import org.qortal.arbitrary.misc.Service;
import org.qortal.arbitrary.*;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.controller.arbitrary.ArbitraryDataRenderManager;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.utils.Base58;
@Path("/render")
@Tag(name = "Render")
public class RenderResource {
private static final Logger LOGGER = LogManager.getLogger(RenderResource.class);
@Context HttpServletRequest request;
@Context HttpServletResponse response;
@Context ServletContext context;
@POST
@Path("/preview")
@Operation(
summary = "Generate preview URL based on a user-supplied path and service",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string", example = "/Users/user/Documents/MyStaticWebsite"
)
)
),
responses = {
@ApiResponse(
description = "a temporary URL to preview the website",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
@SecurityRequirement(name = "apiKey")
public String preview(String directoryPath) {
Security.checkApiCallAllowed(request);
Method method = Method.PUT;
Compression compression = Compression.ZIP;
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(Paths.get(directoryPath), null, Service.WEBSITE, null, method, compression);
try {
arbitraryDataWriter.save();
} catch (IOException | DataException | InterruptedException | MissingDataException e) {
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.REPOSITORY_ISSUE);
} catch (RuntimeException e) {
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
}
ArbitraryDataFile arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile();
if (arbitraryDataFile != null) {
String digest58 = arbitraryDataFile.digest58();
if (digest58 != null) {
return "http://localhost:12393/render/hash/" + digest58 + "?secret=" + Base58.encode(arbitraryDataFile.getSecret());
}
}
return "Unable to generate preview URL";
}
@POST
@Path("/authorize/{resourceId}")
@SecurityRequirement(name = "apiKey")
public boolean authorizeResource(@PathParam("resourceId") String resourceId) {
Security.checkApiCallAllowed(request);
ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, null, null);
ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource);
return true;
}
@POST
@Path("authorize/{service}/{resourceId}")
@SecurityRequirement(name = "apiKey")
public boolean authorizeResource(@PathParam("service") Service service,
@PathParam("resourceId") String resourceId) {
Security.checkApiCallAllowed(request);
ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, service, null);
ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource);
return true;
}
@POST
@Path("authorize/{service}/{resourceId}/{identifier}")
@SecurityRequirement(name = "apiKey")
public boolean authorizeResource(@PathParam("service") Service service,
@PathParam("resourceId") String resourceId,
@PathParam("identifier") String identifier) {
Security.checkApiCallAllowed(request);
ArbitraryDataResource resource = new ArbitraryDataResource(resourceId, null, service, identifier);
ArbitraryDataRenderManager.getInstance().addToAuthorizedResources(resource);
return true;
}
@GET
@Path("/signature/{signature}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexBySignature(@PathParam("signature") String signature) {
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, "/", null, "/render/signature", true, true);
}
@GET
@Path("/signature/{signature}/{path:.*}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathBySignature(@PathParam("signature") String signature, @PathParam("path") String inPath) {
Security.requirePriorAuthorization(request, signature, Service.WEBSITE, null);
return this.get(signature, ResourceIdType.SIGNATURE, null, inPath,null, "/render/signature", true, true);
}
@GET
@Path("/hash/{hash}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByHash(@PathParam("hash") String hash58, @QueryParam("secret") String secret58) {
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, "/", secret58, "/render/hash", true, false);
}
@GET
@Path("/hash/{hash}/{path:.*}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathByHash(@PathParam("hash") String hash58, @PathParam("path") String inPath,
@QueryParam("secret") String secret58) {
Security.requirePriorAuthorization(request, hash58, Service.WEBSITE, null);
return this.get(hash58, ResourceIdType.FILE_HASH, Service.WEBSITE, inPath, secret58, "/render/hash", true, false);
}
@GET
@Path("{service}/{name}/{path:.*}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getPathByName(@PathParam("service") Service service,
@PathParam("name") String name,
@PathParam("path") String inPath) {
Security.requirePriorAuthorization(request, name, service, null);
String prefix = String.format("/render/%s", service);
return this.get(name, ResourceIdType.NAME, service, inPath, null, prefix, true, true);
}
@GET
@Path("{service}/{name}")
@SecurityRequirement(name = "apiKey")
public HttpServletResponse getIndexByName(@PathParam("service") Service service,
@PathParam("name") String name) {
Security.requirePriorAuthorization(request, name, service, null);
String prefix = String.format("/render/%s", service);
return this.get(name, ResourceIdType.NAME, service, "/", null, prefix, true, true);
}
private HttpServletResponse get(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async) {
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(resourceId, resourceIdType, service, inPath,
secret58, prefix, usePrefix, async, request, response, context);
return renderer.render();
}
}
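For illustration, a minimal client sketch of the authorize-then-render flow exposed by the endpoints above, assuming a node on API port 12393 (the port used by the preview URL), that "ExampleName" is a hypothetical registered name hosting a WEBSITE resource, and that the key from apikey.txt is sent in an X-API-KEY header (the header name is an assumption; Security.checkApiCallAllowed defines the real requirement):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Files;
import java.nio.file.Paths;

public class RenderClientSketch {
    public static void main(String[] args) throws Exception {
        String base = "http://localhost:12393";
        String apiKey = Files.readString(Paths.get("apikey.txt")).trim(); // local node API key

        HttpClient client = HttpClient.newHttpClient();

        // Authorize the resource first: POST /render/authorize/{service}/{resourceId}
        HttpRequest authorize = HttpRequest.newBuilder()
                .uri(URI.create(base + "/render/authorize/WEBSITE/ExampleName"))
                .header("X-API-KEY", apiKey) // assumed header name
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        client.send(authorize, HttpResponse.BodyHandlers.ofString());

        // Then request the rendered index page: GET /render/{service}/{name}
        HttpRequest render = HttpRequest.newBuilder()
                .uri(URI.create(base + "/render/WEBSITE/ExampleName"))
                .header("X-API-KEY", apiKey)
                .GET()
                .build();
        HttpResponse<String> page = client.send(render, HttpResponse.BodyHandlers.ofString());
        System.out.println("HTTP " + page.statusCode() + ", " + page.body().length() + " bytes");
    }
}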

View File

@ -348,7 +348,7 @@ public class TransactionsResource {
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(startBlock, blockLimit, txGroupId,
txTypes, null, address, confirmationStatus, limit, offset, reverse);
txTypes, null, null, address, confirmationStatus, limit, offset, reverse);
// Expand signatures to transactions
List<TransactionData> transactions = new ArrayList<>(signatures.size());
@ -418,32 +418,83 @@ public class TransactionsResource {
}
@POST
@Path("/sign")
@Path("/convert")
@Operation(
summary = "Sign a raw, unsigned transaction",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(
implementation = SimpleTransactionSignRequest.class
)
)
),
responses = {
@ApiResponse(
description = "raw, signed transaction encoded in Base58",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
summary = "Convert transaction bytes into bytes for signing",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string",
description = "raw, unsigned transaction in base58 encoding",
example = "raw transaction base58"
)
)
)
)
}
),
responses = {
@ApiResponse(
description = "raw, unsigned transaction encoded in Base58, ready for signing",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
@ApiErrors({
ApiError.NON_PRODUCTION, ApiError.INVALID_PRIVATE_KEY, ApiError.TRANSFORMATION_ERROR
ApiError.NON_PRODUCTION, ApiError.TRANSFORMATION_ERROR
})
public String convertTransactionForSigning(String rawInputBytes58) {
byte[] rawInputBytes = Base58.decode(rawInputBytes58);
if (rawInputBytes.length == 0)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.JSON);
try {
// Append null signature on the end before transformation
byte[] rawBytes = Bytes.concat(rawInputBytes, new byte[TransactionTransformer.SIGNATURE_LENGTH]);
TransactionData transactionData = TransactionTransformer.fromBytes(rawBytes);
if (transactionData == null)
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.INVALID_DATA);
byte[] convertedBytes = TransactionTransformer.toBytesForSigning(transactionData);
return Base58.encode(convertedBytes);
} catch (TransformationException e) {
throw ApiExceptionFactory.INSTANCE.createException(request, ApiError.TRANSFORMATION_ERROR, e);
}
}
@POST
@Path("/sign")
@Operation(
summary = "Sign a raw, unsigned transaction",
requestBody = @RequestBody(
required = true,
content = @Content(
mediaType = MediaType.APPLICATION_JSON,
schema = @Schema(
implementation = SimpleTransactionSignRequest.class
)
)
),
responses = {
@ApiResponse(
description = "raw, signed transaction encoded in Base58",
content = @Content(
mediaType = MediaType.TEXT_PLAIN,
schema = @Schema(
type = "string"
)
)
)
}
)
@ApiErrors({
ApiError.NON_PRODUCTION, ApiError.INVALID_PRIVATE_KEY, ApiError.TRANSFORMATION_ERROR
})
public String signTransaction(SimpleTransactionSignRequest signRequest) {
if (Settings.getInstance().isApiRestricted())

View File

@ -0,0 +1,76 @@
package org.qortal.arbitrary;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.arbitrary.misc.Service;
import org.qortal.repository.DataException;
import org.qortal.utils.NTP;
import java.io.IOException;
public class ArbitraryDataBuildQueueItem extends ArbitraryDataResource {
private final Long creationTimestamp;
private Long buildStartTimestamp = null;
private Long buildEndTimestamp = null;
private boolean failed = false;
/* The maximum amount of time to spend on a single build */
// TODO: interrupt an in-progress build
public static long BUILD_TIMEOUT = 60*1000L; // 60 seconds
/* The amount of time to remember that a build has failed, to avoid retries */
public static long FAILURE_TIMEOUT = 5*60*1000L; // 5 minutes
public ArbitraryDataBuildQueueItem(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
super(resourceId, resourceIdType, service, identifier);
this.creationTimestamp = NTP.getTime();
}
public void build() throws IOException, DataException, MissingDataException {
Long now = NTP.getTime();
if (now == null) {
throw new DataException("NTP time hasn't synced yet");
}
this.buildStartTimestamp = now;
ArbitraryDataReader arbitraryDataReader =
new ArbitraryDataReader(this.resourceId, this.resourceIdType, this.service, this.identifier);
try {
arbitraryDataReader.loadSynchronously(true);
} finally {
this.buildEndTimestamp = NTP.getTime();
}
}
public boolean isBuilding() {
return this.buildStartTimestamp != null;
}
public boolean isQueued() {
return this.buildStartTimestamp == null;
}
public boolean hasReachedBuildTimeout(Long now) {
if (now == null || this.creationTimestamp == null) {
return true;
}
return now - this.creationTimestamp > BUILD_TIMEOUT;
}
public boolean hasReachedFailureTimeout(Long now) {
if (now == null || this.buildStartTimestamp == null) {
return true;
}
return now - this.buildStartTimestamp > FAILURE_TIMEOUT;
}
public Long getBuildStartTimestamp() {
return this.buildStartTimestamp;
}
public void setFailed(boolean failed) {
this.failed = failed;
}
}
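For illustration, a minimal sketch of how a build-queue consumer might drive the class above, assuming "ExampleName" is a hypothetical registered name hosting a WEBSITE resource:

import org.qortal.arbitrary.ArbitraryDataBuildQueueItem;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.misc.Service;
import org.qortal.utils.NTP;

public class BuildQueueSketch {
    public static void main(String[] args) {
        // "ExampleName" is a hypothetical registered name
        ArbitraryDataBuildQueueItem item =
                new ArbitraryDataBuildQueueItem("ExampleName", ResourceIdType.NAME, Service.WEBSITE, null);

        System.out.println("Queued: " + item.isQueued()); // true until build() records a start timestamp

        try {
            item.build(); // loads the resource synchronously, requesting missing chunks if needed
        } catch (Exception e) {
            item.setFailed(true); // remembered for FAILURE_TIMEOUT to avoid immediate retries
        }

        // A manager thread could use the timeouts to evict stale items from its queue
        Long now = NTP.getTime();
        System.out.println("Build timed out: " + item.hasReachedBuildTimeout(now));
    }
}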

View File

@ -0,0 +1,280 @@
package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataCache;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.Method;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class ArbitraryDataBuilder {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataBuilder.class);
private final String name;
private final Service service;
private final String identifier;
private boolean canRequestMissingFiles;
private List<ArbitraryTransactionData> transactions;
private ArbitraryTransactionData latestPutTransaction;
private final List<Path> paths;
private byte[] latestSignature;
private Path finalPath;
private int layerCount;
public ArbitraryDataBuilder(String name, Service service, String identifier) {
this.name = name;
this.service = service;
this.identifier = identifier;
this.paths = new ArrayList<>();
// By default we can request missing files
// Callers can use setCanRequestMissingFiles(false) to prevent it
this.canRequestMissingFiles = true;
}
/**
* Process transactions, but do not build anything.
* This is useful for checking the status of a given resource.
*
* @throws DataException
* @throws IOException
* @throws MissingDataException
*/
public void process() throws DataException, IOException, MissingDataException {
this.fetchTransactions();
this.validateTransactions();
this.processTransactions();
this.validatePaths();
this.findLatestSignature();
}
/**
* Build the latest state of a given resource
*
* @throws DataException
* @throws IOException
* @throws MissingDataException
*/
public void build() throws DataException, IOException, MissingDataException {
this.process();
this.buildLatestState();
this.cacheLatestSignature();
}
private void fetchTransactions() throws DataException {
try (final Repository repository = RepositoryManager.getRepository()) {
// Get the most recent PUT
ArbitraryTransactionData latestPut = repository.getArbitraryRepository()
.getLatestTransaction(this.name, this.service, Method.PUT, this.identifier);
if (latestPut == null) {
String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s",
this.name, this.service, this.identifierString());
throw new DataException(message);
}
this.latestPutTransaction = latestPut;
// Load all transactions since the latest PUT
List<ArbitraryTransactionData> transactionDataList = repository.getArbitraryRepository()
.getArbitraryTransactions(this.name, this.service, this.identifier, latestPut.getTimestamp());
this.transactions = transactionDataList;
this.layerCount = transactionDataList.size();
}
}
private void validateTransactions() throws DataException {
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
ArbitraryTransactionData latestPut = this.latestPutTransaction;
if (latestPut == null) {
throw new DataException("Cannot PATCH without existing PUT. Deploy using PUT first.");
}
if (latestPut.getMethod() != Method.PUT) {
throw new DataException("Expected PUT but received PATCH");
}
if (transactionDataList.size() == 0) {
throw new DataException(String.format("No transactions found for name %s, service %s, " +
"identifier: %s, since %d", name, service, this.identifierString(), latestPut.getTimestamp()));
}
// Verify that the signature of the first transaction matches the latest PUT
ArbitraryTransactionData firstTransaction = transactionDataList.get(0);
if (!Arrays.equals(firstTransaction.getSignature(), latestPut.getSignature())) {
throw new DataException("First transaction did not match latest PUT transaction");
}
// Remove the first transaction, as it should be the only PUT
transactionDataList.remove(0);
for (ArbitraryTransactionData transactionData : transactionDataList) {
if (transactionData == null) {
throw new DataException("Transaction not found");
}
if (transactionData.getMethod() != Method.PATCH) {
throw new DataException("Expected PATCH but received PUT");
}
}
}
private void processTransactions() throws IOException, DataException, MissingDataException {
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
int count = 0;
for (ArbitraryTransactionData transactionData : transactionDataList) {
LOGGER.trace("Found arbitrary transaction {}", Base58.encode(transactionData.getSignature()));
count++;
// Build the data file, overwriting anything that was previously there
String sig58 = Base58.encode(transactionData.getSignature());
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(sig58, ResourceIdType.TRANSACTION_DATA,
this.service, this.identifier);
arbitraryDataReader.setTransactionData(transactionData);
arbitraryDataReader.setCanRequestMissingFiles(this.canRequestMissingFiles);
boolean hasMissingData = false;
try {
arbitraryDataReader.loadSynchronously(true);
}
catch (MissingDataException e) {
hasMissingData = true;
}
// Handle missing data
if (hasMissingData) {
if (!this.canRequestMissingFiles) {
throw new MissingDataException("Files are missing but were not requested.");
}
if (count == transactionDataList.size()) {
// This is the final transaction in the list, so we need to fail
throw new MissingDataException("Requesting missing files. Please wait and try again.");
}
// There are more transactions, so we should process them to give them the opportunity to request data
continue;
}
// By this point we should have all data needed to build the layers
Path path = arbitraryDataReader.getFilePath();
if (path == null) {
throw new DataException(String.format("Null path when building data from transaction %s", sig58));
}
if (!Files.exists(path)) {
throw new DataException(String.format("Path doesn't exist when building data from transaction %s", sig58));
}
paths.add(path);
}
}
private void findLatestSignature() throws DataException {
if (this.transactions.size() == 0) {
throw new DataException("Unable to find latest signature from empty transaction list");
}
// Find the latest signature
ArbitraryTransactionData latestTransaction = this.transactions.get(this.transactions.size() - 1);
if (latestTransaction == null) {
throw new DataException("Unable to find latest signature from null transaction");
}
this.latestSignature = latestTransaction.getSignature();
}
private void validatePaths() throws DataException {
if (this.paths.isEmpty()) {
throw new DataException("No paths available from which to build latest state");
}
}
private void buildLatestState() throws IOException, DataException {
if (this.paths.size() == 1) {
// No patching needed
this.finalPath = this.paths.get(0);
return;
}
Path pathBefore = this.paths.get(0);
boolean validateAllLayers = Settings.getInstance().shouldValidateAllDataLayers();
// Loop from the second path onwards
for (int i=1; i<paths.size(); i++) {
String identifierPrefix = this.identifier != null ? String.format("[%s]", this.identifier) : "";
LOGGER.debug(String.format("[%s][%s]%s Applying layer %d...", this.service, this.name, identifierPrefix, i));
// Create an instance of ArbitraryDataCombiner
Path pathAfter = this.paths.get(i);
byte[] signatureBefore = this.transactions.get(i-1).getSignature();
ArbitraryDataCombiner combiner = new ArbitraryDataCombiner(pathBefore, pathAfter, signatureBefore);
// We only want to validate this layer's hash if it's the final layer, or if the settings
// indicate that we should validate interim layers too
boolean isFinalLayer = (i == paths.size() - 1);
combiner.setShouldValidateHashes(isFinalLayer || validateAllLayers);
// Now combine this layer with the last, and set the output path to the "before" path for the next cycle
combiner.combine();
combiner.cleanup();
pathBefore = combiner.getFinalPath();
}
this.finalPath = pathBefore;
}
private void cacheLatestSignature() throws IOException, DataException {
byte[] latestTransactionSignature = this.transactions.get(this.transactions.size()-1).getSignature();
if (latestTransactionSignature == null) {
throw new DataException("Missing latest transaction signature");
}
Long now = NTP.getTime();
if (now == null) {
throw new DataException("NTP time not synced yet");
}
ArbitraryDataMetadataCache cache = new ArbitraryDataMetadataCache(this.finalPath);
cache.setSignature(latestTransactionSignature);
cache.setTimestamp(NTP.getTime());
cache.write();
}
private String identifierString() {
return identifier != null ? identifier : "";
}
public Path getFinalPath() {
return this.finalPath;
}
public byte[] getLatestSignature() {
return this.latestSignature;
}
public int getLayerCount() {
return this.layerCount;
}
/**
* Use the below setter to ensure that we only read existing
* data without requesting any missing files.
*
* @param canRequestMissingFiles
*/
public void setCanRequestMissingFiles(boolean canRequestMissingFiles) {
this.canRequestMissingFiles = canRequestMissingFiles;
}
}
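For illustration, a minimal sketch of building the latest state of a resource with the class above, assuming "ExampleName" is a hypothetical registered name with at least one PUT transaction already on chain:

import java.nio.file.Path;
import org.qortal.arbitrary.ArbitraryDataBuilder;
import org.qortal.arbitrary.misc.Service;
import org.qortal.utils.Base58;

public class BuilderSketch {
    public static void main(String[] args) throws Exception {
        // "ExampleName" is a hypothetical registered name
        ArbitraryDataBuilder builder = new ArbitraryDataBuilder("ExampleName", Service.WEBSITE, null);

        // Only combine data already on disk; don't request missing chunks from peers
        builder.setCanRequestMissingFiles(false);

        builder.build(); // fetch + validate transactions, then apply each PATCH layer on top of the PUT

        Path finalPath = builder.getFinalPath();
        System.out.println("Built " + builder.getLayerCount() + " layer(s) into " + finalPath);
        System.out.println("Latest signature: " + Base58.encode(builder.getLatestSignature()));
    }
}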

View File

@ -0,0 +1,166 @@
package org.qortal.arbitrary;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataCache;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.utils.FilesystemUtils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
public class ArbitraryDataCache {
private final boolean overwrite;
private final Path filePath;
private final String resourceId;
private final ResourceIdType resourceIdType;
private final Service service;
private final String identifier;
public ArbitraryDataCache(Path filePath, boolean overwrite, String resourceId,
ResourceIdType resourceIdType, Service service, String identifier) {
this.filePath = filePath;
this.overwrite = overwrite;
this.resourceId = resourceId;
this.resourceIdType = resourceIdType;
this.service = service;
this.identifier = identifier;
}
public boolean isCachedDataAvailable() {
return !this.shouldInvalidate();
}
public boolean shouldInvalidate() {
try {
// If the user has requested an overwrite, always invalidate the cache
if (this.overwrite) {
return true;
}
// Overwrite is false, but we still need to invalidate if no files exist
if (!Files.exists(this.filePath) || FilesystemUtils.isDirectoryEmpty(this.filePath)) {
return true;
}
// We might want to overwrite anyway, if an updated version is available
if (this.shouldInvalidateResource()) {
return true;
}
} catch (IOException e) {
// Something went wrong, so invalidate the cache just in case
return true;
}
// No need to invalidate the cache
return false;
}
private boolean shouldInvalidateResource() {
switch (this.resourceIdType) {
case NAME:
return this.shouldInvalidateName();
default:
// Other resource ID types remain constant, so no need to invalidate
return false;
}
}
private boolean shouldInvalidateName() {
// To avoid spamming the database too often, we shouldn't check sigs or invalidate when rate limited
if (this.rateLimitInEffect()) {
return false;
}
// If the state's sig doesn't match the latest transaction's sig, we need to invalidate
// This means that an updated layer is available
if (this.shouldInvalidateDueToSignatureMismatch()) {
// Add to the in-memory cache first, so that we won't check again for a while
ArbitraryDataManager.getInstance().addResourceToCache(this.getArbitraryDataResource());
return true;
}
return false;
}
/**
* rateLimitInEffect()
*
* When loading a website, we need to check the cache for every static asset loaded by the page.
* This would involve asking the database for the latest transaction every time.
* To reduce database load and page load times, we maintain an in-memory list to "rate limit" lookups.
* Once a resource ID is in this in-memory list, we will avoid cache invalidations until it
* has been present in the list for a certain amount of time.
* Items are automatically removed from the list when a new arbitrary transaction arrives, so this
* should not prevent updates from taking effect immediately.
*
* @return whether to avoid lookups for this resource due to the in-memory cache
*/
private boolean rateLimitInEffect() {
return ArbitraryDataManager.getInstance().isResourceCached(this.getArbitraryDataResource());
}
private boolean shouldInvalidateDueToSignatureMismatch() {
// Fetch the latest transaction for this name and service
byte[] latestTransactionSig = this.fetchLatestTransactionSignature();
// Now fetch the transaction signature stored in the cache metadata
byte[] cachedSig = this.fetchCachedSignature();
// If either are null, we should invalidate
if (latestTransactionSig == null || cachedSig == null) {
return true;
}
// Check if they match
return !Arrays.equals(latestTransactionSig, cachedSig);
}
private byte[] fetchLatestTransactionSignature() {
try (final Repository repository = RepositoryManager.getRepository()) {
// Find latest transaction for name and service, with any method
ArbitraryTransactionData latestTransaction = repository.getArbitraryRepository()
.getLatestTransaction(this.resourceId, this.service, null, this.identifier);
if (latestTransaction != null) {
return latestTransaction.getSignature();
}
} catch (DataException e) {
return null;
}
return null;
}
private byte[] fetchCachedSignature() {
try {
// Fetch the transaction signature stored in the cache metadata
ArbitraryDataMetadataCache cache = new ArbitraryDataMetadataCache(this.filePath);
cache.read();
return cache.getSignature();
} catch (IOException | DataException e) {
return null;
}
}
private ArbitraryDataResource getArbitraryDataResource() {
// TODO: pass an ArbitraryDataResource into the constructor, rather than individual components
return new ArbitraryDataResource(this.resourceId, this.resourceIdType, this.service, this.identifier);
}
}
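For illustration, a minimal sketch of a cache check using the class above; the cached-state path and the name "ExampleName" are hypothetical placeholders:

import java.nio.file.Path;
import java.nio.file.Paths;
import org.qortal.arbitrary.ArbitraryDataCache;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.misc.Service;

public class CacheCheckSketch {
    public static void main(String[] args) {
        // Hypothetical path where a previously built state was stored
        Path cachedState = Paths.get("data", "reader", "WEBSITE", "ExampleName");

        ArbitraryDataCache cache = new ArbitraryDataCache(
                cachedState, false, "ExampleName", ResourceIdType.NAME, Service.WEBSITE, null);

        if (cache.isCachedDataAvailable()) {
            // Serve the cached copy; no rebuild needed
            System.out.println("Serving cached state from " + cachedState);
        } else {
            // Cache is missing, empty, or superseded by a newer transaction, so rebuild
            System.out.println("Cache invalidated; rebuild required");
        }
    }
}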

View File

@ -0,0 +1,170 @@
package org.qortal.arbitrary;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import java.io.File;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
public class ArbitraryDataCombiner {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataCombiner.class);
private final Path pathBefore;
private final Path pathAfter;
private final byte[] signatureBefore;
private boolean shouldValidateHashes;
private Path finalPath;
private ArbitraryDataMetadataPatch metadata;
public ArbitraryDataCombiner(Path pathBefore, Path pathAfter, byte[] signatureBefore) {
this.pathBefore = pathBefore;
this.pathAfter = pathAfter;
this.signatureBefore = signatureBefore;
}
public void combine() throws IOException, DataException {
try {
this.preExecute();
this.readMetadata();
this.validatePreviousSignature();
this.validatePreviousHash();
this.process();
this.validateCurrentHash();
} finally {
this.postExecute();
}
}
public void cleanup() {
this.cleanupPath(this.pathBefore);
this.cleanupPath(this.pathAfter);
}
private void cleanupPath(Path path) {
// Delete pathBefore, if it exists in our data/temp directory
if (FilesystemUtils.pathInsideDataOrTempPath(path)) {
File directory = new File(path.toString());
try {
FileUtils.deleteDirectory(directory);
} catch (IOException e) {
// This will eventually be cleaned up by a maintenance process, so log the error and continue
LOGGER.debug("Unable to cleanup directory {}", directory.toString());
}
}
// Delete the parent directory of pathBefore if it is empty (and exists in our data/temp directory)
Path parentDirectory = path.getParent();
if (FilesystemUtils.pathInsideDataOrTempPath(parentDirectory)) {
try {
Files.deleteIfExists(parentDirectory);
} catch (DirectoryNotEmptyException e) {
// No need to log anything
} catch (IOException e) {
// This will eventually be cleaned up by a maintenance process, so log the error and continue
LOGGER.debug("Unable to cleanup parent directory {}", parentDirectory.toString());
}
}
}
private void preExecute() throws DataException {
if (this.pathBefore == null || this.pathAfter == null) {
throw new DataException("No paths available to build patch");
}
if (!Files.exists(this.pathBefore) || !Files.exists(this.pathAfter)) {
throw new DataException("Unable to create patch because at least one path doesn't exist");
}
}
private void postExecute() {
}
private void readMetadata() throws IOException, DataException {
this.metadata = new ArbitraryDataMetadataPatch(this.pathAfter);
this.metadata.read();
}
private void validatePreviousSignature() throws DataException {
if (this.signatureBefore == null) {
throw new DataException("No previous signature passed to the combiner");
}
byte[] previousSignature = this.metadata.getPreviousSignature();
if (previousSignature == null) {
throw new DataException("Unable to extract previous signature from patch metadata");
}
// Compare the signatures
if (!Arrays.equals(previousSignature, this.signatureBefore)) {
throw new DataException("Previous signatures do not match - transactions out of order?");
}
}
private void validatePreviousHash() throws IOException, DataException {
if (!Settings.getInstance().shouldValidateAllDataLayers()) {
return;
}
byte[] previousHash = this.metadata.getPreviousHash();
if (previousHash == null) {
throw new DataException("Unable to extract previous hash from patch metadata");
}
ArbitraryDataDigest digest = new ArbitraryDataDigest(this.pathBefore);
digest.compute();
boolean valid = digest.isHashValid(previousHash);
if (!valid) {
String previousHash58 = Base58.encode(previousHash);
throw new InvalidObjectException(String.format("Previous state hash mismatch. " +
"Patch prevHash: %s, actual: %s", previousHash58, digest.getHash58()));
}
}
private void process() throws IOException, DataException {
ArbitraryDataMerge merge = new ArbitraryDataMerge(this.pathBefore, this.pathAfter);
merge.compute();
this.finalPath = merge.getMergePath();
}
private void validateCurrentHash() throws IOException, DataException {
if (!this.shouldValidateHashes) {
return;
}
byte[] currentHash = this.metadata.getCurrentHash();
if (currentHash == null) {
throw new DataException("Unable to extract current hash from patch metadata");
}
ArbitraryDataDigest digest = new ArbitraryDataDigest(this.finalPath);
digest.compute();
boolean valid = digest.isHashValid(currentHash);
if (!valid) {
String currentHash58 = Base58.encode(currentHash);
throw new InvalidObjectException(String.format("Current state hash mismatch. " +
"Patch curHash: %s, actual: %s", currentHash58, digest.getHash58()));
}
}
public void setShouldValidateHashes(boolean shouldValidateHashes) {
this.shouldValidateHashes = shouldValidateHashes;
}
public Path getFinalPath() {
return this.finalPath;
}
}
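For illustration, a minimal sketch of applying one patch layer with the combiner above; the layer paths are hypothetical placeholders, and the previous signature would in practice be the signature of the transaction that produced the "before" state (a zeroed placeholder would fail validation):

import java.nio.file.Path;
import java.nio.file.Paths;
import org.qortal.arbitrary.ArbitraryDataCombiner;

public class CombinerSketch {
    public static void main(String[] args) throws Exception {
        Path before = Paths.get("/tmp/layer-before");     // hypothetical previous built state
        Path after = Paths.get("/tmp/layer-after-patch"); // hypothetical patch layer
        byte[] previousSignature = new byte[64];          // placeholder; use the real transaction signature

        ArbitraryDataCombiner combiner = new ArbitraryDataCombiner(before, after, previousSignature);
        combiner.setShouldValidateHashes(true); // verify the combined state against the patch metadata
        combiner.combine();

        Path result = combiner.getFinalPath();
        combiner.cleanup(); // deletes the inputs only if they sit inside the data/temp directories
        System.out.println("Combined state: " + result);
    }
}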

View File

@ -0,0 +1,141 @@
package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.utils.FilesystemUtils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.UUID;
public class ArbitraryDataCreatePatch {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataCreatePatch.class);
private final Path pathBefore;
private Path pathAfter;
private final byte[] previousSignature;
private Path finalPath;
private int totalFileCount;
private int fileDifferencesCount;
private ArbitraryDataMetadataPatch metadata;
private Path workingPath;
private String identifier;
public ArbitraryDataCreatePatch(Path pathBefore, Path pathAfter, byte[] previousSignature) {
this.pathBefore = pathBefore;
this.pathAfter = pathAfter;
this.previousSignature = previousSignature;
}
public void create() throws DataException, IOException {
try {
this.preExecute();
this.copyFiles();
this.process();
} catch (Exception e) {
this.cleanupOnFailure();
throw e;
} finally {
this.postExecute();
}
}
private void preExecute() throws DataException {
if (this.pathBefore == null || this.pathAfter == null) {
throw new DataException("No paths available to build patch");
}
if (!Files.exists(this.pathBefore) || !Files.exists(this.pathAfter)) {
throw new DataException("Unable to create patch because at least one path doesn't exist");
}
this.createRandomIdentifier();
this.createWorkingDirectory();
}
private void postExecute() {
this.cleanupWorkingPath();
}
private void cleanupWorkingPath() {
try {
FilesystemUtils.safeDeleteDirectory(this.workingPath, true);
} catch (IOException e) {
LOGGER.debug("Unable to cleanup working directory");
}
}
private void cleanupOnFailure() {
try {
FilesystemUtils.safeDeleteDirectory(this.finalPath, true);
} catch (IOException e) {
LOGGER.debug("Unable to cleanup diff directory on failure");
}
}
private void createRandomIdentifier() {
this.identifier = UUID.randomUUID().toString();
}
private void createWorkingDirectory() throws DataException {
// Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware
String baseDir = Settings.getInstance().getTempDataPath();
Path tempDir = Paths.get(baseDir, "patch", this.identifier);
try {
Files.createDirectories(tempDir);
} catch (IOException e) {
throw new DataException("Unable to create temp directory");
}
this.workingPath = tempDir;
}
private void copyFiles() throws IOException {
// When dealing with single files, we need to copy them to a container directory
// in order for the structure to align with the previous revision and therefore
// make comparisons possible.
if (this.pathAfter.toFile().isFile()) {
// Create a "data" directory within the working directory
Path workingDataPath = Paths.get(this.workingPath.toString(), "data");
Files.createDirectories(workingDataPath);
// Copy to temp directory
// Filename is currently hardcoded to "data"
String filename = "data"; //this.pathAfter.getFileName().toString();
Files.copy(this.pathAfter, Paths.get(workingDataPath.toString(), filename));
// Update pathAfter to point to the new path
this.pathAfter = workingDataPath;
}
}
private void process() throws IOException, DataException {
ArbitraryDataDiff diff = new ArbitraryDataDiff(this.pathBefore, this.pathAfter, this.previousSignature);
this.finalPath = diff.getDiffPath();
diff.compute();
this.totalFileCount = diff.getTotalFileCount();
this.metadata = diff.getMetadata();
}
public Path getFinalPath() {
return this.finalPath;
}
public int getTotalFileCount() {
return this.totalFileCount;
}
public ArbitraryDataMetadataPatch getMetadata() {
return this.metadata;
}
}
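For illustration, a minimal sketch of producing a patch between two local states with the class above; the paths are hypothetical and the previous signature is a placeholder for the latest on-chain transaction signature:

import java.nio.file.Path;
import java.nio.file.Paths;
import org.qortal.arbitrary.ArbitraryDataCreatePatch;

public class CreatePatchSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical paths: the currently published state and the locally updated copy
        Path published = Paths.get("/home/user/site-v1");
        Path updated = Paths.get("/home/user/site-v2");
        byte[] previousSignature = new byte[64]; // placeholder; in practice the latest transaction's signature

        ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(published, updated, previousSignature);
        patch.create(); // copies a single file into a "data" container dir if needed, then diffs the two trees

        System.out.println("Patch written to: " + patch.getFinalPath());
        System.out.println("Files compared: " + patch.getTotalFileCount());
    }
}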

View File

@ -0,0 +1,383 @@
package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.json.JSONObject;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
import org.qortal.arbitrary.patch.UnifiedDiffPatch;
import org.qortal.crypto.Crypto;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import java.io.*;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.*;
public class ArbitraryDataDiff {
/** Only create a patch if both the before and after file sizes are within the defined limit */
private static final long MAX_DIFF_FILE_SIZE = 100 * 1024L; // 100kiB
public enum DiffType {
COMPLETE_FILE,
UNIFIED_DIFF
}
public static class ModifiedPath {
private Path path;
private DiffType diffType;
public ModifiedPath(Path path, DiffType diffType) {
this.path = path;
this.diffType = diffType;
}
public ModifiedPath(JSONObject jsonObject) {
String pathString = jsonObject.getString("path");
if (pathString != null) {
this.path = Paths.get(pathString);
}
String diffTypeString = jsonObject.getString("type");
if (diffTypeString != null) {
this.diffType = DiffType.valueOf(diffTypeString);
}
}
public Path getPath() {
return this.path;
}
public DiffType getDiffType() {
return this.diffType;
}
public String toString() {
return this.path.toString();
}
}
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataDiff.class);
private final Path pathBefore;
private final Path pathAfter;
private final byte[] previousSignature;
private byte[] previousHash;
private byte[] currentHash;
private Path diffPath;
private String identifier;
private final List<Path> addedPaths;
private final List<ModifiedPath> modifiedPaths;
private final List<Path> removedPaths;
private int totalFileCount;
private ArbitraryDataMetadataPatch metadata;
public ArbitraryDataDiff(Path pathBefore, Path pathAfter, byte[] previousSignature) throws DataException {
this.pathBefore = pathBefore;
this.pathAfter = pathAfter;
this.previousSignature = previousSignature;
this.addedPaths = new ArrayList<>();
this.modifiedPaths = new ArrayList<>();
this.removedPaths = new ArrayList<>();
this.createRandomIdentifier();
this.createOutputDirectory();
}
public void compute() throws IOException, DataException {
try {
this.preExecute();
this.hashPreviousState();
this.findAddedOrModifiedFiles();
this.findRemovedFiles();
this.validate();
this.hashCurrentState();
this.writeMetadata();
} finally {
this.postExecute();
}
}
private void preExecute() {
LOGGER.debug("Generating diff...");
}
private void postExecute() {
}
private void createRandomIdentifier() {
this.identifier = UUID.randomUUID().toString();
}
private void createOutputDirectory() throws DataException {
// Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware
String baseDir = Settings.getInstance().getTempDataPath();
Path tempDir = Paths.get(baseDir, "diff", this.identifier);
try {
Files.createDirectories(tempDir);
} catch (IOException e) {
throw new DataException("Unable to create temp directory");
}
this.diffPath = tempDir;
}
private void hashPreviousState() throws IOException, DataException {
ArbitraryDataDigest digest = new ArbitraryDataDigest(this.pathBefore);
digest.compute();
this.previousHash = digest.getHash();
}
private void findAddedOrModifiedFiles() throws IOException {
try {
final Path pathBeforeAbsolute = this.pathBefore.toAbsolutePath();
final Path pathAfterAbsolute = this.pathAfter.toAbsolutePath();
final Path diffPathAbsolute = this.diffPath.toAbsolutePath();
final ArbitraryDataDiff diff = this;
// Check for additions or modifications
Files.walkFileTree(this.pathAfter, new FileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path after, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path afterPathAbsolute, BasicFileAttributes attrs) throws IOException {
Path afterPathRelative = pathAfterAbsolute.relativize(afterPathAbsolute.toAbsolutePath());
Path beforePathAbsolute = pathBeforeAbsolute.resolve(afterPathRelative);
if (afterPathRelative.startsWith(".qortal")) {
// Ignore the .qortal metadata folder
return FileVisitResult.CONTINUE;
}
boolean wasAdded = false;
boolean wasModified = false;
if (!Files.exists(beforePathAbsolute)) {
LOGGER.trace("File was added: {}", afterPathRelative.toString());
diff.addedPaths.add(afterPathRelative);
wasAdded = true;
}
else if (Files.size(afterPathAbsolute) != Files.size(beforePathAbsolute)) {
// Check file size first because it's quicker
LOGGER.trace("File size was modified: {}", afterPathRelative.toString());
wasModified = true;
}
else if (!Arrays.equals(ArbitraryDataDiff.digestFromPath(afterPathAbsolute), ArbitraryDataDiff.digestFromPath(beforePathAbsolute))) {
// Check hashes as a last resort
LOGGER.trace("File contents were modified: {}", afterPathRelative.toString());
wasModified = true;
}
if (wasAdded) {
diff.copyFilePathToBaseDir(afterPathAbsolute, diffPathAbsolute, afterPathRelative);
}
if (wasModified) {
try {
diff.pathModified(beforePathAbsolute, afterPathAbsolute, afterPathRelative, diffPathAbsolute);
} catch (DataException e) {
// We can only throw IOExceptions because we are overriding FileVisitor.visitFile()
throw new IOException(e);
}
}
// Keep a tally of the total number of files to help with decision making
diff.totalFileCount++;
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException e){
LOGGER.info("File visit failed: {}, error: {}", file.toString(), e.getMessage());
// TODO: throw exception?
return FileVisitResult.TERMINATE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException e) {
return FileVisitResult.CONTINUE;
}
});
} catch (IOException e) {
LOGGER.info("IOException when walking through file tree: {}", e.getMessage());
throw(e);
}
}
private void findRemovedFiles() throws IOException {
try {
final Path pathBeforeAbsolute = this.pathBefore.toAbsolutePath();
final Path pathAfterAbsolute = this.pathAfter.toAbsolutePath();
final ArbitraryDataDiff diff = this;
// Check for removals
Files.walkFileTree(this.pathBefore, new FileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path before, BasicFileAttributes attrs) {
Path directoryPathBefore = pathBeforeAbsolute.relativize(before.toAbsolutePath());
Path directoryPathAfter = pathAfterAbsolute.resolve(directoryPathBefore);
if (directoryPathBefore.startsWith(".qortal")) {
// Ignore the .qortal metadata folder
return FileVisitResult.CONTINUE;
}
if (!Files.exists(directoryPathAfter)) {
LOGGER.trace("Directory was removed: {}", directoryPathAfter.toString());
diff.removedPaths.add(directoryPathBefore);
// TODO: we might need to mark directories differently to files
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path before, BasicFileAttributes attrs) {
Path filePathBefore = pathBeforeAbsolute.relativize(before.toAbsolutePath());
Path filePathAfter = pathAfterAbsolute.resolve(filePathBefore);
if (filePathBefore.startsWith(".qortal")) {
// Ignore the .qortal metadata folder
return FileVisitResult.CONTINUE;
}
if (!Files.exists(filePathAfter)) {
LOGGER.trace("File was removed: {}", filePathBefore.toString());
diff.removedPaths.add(filePathBefore);
}
// Keep a tally of the total number of files to help with decision making
diff.totalFileCount++;
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException e){
LOGGER.info("File visit failed: {}, error: {}", file.toString(), e.getMessage());
// TODO: throw exception?
return FileVisitResult.TERMINATE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException e) {
return FileVisitResult.CONTINUE;
}
});
} catch (IOException e) {
throw new IOException(String.format("IOException when walking through file tree: %s", e.getMessage()));
}
}
private void validate() throws DataException {
if (this.addedPaths.isEmpty() && this.modifiedPaths.isEmpty() && this.removedPaths.isEmpty()) {
throw new DataException("Current state matches previous state. Nothing to do.");
}
}
private void hashCurrentState() throws IOException, DataException {
ArbitraryDataDigest digest = new ArbitraryDataDigest(this.pathAfter);
digest.compute();
this.currentHash = digest.getHash();
}
private void writeMetadata() throws IOException, DataException {
ArbitraryDataMetadataPatch metadata = new ArbitraryDataMetadataPatch(this.diffPath);
metadata.setAddedPaths(this.addedPaths);
metadata.setModifiedPaths(this.modifiedPaths);
metadata.setRemovedPaths(this.removedPaths);
metadata.setPreviousSignature(this.previousSignature);
metadata.setPreviousHash(this.previousHash);
metadata.setCurrentHash(this.currentHash);
metadata.write();
this.metadata = metadata;
}
private void pathModified(Path beforePathAbsolute, Path afterPathAbsolute, Path afterPathRelative,
Path destinationBasePathAbsolute) throws IOException, DataException {
Path destination = Paths.get(destinationBasePathAbsolute.toString(), afterPathRelative.toString());
long beforeSize = Files.size(beforePathAbsolute);
long afterSize = Files.size(afterPathAbsolute);
DiffType diffType;
if (beforeSize > MAX_DIFF_FILE_SIZE || afterSize > MAX_DIFF_FILE_SIZE) {
// Files are large, so don't attempt a diff
this.copyFilePathToBaseDir(afterPathAbsolute, destinationBasePathAbsolute, afterPathRelative);
diffType = DiffType.COMPLETE_FILE;
}
else {
// Attempt to create patch using java-diff-utils
UnifiedDiffPatch unifiedDiffPatch = new UnifiedDiffPatch(beforePathAbsolute, afterPathAbsolute, destination);
unifiedDiffPatch.create();
if (unifiedDiffPatch.isValid()) {
diffType = DiffType.UNIFIED_DIFF;
}
else {
// Diff failed validation, so copy the whole file instead
this.copyFilePathToBaseDir(afterPathAbsolute, destinationBasePathAbsolute, afterPathRelative);
diffType = DiffType.COMPLETE_FILE;
}
}
ModifiedPath modifiedPath = new ModifiedPath(afterPathRelative, diffType);
this.modifiedPaths.add(modifiedPath);
}
private void copyFilePathToBaseDir(Path source, Path base, Path relativePath) throws IOException {
if (!Files.exists(source)) {
throw new IOException(String.format("File not found: %s", source.toString()));
}
// Ensure parent folders exist in the destination
Path dest = Paths.get(base.toString(), relativePath.toString());
File file = new File(dest.toString());
File parent = file.getParentFile();
if (parent != null) {
parent.mkdirs();
}
LOGGER.trace("Copying {} to {}", source, dest);
Files.copy(source, dest, StandardCopyOption.REPLACE_EXISTING);
}
public Path getDiffPath() {
return this.diffPath;
}
public int getTotalFileCount() {
return this.totalFileCount;
}
public ArbitraryDataMetadataPatch getMetadata() {
return this.metadata;
}
// Utils
private static byte[] digestFromPath(Path path) {
try {
return Crypto.digest(path.toFile());
} catch (IOException e) {
return null;
}
}
}
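For illustration, a minimal sketch of running the diff directly (ArbitraryDataCreatePatch wraps this for normal use); the paths are hypothetical placeholders and the signature is a placeholder value:

import java.nio.file.Path;
import java.nio.file.Paths;
import org.qortal.arbitrary.ArbitraryDataDiff;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;

public class DiffSketch {
    public static void main(String[] args) throws Exception {
        Path before = Paths.get("/home/user/site-v1"); // hypothetical previous state
        Path after = Paths.get("/home/user/site-v2");  // hypothetical updated state
        byte[] previousSignature = new byte[64];       // placeholder signature

        ArbitraryDataDiff diff = new ArbitraryDataDiff(before, after, previousSignature);
        diff.compute(); // records added files, unified diffs (or whole files above the size limit) and removals

        ArbitraryDataMetadataPatch metadata = diff.getMetadata();
        System.out.println("Diff written to: " + diff.getDiffPath());
        System.out.println("Files inspected: " + diff.getTotalFileCount());
        System.out.println("Current state hash recorded: " + (metadata.getCurrentHash() != null));
    }
}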

View File

@ -0,0 +1,73 @@
package org.qortal.arbitrary;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
public class ArbitraryDataDigest {
private final Path path;
private byte[] hash;
public ArbitraryDataDigest(Path path) {
this.path = path;
}
public void compute() throws IOException, DataException {
List<Path> allPaths = Files.walk(path).filter(Files::isRegularFile).sorted().collect(Collectors.toList());
Path basePathAbsolute = this.path.toAbsolutePath();
MessageDigest sha256;
try {
sha256 = MessageDigest.getInstance("SHA-256");
} catch (NoSuchAlgorithmException e) {
throw new DataException("SHA-256 hashing algorithm unavailable");
}
for (Path path : allPaths) {
// We need to work with paths relative to the base path, to ensure the same hash
// is generated on different systems
Path relativePath = basePathAbsolute.relativize(path.toAbsolutePath());
// Exclude Qortal folder since it can be different each time
// We only care about hashing the actual user data
if (relativePath.startsWith(".qortal/")) {
continue;
}
// Hash path
byte[] filePathBytes = relativePath.toString().getBytes(StandardCharsets.UTF_8);
sha256.update(filePathBytes);
// Hash contents
byte[] fileContent = Files.readAllBytes(path);
sha256.update(fileContent);
}
this.hash = sha256.digest();
}
public boolean isHashValid(byte[] hash) {
return Arrays.equals(hash, this.hash);
}
public byte[] getHash() {
return this.hash;
}
public String getHash58() {
if (this.hash == null) {
return null;
}
return Base58.encode(this.hash);
}
}
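For illustration, a minimal sketch of hashing a built state with the digest class above; the path is a hypothetical placeholder:

import java.nio.file.Path;
import java.nio.file.Paths;
import org.qortal.arbitrary.ArbitraryDataDigest;

public class DigestSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical directory containing a built resource state
        Path statePath = Paths.get("/home/user/site-v1");

        ArbitraryDataDigest digest = new ArbitraryDataDigest(statePath);
        digest.compute(); // hashes relative paths plus contents of every regular file, skipping .qortal/

        System.out.println("State hash (Base58): " + digest.getHash58());

        // The same hash can later be checked against a patch's recorded previousHash/currentHash
        byte[] expected = digest.getHash();
        System.out.println("Matches itself: " + digest.isHashValid(expected));
    }
}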

View File

@ -0,0 +1,735 @@
package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.crypto.Crypto;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import java.io.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.*;
import java.util.stream.Stream;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;
public class ArbitraryDataFile {
// Validation results
public enum ValidationResult {
OK(1),
FILE_TOO_LARGE(10),
FILE_NOT_FOUND(11);
public final int value;
private static final Map<Integer, ArbitraryDataFile.ValidationResult> map = stream(ArbitraryDataFile.ValidationResult.values()).collect(toMap(result -> result.value, result -> result));
ValidationResult(int value) {
this.value = value;
}
public static ArbitraryDataFile.ValidationResult valueOf(int value) {
return map.get(value);
}
}
// Resource ID types
public enum ResourceIdType {
SIGNATURE,
FILE_HASH,
TRANSACTION_DATA,
NAME
}
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFile.class);
public static final long MAX_FILE_SIZE = 500 * 1024 * 1024; // 500MiB
public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1MiB
public static int SHORT_DIGEST_LENGTH = 8;
protected Path filePath;
protected String hash58;
protected byte[] signature;
private ArrayList<ArbitraryDataFileChunk> chunks;
private byte[] secret;
// Metadata
private byte[] metadataHash;
private ArbitraryDataFile metadataFile;
private ArbitraryDataTransactionMetadata metadata;
public ArbitraryDataFile() {
}
public ArbitraryDataFile(String hash58, byte[] signature) throws DataException {
this.createDataDirectory();
this.filePath = ArbitraryDataFile.getOutputFilePath(hash58, signature, false);
this.chunks = new ArrayList<>();
this.hash58 = hash58;
this.signature = signature;
}
public ArbitraryDataFile(byte[] fileContent, byte[] signature) throws DataException {
if (fileContent == null) {
LOGGER.error("fileContent is null");
return;
}
this.hash58 = Base58.encode(Crypto.digest(fileContent));
this.signature = signature;
LOGGER.trace(String.format("File digest: %s, size: %d bytes", this.hash58, fileContent.length));
Path outputFilePath = getOutputFilePath(this.hash58, signature, true);
File outputFile = outputFilePath.toFile();
try (FileOutputStream outputStream = new FileOutputStream(outputFile)) {
outputStream.write(fileContent);
this.filePath = outputFilePath;
// Verify hash
if (!this.hash58.equals(this.digest58())) {
LOGGER.error("Hash {} does not match file digest {}", this.hash58, this.digest58());
this.delete();
throw new DataException("Data file digest validation failed");
}
} catch (IOException e) {
throw new DataException("Unable to write data to file");
}
}
public static ArbitraryDataFile fromHash58(String hash58, byte[] signature) throws DataException {
return new ArbitraryDataFile(hash58, signature);
}
public static ArbitraryDataFile fromHash(byte[] hash, byte[] signature) throws DataException {
return ArbitraryDataFile.fromHash58(Base58.encode(hash), signature);
}
public static ArbitraryDataFile fromPath(Path path, byte[] signature) {
if (path == null) {
return null;
}
File file = path.toFile();
if (file.exists()) {
try {
byte[] digest = Crypto.digest(file);
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
// Copy file to data directory if needed
if (Files.exists(path) && !arbitraryDataFile.isInBaseDirectory(path)) {
arbitraryDataFile.copyToDataDirectory(path, signature);
}
// Or, if it's already in the data directory, we may need to move it
else if (!path.equals(arbitraryDataFile.getFilePath())) {
// Wrong path, so relocate (but don't cleanup, as the source folder may still be needed by the caller)
Path dest = arbitraryDataFile.getFilePath();
FilesystemUtils.moveFile(path, dest, false);
}
return arbitraryDataFile;
} catch (IOException | DataException e) {
LOGGER.error("Couldn't compute digest for ArbitraryDataFile");
}
}
return null;
}
public static ArbitraryDataFile fromFile(File file, byte[] signature) {
return ArbitraryDataFile.fromPath(Paths.get(file.getPath()), signature);
}
private boolean createDataDirectory() {
// Create the data directory if it doesn't exist
String dataPath = Settings.getInstance().getDataPath();
Path dataDirectory = Paths.get(dataPath);
try {
Files.createDirectories(dataDirectory);
} catch (IOException e) {
LOGGER.error("Unable to create data directory");
return false;
}
return true;
}
private Path copyToDataDirectory(Path sourcePath, byte[] signature) throws DataException {
if (this.hash58 == null || this.filePath == null) {
return null;
}
Path outputFilePath = getOutputFilePath(this.hash58, signature, true);
sourcePath = sourcePath.toAbsolutePath();
Path destPath = outputFilePath.toAbsolutePath();
try {
return Files.copy(sourcePath, destPath, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException e) {
throw new DataException(String.format("Unable to copy file %s to data directory %s", sourcePath, destPath));
}
}
public static Path getOutputFilePath(String hash58, byte[] signature, boolean createDirectories) throws DataException {
Path directory;
if (hash58 == null) {
return null;
}
if (signature != null) {
// Key by signature
String signature58 = Base58.encode(signature);
String sig58First2Chars = signature58.substring(0, 2).toLowerCase();
String sig58Next2Chars = signature58.substring(2, 4).toLowerCase();
directory = Paths.get(Settings.getInstance().getDataPath(), sig58First2Chars, sig58Next2Chars, signature58);
}
else {
// Put files without signatures in a "_misc" directory, and the files will be relocated later
String hash58First2Chars = hash58.substring(0, 2).toLowerCase();
String hash58Next2Chars = hash58.substring(2, 4).toLowerCase();
directory = Paths.get(Settings.getInstance().getDataPath(), "_misc", hash58First2Chars, hash58Next2Chars);
}
if (createDirectories) {
try {
Files.createDirectories(directory);
} catch (IOException e) {
throw new DataException("Unable to create data subdirectory");
}
}
return Paths.get(directory.toString(), hash58);
}
public ValidationResult isValid() {
try {
// Ensure the file exists on disk
if (!Files.exists(this.filePath)) {
LOGGER.error("File doesn't exist at path {}", this.filePath);
return ValidationResult.FILE_NOT_FOUND;
}
// Validate the file size
long fileSize = Files.size(this.filePath);
if (fileSize > MAX_FILE_SIZE) {
LOGGER.error(String.format("ArbitraryDataFile is too large: %d bytes (max size: %d bytes)", fileSize, MAX_FILE_SIZE));
return ArbitraryDataFile.ValidationResult.FILE_TOO_LARGE;
}
} catch (IOException e) {
return ValidationResult.FILE_NOT_FOUND;
}
return ValidationResult.OK;
}
public void validateFileSize(long expectedSize) throws DataException {
// Verify that we can determine the file's size
long fileSize = 0;
try {
fileSize = Files.size(this.getFilePath());
} catch (IOException e) {
throw new DataException(String.format("Couldn't get file size for transaction %s", Base58.encode(signature)));
}
// Ensure the file's size matches the size reported by the transaction
if (fileSize != expectedSize) {
throw new DataException(String.format("File size mismatch for transaction %s", Base58.encode(signature)));
}
}
private void addChunk(ArbitraryDataFileChunk chunk) {
this.chunks.add(chunk);
}
private void addChunkHashes(List<byte[]> chunkHashes) throws DataException {
if (chunkHashes == null || chunkHashes.isEmpty()) {
return;
}
for (byte[] chunkHash : chunkHashes) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
this.addChunk(chunk);
}
}
public List<byte[]> getChunkHashes() {
List<byte[]> hashes = new ArrayList<>();
if (this.chunks == null || this.chunks.isEmpty()) {
return hashes;
}
for (ArbitraryDataFileChunk chunkData : this.chunks) {
hashes.add(chunkData.getHash());
}
return hashes;
}
public int split(int chunkSize) throws DataException {
try {
File file = this.getFile();
byte[] buffer = new byte[chunkSize];
this.chunks = new ArrayList<>();
if (file != null) {
try (FileInputStream fileInputStream = new FileInputStream(file);
BufferedInputStream bis = new BufferedInputStream(fileInputStream)) {
int numberOfBytes;
while ((numberOfBytes = bis.read(buffer)) > 0) {
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
out.write(buffer, 0, numberOfBytes);
out.flush();
ArbitraryDataFileChunk chunk = new ArbitraryDataFileChunk(out.toByteArray(), this.signature);
ValidationResult validationResult = chunk.isValid();
if (validationResult == ValidationResult.OK) {
this.chunks.add(chunk);
} else {
throw new DataException(String.format("Chunk %s is invalid", chunk));
}
}
}
}
}
} catch (Exception e) {
throw new DataException("Unable to split file into chunks");
}
return this.chunks.size();
}
public boolean join() {
// Ensure we have chunks
if (this.chunks != null && this.chunks.size() > 0) {
// Create temporary path for joined file
// Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware
String baseDir = Settings.getInstance().getTempDataPath();
Path tempDir = Paths.get(baseDir, "join");
try {
Files.createDirectories(tempDir);
} catch (IOException e) {
return false;
}
// Join the chunks
Path outputPath = Paths.get(tempDir.toString(), this.chunks.get(0).digest58());
File outputFile = new File(outputPath.toString());
try (BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(outputFile))) {
for (ArbitraryDataFileChunk chunk : this.chunks) {
File sourceFile = chunk.filePath.toFile();
try (BufferedInputStream in = new BufferedInputStream(new FileInputStream(sourceFile))) {
byte[] buffer = new byte[2048];
int inSize;
while ((inSize = in.read(buffer)) != -1) {
out.write(buffer, 0, inSize);
}
}
}
out.close();
// Copy temporary file to data directory
this.filePath = this.copyToDataDirectory(outputPath, this.signature);
if (FilesystemUtils.pathInsideDataOrTempPath(outputPath)) {
Files.delete(outputPath);
}
return true;
} catch (FileNotFoundException e) {
return false;
} catch (IOException | DataException e) {
return false;
}
}
return false;
}
public boolean delete() {
// Delete the complete file
// ... but only if it's inside the Qortal data or temp directory
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
if (Files.exists(this.filePath)) {
try {
Files.delete(this.filePath);
this.cleanupFilesystem();
LOGGER.debug("Deleted file {}", this.filePath);
return true;
} catch (IOException e) {
LOGGER.warn("Couldn't delete file at path {}", this.filePath);
}
}
}
return false;
}
public boolean deleteAllChunks() {
boolean success = false;
// Delete the individual chunks
if (this.chunks != null && this.chunks.size() > 0) {
Iterator<ArbitraryDataFileChunk> iterator = this.chunks.iterator();
while (iterator.hasNext()) {
ArbitraryDataFileChunk chunk = iterator.next();
success = chunk.delete();
iterator.remove();
}
}
return success;
}
public boolean deleteMetadata() {
if (this.metadataFile != null && this.metadataFile.exists()) {
return this.metadataFile.delete();
}
return false;
}
public boolean deleteAll() {
// Delete the complete file
boolean fileDeleted = this.delete();
// Delete the metadata file
boolean metadataDeleted = this.deleteMetadata();
// Delete the individual chunks
boolean chunksDeleted = this.deleteAllChunks();
return fileDeleted || metadataDeleted || chunksDeleted;
}
protected void cleanupFilesystem() throws IOException {
// It is essential that we use a separate path reference in this method,
// as we don't want to modify this.filePath
Path path = this.filePath;
FilesystemUtils.safeDeleteEmptyParentDirectories(path);
}
public byte[] getBytes() {
try {
return Files.readAllBytes(this.filePath);
} catch (IOException e) {
LOGGER.error("Unable to read bytes for file");
return null;
}
}
/* Helper methods */
private boolean isInBaseDirectory(Path filePath) {
Path path = filePath.toAbsolutePath();
String dataPath = Settings.getInstance().getDataPath();
String basePath = Paths.get(dataPath).toAbsolutePath().toString();
return path.startsWith(basePath);
}
public boolean exists() {
File file = this.filePath.toFile();
return file.exists();
}
public boolean chunkExists(byte[] hash) {
for (ArbitraryDataFileChunk chunk : this.chunks) {
if (Arrays.equals(hash, chunk.getHash())) {
return chunk.exists();
}
}
if (Arrays.equals(this.getHash(), hash)) {
return this.exists();
}
return false;
}
public boolean allChunksExist() {
try {
if (this.metadataHash == null) {
// We don't have any metadata so can't check if we have the chunks
// Even if this transaction has no chunks, we don't have the file either (already checked above)
return false;
}
if (this.metadataFile == null) {
this.metadataFile = ArbitraryDataFile.fromHash(this.metadataHash, this.signature);
if (!metadataFile.exists()) {
return false;
}
}
// If the metadata file doesn't exist, we can't check if we have the chunks
if (!metadataFile.getFilePath().toFile().exists()) {
return false;
}
if (this.metadata == null) {
this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath()));
}
// Read the metadata
List<byte[]> chunks = metadata.getChunks();
for (byte[] chunkHash : chunks) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
if (!chunk.exists()) {
return false;
}
}
return true;
} catch (DataException e) {
// Something went wrong, so assume we don't have all the chunks
return false;
}
}
public boolean anyChunksExist() throws DataException {
try {
if (this.metadataHash == null) {
// We don't have any metadata so can't check if we have the chunks
// Even if this transaction has no chunks, we don't have the file either (already checked above)
return false;
}
if (this.metadataFile == null) {
this.metadataFile = ArbitraryDataFile.fromHash(this.metadataHash, this.signature);
if (!metadataFile.exists()) {
return false;
}
}
// If the metadata file doesn't exist, we can't check if we have any chunks
if (!metadataFile.getFilePath().toFile().exists()) {
return false;
}
if (this.metadata == null) {
this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath()));
}
// Read the metadata
List<byte[]> chunks = metadata.getChunks();
for (byte[] chunkHash : chunks) {
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, this.signature);
if (chunk.exists()) {
return true;
}
}
return false;
} catch (DataException e) {
// Something went wrong, so assume we don't have all the chunks
return false;
}
}
public boolean allFilesExist() {
if (this.exists()) {
return true;
}
// Complete file doesn't exist, so check the chunks
if (this.allChunksExist()) {
return true;
}
return false;
}
public boolean containsChunk(byte[] hash) {
for (ArbitraryDataFileChunk chunk : this.chunks) {
if (Arrays.equals(hash, chunk.getHash())) {
return true;
}
}
return false;
}
public long size() {
try {
return Files.size(this.filePath);
} catch (IOException e) {
return 0;
}
}
public int chunkCount() {
return this.chunks.size();
}
public List<ArbitraryDataFileChunk> getChunks() {
return this.chunks;
}
public byte[] chunkHashes() throws DataException {
if (this.chunks != null && this.chunks.size() > 0) {
// Return null if we only have one chunk, with the same hash as the parent
if (Arrays.equals(this.digest(), this.chunks.get(0).digest())) {
return null;
}
try {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (ArbitraryDataFileChunk chunk : this.chunks) {
byte[] chunkHash = chunk.digest();
if (chunkHash.length != 32) {
LOGGER.info("Invalid chunk hash length: {}", chunkHash.length);
throw new DataException("Invalid chunk hash length");
}
outputStream.write(chunk.digest());
}
return outputStream.toByteArray();
} catch (IOException e) {
return null;
}
}
return null;
}
public List<byte[]> chunkHashList() {
List<byte[]> chunks = new ArrayList<>();
if (this.chunks != null && this.chunks.size() > 0) {
// Return null if we only have one chunk, with the same hash as the parent
if (Arrays.equals(this.digest(), this.chunks.get(0).digest())) {
return null;
}
try {
for (ArbitraryDataFileChunk chunk : this.chunks) {
byte[] chunkHash = chunk.digest();
if (chunkHash.length != 32) {
LOGGER.info("Invalid chunk hash length: {}", chunkHash.length);
throw new DataException("Invalid chunk hash length");
}
chunks.add(chunkHash);
}
return chunks;
} catch (DataException e) {
return null;
}
}
return null;
}
private void loadMetadata() throws DataException {
try {
this.metadata.read();
} catch (DataException | IOException e) {
throw new DataException(e);
}
}
private File getFile() {
File file = this.filePath.toFile();
if (file.exists()) {
return file;
}
return null;
}
public Path getFilePath() {
return this.filePath;
}
public byte[] digest() {
File file = this.getFile();
if (file != null && file.exists()) {
try {
return Crypto.digest(file);
} catch (IOException e) {
LOGGER.error("Couldn't compute digest for ArbitraryDataFile");
}
}
return null;
}
public String digest58() {
if (this.digest() != null) {
return Base58.encode(this.digest());
}
return null;
}
public String shortHash58() {
if (this.hash58 == null) {
return null;
}
return this.hash58.substring(0, Math.min(this.hash58.length(), SHORT_DIGEST_LENGTH));
}
public String getHash58() {
return this.hash58;
}
public byte[] getHash() {
return Base58.decode(this.hash58);
}
public String printChunks() {
StringBuilder outputString = new StringBuilder();
if (this.chunkCount() > 0) {
for (ArbitraryDataFileChunk chunk : this.chunks) {
if (outputString.length() > 0) {
outputString.append(",");
}
outputString.append(chunk.digest58());
}
}
return outputString.toString();
}
public void setSecret(byte[] secret) {
this.secret = secret;
}
public byte[] getSecret() {
return this.secret;
}
public byte[] getSignature() {
return this.signature;
}
public void setMetadataFile(ArbitraryDataFile metadataFile) {
this.metadataFile = metadataFile;
}
public ArbitraryDataFile getMetadataFile() {
return this.metadataFile;
}
public void setMetadataHash(byte[] hash) throws DataException {
this.metadataHash = hash;
if (hash == null) {
return;
}
this.metadataFile = ArbitraryDataFile.fromHash(hash, this.signature);
if (metadataFile.exists()) {
this.setMetadata(new ArbitraryDataTransactionMetadata(this.metadataFile.getFilePath()));
this.addChunkHashes(this.metadata.getChunks());
}
}
public byte[] getMetadataHash() {
return this.metadataHash;
}
public void setMetadata(ArbitraryDataTransactionMetadata metadata) throws DataException {
this.metadata = metadata;
this.loadMetadata();
}
@Override
public String toString() {
return this.shortHash58();
}
}
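A minimal sketch of the split/join round trip provided by this class, assuming the complete file already exists at its hash-derived path; the hash58/signature variables and the helper method are illustrative, not part of this commit:

// Hypothetical helper showing how a caller might chunk a file and later reassemble it
private static void splitAndRejoinExample(String hash58, byte[] signature) throws DataException {
ArbitraryDataFile dataFile = ArbitraryDataFile.fromHash58(hash58, signature);
int chunkCount = dataFile.split(ArbitraryDataFile.CHUNK_SIZE); // writes one chunk file per CHUNK_SIZE bytes
byte[] combinedHashes = dataFile.chunkHashes(); // 32-byte chunk digests, concatenated in order (null for a single chunk)
boolean rebuilt = dataFile.join(); // joins the chunk files back into the complete file
}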

View File

@ -0,0 +1,54 @@
package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
import java.io.IOException;
import java.nio.file.Files;
public class ArbitraryDataFileChunk extends ArbitraryDataFile {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileChunk.class);
public ArbitraryDataFileChunk(String hash58, byte[] signature) throws DataException {
super(hash58, signature);
}
public ArbitraryDataFileChunk(byte[] fileContent, byte[] signature) throws DataException {
super(fileContent, signature);
}
public static ArbitraryDataFileChunk fromHash58(String hash58, byte[] signature) throws DataException {
return new ArbitraryDataFileChunk(hash58, signature);
}
public static ArbitraryDataFileChunk fromHash(byte[] hash, byte[] signature) throws DataException {
return ArbitraryDataFileChunk.fromHash58(Base58.encode(hash), signature);
}
@Override
public ValidationResult isValid() {
// Superclass (ArbitraryDataFile) validation applies to chunks too
ValidationResult superclassValidationResult = super.isValid();
if (superclassValidationResult != ValidationResult.OK) {
return superclassValidationResult;
}
try {
// Validate the file size (chunks have stricter limits)
long fileSize = Files.size(this.filePath);
if (fileSize > CHUNK_SIZE) {
LOGGER.error(String.format("DataFileChunk is too large: %d bytes (max chunk size: %d bytes)", fileSize, CHUNK_SIZE));
return ValidationResult.FILE_TOO_LARGE;
}
} catch (IOException e) {
return ValidationResult.FILE_NOT_FOUND;
}
return ValidationResult.OK;
}
}
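A brief sketch of re-validating a chunk received from elsewhere, using only the methods shown above; the chunkHash and signature variables are illustrative:

// Hypothetical: rebuild the chunk wrapper from its hash and discard it if validation fails
ArbitraryDataFileChunk chunk = ArbitraryDataFileChunk.fromHash(chunkHash, signature);
if (chunk.isValid() != ValidationResult.OK) {
chunk.delete(); // only deletes paths inside the Qortal data/temp directories
}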

View File

@ -0,0 +1,176 @@
package org.qortal.arbitrary;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataDiff.*;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
import org.qortal.arbitrary.patch.UnifiedDiffPatch;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.utils.FilesystemUtils;
import java.io.File;
import java.io.IOException;
import java.nio.file.*;
import java.util.List;
import java.util.UUID;
public class ArbitraryDataMerge {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMerge.class);
private final Path pathBefore;
private final Path pathAfter;
private Path mergePath;
private String identifier;
private ArbitraryDataMetadataPatch metadata;
public ArbitraryDataMerge(Path pathBefore, Path pathAfter) {
this.pathBefore = pathBefore;
this.pathAfter = pathAfter;
}
public void compute() throws IOException, DataException {
try {
this.preExecute();
this.copyPreviousStateToMergePath();
this.loadMetadata();
this.applyDifferences();
this.copyMetadata();
} finally {
this.postExecute();
}
}
private void preExecute() throws DataException {
this.createRandomIdentifier();
this.createOutputDirectory();
}
private void postExecute() {
}
private void createRandomIdentifier() {
this.identifier = UUID.randomUUID().toString();
}
private void createOutputDirectory() throws DataException {
// Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware
String baseDir = Settings.getInstance().getTempDataPath();
Path tempDir = Paths.get(baseDir, "merge", this.identifier);
try {
Files.createDirectories(tempDir);
} catch (IOException e) {
throw new DataException("Unable to create temp directory");
}
this.mergePath = tempDir;
}
private void copyPreviousStateToMergePath() throws IOException {
ArbitraryDataMerge.copyDirPathToBaseDir(this.pathBefore, this.mergePath, Paths.get(""));
}
private void loadMetadata() throws IOException, DataException {
this.metadata = new ArbitraryDataMetadataPatch(this.pathAfter);
this.metadata.read();
}
private void applyDifferences() throws IOException, DataException {
List<Path> addedPaths = this.metadata.getAddedPaths();
for (Path path : addedPaths) {
LOGGER.trace("File was added: {}", path.toString());
Path filePath = Paths.get(this.pathAfter.toString(), path.toString());
ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, path);
}
List<ModifiedPath> modifiedPaths = this.metadata.getModifiedPaths();
for (ModifiedPath modifiedPath : modifiedPaths) {
LOGGER.trace("File was modified: {}", modifiedPath.toString());
this.applyPatch(modifiedPath);
}
List<Path> removedPaths = this.metadata.getRemovedPaths();
for (Path path : removedPaths) {
LOGGER.trace("File was removed: {}", path.toString());
ArbitraryDataMerge.deletePathInBaseDir(this.mergePath, path);
}
}
private void applyPatch(ModifiedPath modifiedPath) throws IOException, DataException {
if (modifiedPath.getDiffType() == DiffType.UNIFIED_DIFF) {
// Create destination file from patch
UnifiedDiffPatch unifiedDiffPatch = new UnifiedDiffPatch(pathBefore, pathAfter, mergePath);
unifiedDiffPatch.apply(modifiedPath.getPath());
}
else if (modifiedPath.getDiffType() == DiffType.COMPLETE_FILE) {
// Copy complete file
Path filePath = Paths.get(this.pathAfter.toString(), modifiedPath.getPath().toString());
ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, modifiedPath.getPath());
}
else {
throw new DataException(String.format("Unrecognized patch diff type: %s", modifiedPath.getDiffType()));
}
}
private void copyMetadata() throws IOException {
Path filePath = Paths.get(this.pathAfter.toString(), ".qortal");
ArbitraryDataMerge.copyPathToBaseDir(filePath, this.mergePath, Paths.get(".qortal"));
}
private static void copyPathToBaseDir(Path source, Path base, Path relativePath) throws IOException {
if (!Files.exists(source)) {
throw new IOException(String.format("File not found: %s", source.toString()));
}
File sourceFile = source.toFile();
Path dest = Paths.get(base.toString(), relativePath.toString());
LOGGER.trace("Copying {} to {}", source, dest);
if (sourceFile.isFile()) {
Files.copy(source, dest, StandardCopyOption.REPLACE_EXISTING);
}
else if (sourceFile.isDirectory()) {
FilesystemUtils.copyAndReplaceDirectory(source.toString(), dest.toString());
}
else {
throw new IOException(String.format("Invalid file: %s", source.toString()));
}
}
private static void copyDirPathToBaseDir(Path source, Path base, Path relativePath) throws IOException {
if (!Files.exists(source)) {
throw new IOException(String.format("File not found: %s", source.toString()));
}
Path dest = Paths.get(base.toString(), relativePath.toString());
LOGGER.trace("Copying {} to {}", source, dest);
FilesystemUtils.copyAndReplaceDirectory(source.toString(), dest.toString());
}
private static void deletePathInBaseDir(Path base, Path relativePath) throws IOException {
Path dest = Paths.get(base.toString(), relativePath.toString());
File file = new File(dest.toString());
if (file.exists() && file.isFile()) {
if (FilesystemUtils.pathInsideDataOrTempPath(dest)) {
LOGGER.trace("Deleting file {}", dest);
Files.delete(dest);
}
}
if (file.exists() && file.isDirectory()) {
if (FilesystemUtils.pathInsideDataOrTempPath(dest)) {
LOGGER.trace("Deleting directory {}", dest);
FileUtils.deleteDirectory(file);
}
}
}
public Path getMergePath() {
return this.mergePath;
}
}
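A brief sketch of how the merge flow above might be invoked, assuming pathBefore holds the previously built state and pathAfter holds a freshly fetched patch layer; both path variables are illustrative:

// Hypothetical usage of the merge flow above
ArbitraryDataMerge merge = new ArbitraryDataMerge(previousStatePath, patchLayerPath);
merge.compute(); // copies the previous state, then applies added/modified/removed paths from the patch metadata
Path mergedState = merge.getMergePath(); // merged result, written under <tempDataPath>/merge/<random-uuid>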

View File

@ -0,0 +1,536 @@
package org.qortal.arbitrary;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.crypto.AES;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.settings.Settings;
import org.qortal.transform.Transformer;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.ZipUtils;
import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import java.io.File;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
public class ArbitraryDataReader {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataReader.class);
private final String resourceId;
private final ResourceIdType resourceIdType;
private final Service service;
private final String identifier;
private ArbitraryTransactionData transactionData;
private String secret58;
private Path filePath;
private boolean canRequestMissingFiles;
// Intermediate paths
private final Path workingPath;
private final Path uncompressedPath;
// Stats (available for synchronous builds only)
private int layerCount;
private byte[] latestSignature;
public ArbitraryDataReader(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
// Ensure names are always lowercase
if (resourceIdType == ResourceIdType.NAME) {
resourceId = resourceId.toLowerCase();
}
// If identifier is a blank string, or reserved keyword "default", treat it as null
if (identifier == null || identifier.equals("") || identifier.equals("default")) {
identifier = null;
}
this.resourceId = resourceId;
this.resourceIdType = resourceIdType;
this.service = service;
this.identifier = identifier;
this.workingPath = this.buildWorkingPath();
this.uncompressedPath = Paths.get(this.workingPath.toString(), "data");
// By default we can request missing files
// Callers can use setCanRequestMissingFiles(false) to prevent it
this.canRequestMissingFiles = true;
}
private Path buildWorkingPath() {
// Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware
String baseDir = Settings.getInstance().getTempDataPath();
String identifier = this.identifier != null ? this.identifier : "default";
return Paths.get(baseDir, "reader", this.resourceIdType.toString(), this.resourceId, this.service.toString(), identifier);
}
public boolean isCachedDataAvailable() {
// If this resource is in the build queue then we shouldn't attempt to serve
// cached data, as it may not be fully built
if (ArbitraryDataBuildManager.getInstance().isInBuildQueue(this.createQueueItem())) {
return false;
}
// Not in the build queue - so check the cache itself
ArbitraryDataCache cache = new ArbitraryDataCache(this.uncompressedPath, false,
this.resourceId, this.resourceIdType, this.service, this.identifier);
if (cache.isCachedDataAvailable()) {
this.filePath = this.uncompressedPath;
return true;
}
return false;
}
public boolean isBuilding() {
return ArbitraryDataBuildManager.getInstance().isInBuildQueue(this.createQueueItem());
}
private ArbitraryDataBuildQueueItem createQueueItem() {
return new ArbitraryDataBuildQueueItem(this.resourceId, this.resourceIdType, this.service, this.identifier);
}
/**
* loadAsynchronously
*
* Attempts to load the resource asynchronously
* This adds the build task to a queue, and the result will be cached when complete
* To check the status of the build, periodically call isCachedDataAvailable()
* Once it returns true, you can then use getFilePath() to access the data itself.
* @return true if added or already present in queue; false if not
*/
public boolean loadAsynchronously() {
return ArbitraryDataBuildManager.getInstance().addToBuildQueue(this.createQueueItem());
}
/**
* loadSynchronously
*
* Attempts to load the resource synchronously
* Warning: this can block for a long time when building or fetching complex data
* If no exception is thrown, you can then use getFilePath() to access the data immediately after returning
*
* @param overwrite - set to true to force rebuild an existing cache
* @throws IOException
* @throws DataException
* @throws MissingDataException
*/
public void loadSynchronously(boolean overwrite) throws DataException, IOException, MissingDataException {
try {
ArbitraryDataCache cache = new ArbitraryDataCache(this.uncompressedPath, overwrite,
this.resourceId, this.resourceIdType, this.service, this.identifier);
if (cache.isCachedDataAvailable()) {
// Use cached data
this.filePath = this.uncompressedPath;
return;
}
this.preExecute();
this.deleteExistingFiles();
this.fetch();
this.decrypt();
this.uncompress();
this.validate();
} catch (DataException e) {
this.deleteWorkingDirectory();
throw new DataException(e.getMessage());
} finally {
this.postExecute();
}
}
private void preExecute() throws DataException {
ArbitraryDataBuildManager.getInstance().setBuildInProgress(true);
this.checkEnabled();
this.createWorkingDirectory();
this.createUncompressedDirectory();
}
private void postExecute() {
ArbitraryDataBuildManager.getInstance().setBuildInProgress(false);
}
private void checkEnabled() throws DataException {
if (!Settings.getInstance().isQdnEnabled()) {
throw new DataException("QDN is disabled in settings");
}
}
private void createWorkingDirectory() throws DataException {
try {
Files.createDirectories(this.workingPath);
} catch (IOException e) {
throw new DataException("Unable to create temp directory");
}
}
/**
* Working directory should only be deleted on failure, since it is currently used to
* serve a cached version of the resource for subsequent requests.
* @throws IOException
*/
private void deleteWorkingDirectory() throws IOException {
FilesystemUtils.safeDeleteDirectory(this.workingPath, true);
}
private void createUncompressedDirectory() throws DataException {
try {
// Create parent directory
Files.createDirectories(this.uncompressedPath.getParent());
// Ensure child directory doesn't already exist
FileUtils.deleteDirectory(this.uncompressedPath.toFile());
} catch (IOException e) {
throw new DataException("Unable to create uncompressed directory");
}
}
private void deleteExistingFiles() {
final Path uncompressedPath = this.uncompressedPath;
if (FilesystemUtils.pathInsideDataOrTempPath(uncompressedPath)) {
if (Files.exists(uncompressedPath)) {
LOGGER.trace("Attempting to delete path {}", this.uncompressedPath);
try {
Files.walkFileTree(uncompressedPath, new SimpleFileVisitor<>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Files.delete(file);
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
// Don't delete the parent directory, as we want to leave an empty folder
if (dir.compareTo(uncompressedPath) == 0) {
return FileVisitResult.CONTINUE;
}
if (e == null) {
Files.delete(dir);
return FileVisitResult.CONTINUE;
} else {
throw e;
}
}
});
} catch (IOException e) {
LOGGER.debug("Unable to delete file or directory: {}", e.getMessage());
}
}
}
}
private void fetch() throws DataException, IOException, MissingDataException {
switch (resourceIdType) {
case FILE_HASH:
this.fetchFromFileHash();
break;
case NAME:
this.fetchFromName();
break;
case SIGNATURE:
this.fetchFromSignature();
break;
case TRANSACTION_DATA:
this.fetchFromTransactionData(this.transactionData);
break;
default:
throw new DataException(String.format("Unknown resource ID type specified: %s", resourceIdType.toString()));
}
}
private void fetchFromFileHash() throws DataException {
// Load data file directly from the hash (without a signature)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash58(resourceId, null);
// Set filePath to the location of the ArbitraryDataFile
this.filePath = arbitraryDataFile.getFilePath();
}
private void fetchFromName() throws DataException, IOException, MissingDataException {
try {
// Build the existing state using past transactions
ArbitraryDataBuilder builder = new ArbitraryDataBuilder(this.resourceId, this.service, this.identifier);
builder.build();
Path builtPath = builder.getFinalPath();
if (builtPath == null) {
throw new DataException("Unable to build path");
}
// Update stats
this.layerCount = builder.getLayerCount();
this.latestSignature = builder.getLatestSignature();
// Set filePath to the builtPath
this.filePath = builtPath;
} catch (InvalidObjectException e) {
// Hash validation failed. Invalidate the cache for this name, so it can be rebuilt
LOGGER.info("Deleting {}", this.workingPath.toString());
FilesystemUtils.safeDeleteDirectory(this.workingPath, false);
throw(e);
}
}
private void fetchFromSignature() throws DataException, IOException, MissingDataException {
// Load the full transaction data from the database so we can access the file hashes
ArbitraryTransactionData transactionData;
try (final Repository repository = RepositoryManager.getRepository()) {
transactionData = (ArbitraryTransactionData) repository.getTransactionRepository().fromSignature(Base58.decode(resourceId));
}
if (transactionData == null) {
throw new DataException(String.format("Transaction data not found for signature %s", this.resourceId));
}
this.fetchFromTransactionData(transactionData);
}
private void fetchFromTransactionData(ArbitraryTransactionData transactionData) throws DataException, IOException, MissingDataException {
if (transactionData == null) {
throw new DataException(String.format("Transaction data not found for signature %s", this.resourceId));
}
// Load hashes
byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();
// Load secret
byte[] secret = transactionData.getSecret();
if (secret != null) {
this.secret58 = Base58.encode(secret);
}
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
ArbitraryTransactionUtils.checkAndRelocateMiscFiles(transactionData);
arbitraryDataFile.setMetadataHash(metadataHash);
if (!arbitraryDataFile.allFilesExist()) {
if (ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName())) {
throw new DataException(
String.format("Unable to request missing data for file %s because the name is blocked", arbitraryDataFile));
}
else {
// Ask the arbitrary data manager to fetch data for this transaction
String message;
if (this.canRequestMissingFiles) {
boolean requested = ArbitraryDataManager.getInstance().fetchData(transactionData);
if (requested) {
message = String.format("Requested missing data for file %s", arbitraryDataFile);
} else {
message = String.format("Unable to reissue request for missing file %s for signature %s due to rate limit. Please try again later.", arbitraryDataFile, Base58.encode(transactionData.getSignature()));
}
}
else {
message = String.format("Missing data for file %s", arbitraryDataFile);
}
// Throw a missing data exception, which allows subsequent layers to fetch data
LOGGER.info(message);
throw new MissingDataException(message);
}
}
if (arbitraryDataFile.allChunksExist() && !arbitraryDataFile.exists()) {
// We have all the chunks but not the complete file, so join them
arbitraryDataFile.join();
}
// If the complete file still doesn't exist then something went wrong
if (!arbitraryDataFile.exists()) {
throw new IOException(String.format("File doesn't exist: %s", arbitraryDataFile));
}
// Ensure the complete hash matches the joined chunks
if (!Arrays.equals(arbitraryDataFile.digest(), digest)) {
// Delete the invalid file
arbitraryDataFile.delete();
throw new DataException("Unable to validate complete file hash");
}
// Ensure the file's size matches the size reported by the transaction (throws a DataException if not)
arbitraryDataFile.validateFileSize(transactionData.getSize());
// Set filePath to the location of the ArbitraryDataFile
this.filePath = arbitraryDataFile.getFilePath();
}
private void decrypt() throws DataException {
// Decrypt if we have the secret key.
byte[] secret = this.secret58 != null ? Base58.decode(this.secret58) : null;
if (secret != null && secret.length == Transformer.AES256_LENGTH) {
try {
Path unencryptedPath = Paths.get(this.workingPath.toString(), "zipped.zip");
SecretKey aesKey = new SecretKeySpec(secret, 0, secret.length, "AES");
AES.decryptFile("AES", aesKey, this.filePath.toString(), unencryptedPath.toString());
// Replace filePath pointer with the decrypted file path
// Don't delete the original ArbitraryDataFile, as this is handled in the cleanup phase
this.filePath = unencryptedPath;
} catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException | NoSuchPaddingException
| BadPaddingException | IllegalBlockSizeException | IOException | InvalidKeyException e) {
// TODO: delete files and block this resource if privateDataEnabled is false
throw new DataException(String.format("Unable to decrypt file at path %s: %s", this.filePath, e.getMessage()));
}
} else {
// Assume it is unencrypted. This will be the case when we have built a custom path by combining
// multiple decrypted archives into a single state.
}
}
private void uncompress() throws IOException, DataException {
if (this.filePath == null || !Files.exists(this.filePath)) {
throw new DataException("Can't uncompress non-existent file path");
}
File file = new File(this.filePath.toString());
if (file.isDirectory()) {
// Already a directory - nothing to uncompress
// We still need to copy the directory to its final destination if it's not already there
this.moveFilePathToFinalDestination();
return;
}
try {
// Default to ZIP compression - this is needed for previews
Compression compression = transactionData != null ? transactionData.getCompression() : Compression.ZIP;
// Handle each type of compression
if (compression == Compression.ZIP) {
ZipUtils.unzip(this.filePath.toString(), this.uncompressedPath.getParent().toString());
}
else if (compression == Compression.NONE) {
Files.createDirectories(this.uncompressedPath);
Path finalPath = Paths.get(this.uncompressedPath.toString(), "data");
this.filePath.toFile().renameTo(finalPath.toFile());
}
else {
throw new DataException(String.format("Unrecognized compression type: %s", transactionData.getCompression()));
}
} catch (IOException e) {
throw new DataException(String.format("Unable to unzip file: %s", e.getMessage()));
}
// Replace filePath pointer with the uncompressed file path
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
if (Files.exists(this.filePath)) {
Files.delete(this.filePath);
}
}
this.filePath = this.uncompressedPath;
}
private void validate() throws IOException, DataException {
if (this.service.isValidationRequired()) {
Service.ValidationResult result = this.service.validate(this.filePath);
if (result != Service.ValidationResult.OK) {
throw new DataException(String.format("Validation of %s failed: %s", this.service, result.toString()));
}
}
}
private void moveFilePathToFinalDestination() throws IOException, DataException {
if (this.filePath.compareTo(this.uncompressedPath) != 0) {
File source = new File(this.filePath.toString());
File dest = new File(this.uncompressedPath.toString());
if (!source.exists()) {
throw new DataException("Source directory doesn't exist");
}
// Ensure destination directory doesn't exist
FileUtils.deleteDirectory(dest);
// Move files to destination
FilesystemUtils.copyAndReplaceDirectory(source.toString(), dest.toString());
try {
// Delete existing
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
File directory = new File(this.filePath.toString());
FileUtils.deleteDirectory(directory);
}
// ... and its parent directory if empty
Path parentDirectory = this.filePath.getParent();
if (FilesystemUtils.pathInsideDataOrTempPath(parentDirectory)) {
Files.deleteIfExists(parentDirectory);
}
} catch (DirectoryNotEmptyException e) {
// No need to log anything
} catch (IOException e) {
// This will eventually be cleaned up by a maintenance process, so log the error and continue
LOGGER.debug("Unable to cleanup directories: {}", e.getMessage());
}
// Finally, update filePath to point to uncompressedPath
this.filePath = this.uncompressedPath;
}
}
public void setTransactionData(ArbitraryTransactionData transactionData) {
this.transactionData = transactionData;
}
public void setSecret58(String secret58) {
this.secret58 = secret58;
}
public Path getFilePath() {
return this.filePath;
}
public int getLayerCount() {
return this.layerCount;
}
public byte[] getLatestSignature() {
return this.latestSignature;
}
/**
* Use this setter to ensure that we only read existing
* data without requesting any missing files.
*
* @param canRequestMissingFiles - whether or not fetching missing files is allowed
*/
public void setCanRequestMissingFiles(boolean canRequestMissingFiles) {
this.canRequestMissingFiles = canRequestMissingFiles;
}
}
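The two loading modes documented above can be sketched from the caller's side as follows; the resource name, Service.WEBSITE value and the polling interval are illustrative assumptions, and exception handling is omitted:

// Hypothetical caller-side sketch of the synchronous and asynchronous flows
ArbitraryDataReader reader = new ArbitraryDataReader("example-name", ResourceIdType.NAME, Service.WEBSITE, null);

// Synchronous: blocks until built, or throws DataException / MissingDataException
reader.loadSynchronously(false);
Path builtPath = reader.getFilePath();

// Asynchronous: queue the build, then poll the cache
if (reader.loadAsynchronously()) {
while (!reader.isCachedDataAvailable()) {
Thread.sleep(1000L); // poll until the build completes (interval is illustrative)
}
builtPath = reader.getFilePath();
}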

View File

@ -0,0 +1,212 @@
package org.qortal.arbitrary;
import com.google.common.io.Resources;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.HTMLParser;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
public class ArbitraryDataRenderer {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataRenderer.class);
private final String resourceId;
private final ResourceIdType resourceIdType;
private final Service service;
private String inPath;
private final String secret58;
private final String prefix;
private final boolean usePrefix;
private final boolean async;
private final HttpServletRequest request;
private final HttpServletResponse response;
private final ServletContext context;
public ArbitraryDataRenderer(String resourceId, ResourceIdType resourceIdType, Service service, String inPath,
String secret58, String prefix, boolean usePrefix, boolean async,
HttpServletRequest request, HttpServletResponse response, ServletContext context) {
this.resourceId = resourceId;
this.resourceIdType = resourceIdType;
this.service = service;
this.inPath = inPath;
this.secret58 = secret58;
this.prefix = prefix;
this.usePrefix = usePrefix;
this.async = async;
this.request = request;
this.response = response;
this.context = context;
}
public HttpServletResponse render() {
if (!inPath.startsWith(File.separator)) {
inPath = File.separator + inPath;
}
// Don't render data if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return ArbitraryDataRenderer.getResponse(response, 500, "QDN is disabled in settings");
}
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(resourceId, resourceIdType, service, null);
arbitraryDataReader.setSecret58(secret58); // Optional, used for loading encrypted file hashes only
try {
if (!arbitraryDataReader.isCachedDataAvailable()) {
// If async is requested, show a loading screen whilst build is in progress
if (async) {
arbitraryDataReader.loadAsynchronously();
return this.getLoadingResponse(service, resourceId);
}
// Otherwise, loop until we have data
int attempts = 0;
while (!Controller.isStopping()) {
attempts++;
if (!arbitraryDataReader.isBuilding()) {
try {
arbitraryDataReader.loadSynchronously(false);
break;
} catch (MissingDataException e) {
if (attempts > 5) {
// Give up after 5 attempts
return ArbitraryDataRenderer.getResponse(response, 404, "Data unavailable. Please try again later.");
}
}
}
Thread.sleep(3000L);
}
}
} catch (Exception e) {
LOGGER.info(String.format("Unable to load %s %s: %s", service, resourceId, e.getMessage()));
return ArbitraryDataRenderer.getResponse(response, 500, "Error 500: Internal Server Error");
}
java.nio.file.Path path = arbitraryDataReader.getFilePath();
if (path == null) {
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}
String unzippedPath = path.toString();
try {
String filename = this.getFilename(unzippedPath, inPath);
String filePath = Paths.get(unzippedPath, filename).toString();
if (HTMLParser.isHtmlFile(filename)) {
// HTML file - needs to be parsed
byte[] data = Files.readAllBytes(Paths.get(filePath)); // TODO: limit file size that can be read into memory
HTMLParser htmlParser = new HTMLParser(resourceId, inPath, prefix, usePrefix, data);
htmlParser.setDocumentBaseUrl();
response.setContentType(context.getMimeType(filename));
response.setContentLength(htmlParser.getData().length);
response.getOutputStream().write(htmlParser.getData());
}
else {
// Regular file - can be streamed directly
File file = new File(filePath);
response.setContentType(context.getMimeType(filename));
// Set Content-Length before writing the body, otherwise the header has no effect
response.setContentLength((int) file.length());
try (FileInputStream inputStream = new FileInputStream(file)) {
byte[] buffer = new byte[10240];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
response.getOutputStream().write(buffer, 0, bytesRead);
}
}
}
return response;
} catch (FileNotFoundException | NoSuchFileException e) {
LOGGER.info("Unable to serve file: {}", e.getMessage());
if (inPath.equals("/")) {
// Delete the unzipped folder if no index file was found
try {
FileUtils.deleteDirectory(new File(unzippedPath));
} catch (IOException ioException) {
LOGGER.debug("Unable to delete directory: {}", unzippedPath, e);
}
}
} catch (IOException e) {
LOGGER.info("Unable to serve file at path {}: {}", inPath, e.getMessage());
}
return ArbitraryDataRenderer.getResponse(response, 404, "Error 404: File Not Found");
}
private String getFilename(String directory, String userPath) {
if (userPath == null || userPath.endsWith("/") || userPath.equals("")) {
// Locate index file
List<String> indexFiles = ArbitraryDataRenderer.indexFiles();
for (String indexFile : indexFiles) {
Path path = Paths.get(directory, indexFile);
if (Files.exists(path)) {
return userPath + indexFile;
}
}
}
return userPath;
}
private HttpServletResponse getLoadingResponse(Service service, String name) {
String responseString = "";
URL url = Resources.getResource("loading/index.html");
try {
responseString = Resources.toString(url, StandardCharsets.UTF_8);
// Replace vars
responseString = responseString.replace("%%SERVICE%%", service.toString());
responseString = responseString.replace("%%NAME%%", name);
} catch (IOException e) {
LOGGER.info("Unable to show loading screen: {}", e.getMessage());
}
return ArbitraryDataRenderer.getResponse(response, 503, responseString);
}
public static HttpServletResponse getResponse(HttpServletResponse response, int responseCode, String responseString) {
try {
byte[] responseData = responseString.getBytes();
response.setStatus(responseCode);
response.setContentLength(responseData.length);
response.getOutputStream().write(responseData);
} catch (IOException e) {
LOGGER.info("Error writing {} response", responseCode);
}
return response;
}
public static List<String> indexFiles() {
List<String> indexFiles = new ArrayList<>();
indexFiles.add("index.html");
indexFiles.add("index.htm");
indexFiles.add("default.html");
indexFiles.add("default.htm");
indexFiles.add("home.html");
indexFiles.add("home.htm");
return indexFiles;
}
}
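How a caller might wire up the renderer above, for example from a hypothetical HTTP handler that already has the servlet request, response and context; all surrounding names and values are illustrative:

// Hypothetical invocation of the renderer for a website resource, serving the root path asynchronously
ArbitraryDataRenderer renderer = new ArbitraryDataRenderer(
"example-name", ResourceIdType.NAME, Service.WEBSITE, "/",
null, null, false, true, request, response, context);
return renderer.render(); // streams the requested file, or returns a loading/404/500 page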

View File

@ -0,0 +1,301 @@
package org.qortal.arbitrary;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.data.arbitrary.ArbitraryResourceStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import static org.qortal.data.arbitrary.ArbitraryResourceStatus.Status;
public class ArbitraryDataResource {
protected final String resourceId;
protected final ResourceIdType resourceIdType;
protected final Service service;
protected final String identifier;
private List<ArbitraryTransactionData> transactions;
private ArbitraryTransactionData latestPutTransaction;
private int layerCount;
public ArbitraryDataResource(String resourceId, ResourceIdType resourceIdType, Service service, String identifier) {
this.resourceId = resourceId.toLowerCase();
this.resourceIdType = resourceIdType;
this.service = service;
// If identifier is a blank string, or reserved keyword "default", treat it as null
if (identifier == null || identifier.equals("") || identifier.equals("default")) {
identifier = null;
}
this.identifier = identifier;
}
public ArbitraryResourceStatus getStatus() {
if (resourceIdType != ResourceIdType.NAME) {
// We only support statuses for resources with a name
return new ArbitraryResourceStatus(Status.UNSUPPORTED);
}
// Check if the name is blocked
if (ResourceListManager.getInstance()
.listContains("blockedNames", this.resourceId, false)) {
return new ArbitraryResourceStatus(Status.BLOCKED);
}
// Firstly check the cache to see if it's already built
ArbitraryDataReader arbitraryDataReader = new ArbitraryDataReader(
resourceId, resourceIdType, service, identifier);
if (arbitraryDataReader.isCachedDataAvailable()) {
return new ArbitraryResourceStatus(Status.READY);
}
// Next check if there's a build in progress
ArbitraryDataBuildQueueItem queueItem =
new ArbitraryDataBuildQueueItem(resourceId, resourceIdType, service, identifier);
if (ArbitraryDataBuildManager.getInstance().isInBuildQueue(queueItem)) {
return new ArbitraryResourceStatus(Status.BUILDING);
}
// Check if a build has failed
if (ArbitraryDataBuildManager.getInstance().isInFailedBuildsList(queueItem)) {
return new ArbitraryResourceStatus(Status.BUILD_FAILED);
}
// Check if we have all data locally for this resource
if (!this.allFilesDownloaded()) {
if (this.isDownloading()) {
return new ArbitraryResourceStatus(Status.DOWNLOADING);
}
else if (this.isDataPotentiallyAvailable()) {
return new ArbitraryResourceStatus(Status.NOT_STARTED);
}
return new ArbitraryResourceStatus(Status.MISSING_DATA);
}
// We have all data locally
return new ArbitraryResourceStatus(Status.DOWNLOADED);
}
public boolean delete() {
try {
this.fetchTransactions();
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
for (ArbitraryTransactionData transactionData : transactionDataList) {
byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
byte[] signature = transactionData.getSignature();
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
// Delete any chunks or complete files from each transaction
arbitraryDataFile.deleteAll();
}
// Also delete cached data for the entire resource
this.deleteCache();
return true;
} catch (DataException | IOException e) {
return false;
}
}
public void deleteCache() throws IOException {
String baseDir = Settings.getInstance().getTempDataPath();
String identifier = this.identifier != null ? this.identifier : "default";
Path cachePath = Paths.get(baseDir, "reader", this.resourceIdType.toString(), this.resourceId, this.service.toString(), identifier);
if (cachePath.toFile().exists()) {
FilesystemUtils.safeDeleteDirectory(cachePath, true);
}
}
private boolean allFilesDownloaded() {
try {
this.fetchTransactions();
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
for (ArbitraryTransactionData transactionData : transactionDataList) {
if (!ArbitraryTransactionUtils.completeFileExists(transactionData) ||
!ArbitraryTransactionUtils.allChunksExist(transactionData)) {
return false;
}
}
return true;
} catch (DataException e) {
return false;
}
}
private boolean isRateLimited() {
try {
this.fetchTransactions();
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
for (ArbitraryTransactionData transactionData : transactionDataList) {
if (ArbitraryDataManager.getInstance().isSignatureRateLimited(transactionData.getSignature())) {
return true;
}
}
return false;
} catch (DataException e) {
return false;
}
}
/**
* Best guess as to whether data might be available
* This is only used to give an indication to the user of progress
* @return - whether data might be available on the network
*/
private boolean isDataPotentiallyAvailable() {
try {
this.fetchTransactions();
Long now = NTP.getTime();
if (now == null) {
return false;
}
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
for (ArbitraryTransactionData transactionData : transactionDataList) {
long lastRequestTime = ArbitraryDataManager.getInstance().lastRequestForSignature(transactionData.getSignature());
// If we haven't requested yet, or requested in the last 30 seconds, there's still a
// chance that data is on its way but hasn't arrived yet
if (lastRequestTime == 0 || now - lastRequestTime < 30 * 1000L) {
return true;
}
}
return false;
} catch (DataException e) {
return false;
}
}
/**
* Best guess as to whether we are currently downloading a resource
* This is only used to give an indication to the user of progress
* @return - whether we are trying to download the resource
*/
private boolean isDownloading() {
try {
this.fetchTransactions();
Long now = NTP.getTime();
if (now == null) {
return false;
}
List<ArbitraryTransactionData> transactionDataList = new ArrayList<>(this.transactions);
for (ArbitraryTransactionData transactionData : transactionDataList) {
long lastRequestTime = ArbitraryDataManager.getInstance().lastRequestForSignature(transactionData.getSignature());
// If we have requested data in the last 30 seconds, treat it as "downloading"
if (lastRequestTime > 0 && now - lastRequestTime < 30 * 1000L) {
return true;
}
}
// FUTURE: we may want to check for file hashes (including the metadata file hash) in
// ArbitraryDataManager.arbitraryDataFileRequests and return true if one is found.
return false;
} catch (DataException e) {
return false;
}
}
private void fetchTransactions() throws DataException {
if (this.transactions != null && !this.transactions.isEmpty()) {
// Already fetched
return;
}
try (final Repository repository = RepositoryManager.getRepository()) {
// Get the most recent PUT
ArbitraryTransactionData latestPut = repository.getArbitraryRepository()
.getLatestTransaction(this.resourceId, this.service, ArbitraryTransactionData.Method.PUT, this.identifier);
if (latestPut == null) {
String message = String.format("Couldn't find PUT transaction for name %s, service %s and identifier %s",
this.resourceId, this.service, this.identifierString());
throw new DataException(message);
}
this.latestPutTransaction = latestPut;
// Load all transactions since the latest PUT
List<ArbitraryTransactionData> transactionDataList = repository.getArbitraryRepository()
.getArbitraryTransactions(this.resourceId, this.service, this.identifier, latestPut.getTimestamp());
this.transactions = transactionDataList;
this.layerCount = transactionDataList.size();
}
}
private String resourceIdString() {
return resourceId != null ? resourceId : "";
}
private String resourceIdTypeString() {
return resourceIdType != null ? resourceIdType.toString() : "";
}
private String serviceString() {
return service != null ? service.toString() : "";
}
private String identifierString() {
return identifier != null ? identifier : "";
}
@Override
public String toString() {
return String.format("%s %s %s", this.serviceString(), this.resourceIdString(), this.identifierString());
}
/**
* @return unique key used to identify this resource
*/
public String getUniqueKey() {
return String.format("%s-%s-%s", this.service, this.resourceId, this.identifier).toLowerCase();
}
public String getResourceId() {
return this.resourceId;
}
public Service getService() {
return this.service;
}
public String getIdentifier() {
return this.identifier;
}
}
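A short sketch of querying the status logic above; the name and Service.WEBSITE value are illustrative:

// Hypothetical status check for a named resource
ArbitraryDataResource resource = new ArbitraryDataResource("example-name", ResourceIdType.NAME, Service.WEBSITE, null);
ArbitraryResourceStatus status = resource.getStatus(); // READY, BUILDING, DOWNLOADING, MISSING_DATA, etc.
boolean removed = resource.delete(); // deletes chunks, complete files and the cached build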

View File

@ -0,0 +1,285 @@
package org.qortal.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.ArbitraryDataFile.ResourceIdType;
import org.qortal.arbitrary.ArbitraryDataDiff.*;
import org.qortal.arbitrary.metadata.ArbitraryDataMetadataPatch;
import org.qortal.arbitrary.misc.Service;
import org.qortal.block.BlockChain;
import org.qortal.crypto.Crypto;
import org.qortal.data.PaymentData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import org.qortal.data.transaction.BaseTransactionData;
import org.qortal.group.Group;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.transform.Transformer;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
public class ArbitraryDataTransactionBuilder {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataTransactionBuilder.class);
// Min transaction version required
private static final int MIN_TRANSACTION_VERSION = 5;
// Maximum number of PATCH layers allowed
private static final int MAX_LAYERS = 10;
// Maximum size difference (out of 1) allowed for PATCH transactions
private static final double MAX_SIZE_DIFF = 0.2d;
// Maximum proportion of files modified relative to total
private static final double MAX_FILE_DIFF = 0.5d;
private final String publicKey58;
private final Path path;
private final String name;
private Method method;
private final Service service;
private final String identifier;
private final Repository repository;
private int chunkSize = ArbitraryDataFile.CHUNK_SIZE;
private ArbitraryTransactionData arbitraryTransactionData;
private ArbitraryDataFile arbitraryDataFile;
public ArbitraryDataTransactionBuilder(Repository repository, String publicKey58, Path path, String name,
Method method, Service service, String identifier) {
this.repository = repository;
this.publicKey58 = publicKey58;
this.path = path;
this.name = name;
this.method = method;
this.service = service;
// If identifier is a blank string, or reserved keyword "default", treat it as null
if (identifier == null || identifier.equals("") || identifier.equals("default")) {
identifier = null;
}
this.identifier = identifier;
}
public void build() throws DataException {
try {
this.preExecute();
this.checkMethod();
this.createTransaction();
}
finally {
this.postExecute();
}
}
private void preExecute() {
}
private void postExecute() {
}
private void checkMethod() throws DataException {
if (this.method == null) {
// We need to automatically determine the method
this.method = this.determineMethodAutomatically();
}
}
private Method determineMethodAutomatically() throws DataException {
ArbitraryDataReader reader = new ArbitraryDataReader(this.name, ResourceIdType.NAME, this.service, this.identifier);
try {
reader.loadSynchronously(true);
} catch (Exception e) {
// Catch all exceptions if the existing resource cannot be loaded first time
// In these cases it's simplest to just use a PUT transaction
return Method.PUT;
}
try {
// Check layer count
int layerCount = reader.getLayerCount();
if (layerCount >= MAX_LAYERS) {
LOGGER.info("Reached maximum layer count ({} / {}) - using PUT", layerCount, MAX_LAYERS);
return Method.PUT;
}
// Check size of differences between this layer and previous layer
ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(reader.getFilePath(), this.path, reader.getLatestSignature());
patch.create();
long diffSize = FilesystemUtils.getDirectorySize(patch.getFinalPath());
long existingStateSize = FilesystemUtils.getDirectorySize(reader.getFilePath());
double difference = (double) diffSize / (double) existingStateSize;
if (difference > MAX_SIZE_DIFF) {
LOGGER.info("Reached maximum difference ({} / {}) - using PUT", difference, MAX_SIZE_DIFF);
return Method.PUT;
}
// Check number of modified files
ArbitraryDataMetadataPatch metadata = patch.getMetadata();
int totalFileCount = patch.getTotalFileCount();
int differencesCount = metadata.getFileDifferencesCount();
difference = (double) differencesCount / (double) totalFileCount;
if (difference > MAX_FILE_DIFF) {
LOGGER.info("Reached maximum file differences ({} / {}) - using PUT", difference, MAX_FILE_DIFF);
return Method.PUT;
}
// Check the patch types
// Limit this check to single file resources only for now
boolean atLeastOnePatch = false;
if (totalFileCount == 1) {
for (ModifiedPath path : metadata.getModifiedPaths()) {
if (path.getDiffType() != DiffType.COMPLETE_FILE) {
atLeastOnePatch = true;
}
}
}
if (!atLeastOnePatch) {
LOGGER.info("Patch consists of complete files only - using PUT");
return Method.PUT;
}
// State is appropriate for a PATCH transaction
return Method.PATCH;
}
catch (IOException | DataException e) {
// Handle matching states separately, as it's best to block transactions with duplicate states
if (e.getMessage().equals("Current state matches previous state. Nothing to do.")) {
throw new DataException(e.getMessage());
}
LOGGER.info("Caught exception: {}", e.getMessage());
LOGGER.info("Unable to load existing resource - using PUT to overwrite it.");
return Method.PUT;
}
}
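// Worked example of the PUT-vs-PATCH thresholds above (illustrative numbers): if the previous
// state is 100 kB and the new patch directory is 30 kB, diffSize / existingStateSize = 0.3,
// which exceeds MAX_SIZE_DIFF (0.2), so a full PUT is used instead of a PATCH. Likewise,
// modifying 3 of 4 files gives 0.75 > MAX_FILE_DIFF (0.5), which also forces a PUT.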
private void createTransaction() throws DataException {
arbitraryDataFile = null;
try {
Long now = NTP.getTime();
if (now == null) {
throw new DataException("NTP time not synced yet");
}
// Ensure that this chain supports transactions necessary for complex arbitrary data
int transactionVersion = Transaction.getVersionByTimestamp(now);
if (transactionVersion < MIN_TRANSACTION_VERSION) {
throw new DataException("Transaction version unsupported on this blockchain.");
}
if (publicKey58 == null || path == null) {
throw new DataException("Missing public key or path");
}
byte[] creatorPublicKey = Base58.decode(publicKey58);
final String creatorAddress = Crypto.toAddress(creatorPublicKey);
byte[] lastReference = repository.getAccountRepository().getLastReference(creatorAddress);
if (lastReference == null) {
// Use a random last reference on the very first transaction for an account
// Code copied from CrossChainResource.buildAtMessage()
// We already require PoW on all arbitrary transactions, so no additional logic is needed
Random random = new Random();
lastReference = new byte[Transformer.SIGNATURE_LENGTH];
random.nextBytes(lastReference);
}
Compression compression = Compression.ZIP;
// FUTURE? Use zip compression for directories, or no compression for single files
// Compression compression = (path.toFile().isDirectory()) ? Compression.ZIP : Compression.NONE;
ArbitraryDataWriter arbitraryDataWriter = new ArbitraryDataWriter(path, name, service, identifier, method, compression);
try {
arbitraryDataWriter.setChunkSize(this.chunkSize);
arbitraryDataWriter.save();
} catch (IOException | DataException | InterruptedException | RuntimeException | MissingDataException e) {
LOGGER.info("Unable to create arbitrary data file: {}", e.getMessage());
throw new DataException(e.getMessage());
}
// Get main file
arbitraryDataFile = arbitraryDataWriter.getArbitraryDataFile();
if (arbitraryDataFile == null) {
throw new DataException("Arbitrary data file is null");
}
// Get chunks metadata file
ArbitraryDataFile metadataFile = arbitraryDataFile.getMetadataFile();
if (metadataFile == null && arbitraryDataFile.chunkCount() > 1) {
throw new DataException(String.format("Chunks metadata data file is null but there are %d chunks", arbitraryDataFile.chunkCount()));
}
String digest58 = arbitraryDataFile.digest58();
if (digest58 == null) {
LOGGER.error("Unable to calculate file digest");
throw new DataException("Unable to calculate file digest");
}
final BaseTransactionData baseTransactionData = new BaseTransactionData(now, Group.NO_GROUP,
lastReference, creatorPublicKey, 0L, null);
final int size = (int) arbitraryDataFile.size();
final int version = 5;
final int nonce = 0;
byte[] secret = arbitraryDataFile.getSecret();
final ArbitraryTransactionData.DataType dataType = ArbitraryTransactionData.DataType.DATA_HASH;
final byte[] digest = arbitraryDataFile.digest();
final byte[] metadataHash = (metadataFile != null) ? metadataFile.getHash() : null;
final List<PaymentData> payments = new ArrayList<>();
ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
version, service, nonce, size, name, identifier, method,
secret, compression, digest, dataType, metadataHash, payments);
this.arbitraryTransactionData = transactionData;
} catch (DataException e) {
if (arbitraryDataFile != null) {
arbitraryDataFile.deleteAll();
}
throw(e);
}
}
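/**
 * Compute the proof-of-work nonce for the built transaction, then check that it is
 * valid as an unconfirmed transaction. On validation failure the associated data
 * files are deleted and a DataException is thrown.
 */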
public void computeNonce() throws DataException {
if (this.arbitraryTransactionData == null) {
throw new DataException("Arbitrary transaction data is required to compute nonce");
}
ArbitraryTransaction transaction = (ArbitraryTransaction) Transaction.fromData(repository, this.arbitraryTransactionData);
LOGGER.info("Computing nonce...");
transaction.computeNonce();
Transaction.ValidationResult result = transaction.isValidUnconfirmed();
if (result != Transaction.ValidationResult.OK) {
arbitraryDataFile.deleteAll();
throw new DataException(String.format("Arbitrary transaction invalid: %s", result));
}
LOGGER.info("Transaction is valid");
}
public ArbitraryTransactionData getArbitraryTransactionData() {
return this.arbitraryTransactionData;
}
public ArbitraryDataFile getArbitraryDataFile() {
return this.arbitraryDataFile;
}
public void setChunkSize(int chunkSize) {
this.chunkSize = chunkSize;
}
}

View File

@ -0,0 +1,342 @@
package org.qortal.arbitrary;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.arbitrary.metadata.ArbitraryDataTransactionMetadata;
import org.qortal.arbitrary.misc.Service;
import org.qortal.crypto.Crypto;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import org.qortal.crypto.AES;
import org.qortal.repository.DataException;
import org.qortal.arbitrary.ArbitraryDataFile.*;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.ZipUtils;
import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
public class ArbitraryDataWriter {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataWriter.class);
private Path filePath;
private final String name;
private final Service service;
private final String identifier;
private final Method method;
private final Compression compression;
private int chunkSize = ArbitraryDataFile.CHUNK_SIZE;
private SecretKey aesKey;
private ArbitraryDataFile arbitraryDataFile;
// Intermediate paths to cleanup
private Path workingPath;
private Path compressedPath;
private Path encryptedPath;
public ArbitraryDataWriter(Path filePath, String name, Service service, String identifier, Method method, Compression compression) {
this.filePath = filePath;
this.name = name;
this.service = service;
this.method = method;
this.compression = compression;
// If identifier is a blank string, or reserved keyword "default", treat it as null
if (identifier == null || identifier.equals("") || identifier.equals("default")) {
identifier = null;
}
this.identifier = identifier;
}
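/**
 * Run the full write pipeline: validate the service, apply the PUT/PATCH method,
 * then compress, encrypt and split the data into chunks, create the chunks metadata
 * file, and finally validate the resulting ArbitraryDataFile. Temporary files are
 * cleaned up afterwards, even on failure.
 */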
public void save() throws IOException, DataException, InterruptedException, MissingDataException {
try {
this.preExecute();
this.validateService();
this.process();
this.compress();
this.encrypt();
this.split();
this.createMetadataFile();
this.validate();
} finally {
this.postExecute();
}
}
private void preExecute() throws DataException {
this.checkEnabled();
// Enforce compression when uploading a directory
File file = new File(this.filePath.toString());
if (file.isDirectory() && compression == Compression.NONE) {
throw new DataException("Unable to upload a directory without compression");
}
// Create temporary working directory
this.createWorkingDirectory();
}
private void postExecute() throws IOException {
this.cleanupFilesystem();
}
private void checkEnabled() throws DataException {
if (!Settings.getInstance().isQdnEnabled()) {
throw new DataException("QDN is disabled in settings");
}
}
private void createWorkingDirectory() throws DataException {
// Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware
String baseDir = Settings.getInstance().getTempDataPath();
String identifier = Base58.encode(Crypto.digest(this.filePath.toString().getBytes()));
Path tempDir = Paths.get(baseDir, "writer", identifier);
try {
Files.createDirectories(tempDir);
} catch (IOException e) {
throw new DataException("Unable to create temp directory");
}
this.workingPath = tempDir;
}
private void validateService() throws IOException, DataException {
if (this.service.isValidationRequired()) {
Service.ValidationResult result = this.service.validate(this.filePath);
if (result != Service.ValidationResult.OK) {
throw new DataException(String.format("Validation of %s failed: %s", this.service, result.toString()));
}
}
}
private void process() throws DataException, IOException, MissingDataException {
switch (this.method) {
case PUT:
// Nothing to do
break;
case PATCH:
this.processPatch();
break;
default:
throw new DataException(String.format("Unknown method specified: %s", method.toString()));
}
}
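/**
 * Build the previous state of this resource from past transactions, then replace
 * this.filePath with a directory containing only the differences from that state.
 */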
private void processPatch() throws DataException, IOException, MissingDataException {
// Build the existing state using past transactions
ArbitraryDataBuilder builder = new ArbitraryDataBuilder(this.name, this.service, this.identifier);
builder.build();
Path builtPath = builder.getFinalPath();
// Obtain the latest signature, so this can be included in the patch
byte[] latestSignature = builder.getLatestSignature();
// Compute a diff of the latest changes on top of the previous state
// Then use only the differences as our data payload
ArbitraryDataCreatePatch patch = new ArbitraryDataCreatePatch(builtPath, this.filePath, latestSignature);
patch.create();
this.filePath = patch.getFinalPath();
// Delete the input directory
if (FilesystemUtils.pathInsideDataOrTempPath(builtPath)) {
File directory = new File(builtPath.toString());
FileUtils.deleteDirectory(directory);
}
// Validate the patch
this.validatePatch();
}
private void validatePatch() throws DataException {
if (this.filePath == null) {
throw new DataException("Null path after creating patch");
}
File qortalMetadataDirectoryFile = Paths.get(this.filePath.toString(), ".qortal").toFile();
if (!qortalMetadataDirectoryFile.exists()) {
throw new DataException("Qortal metadata folder doesn't exist in patch");
}
if (!qortalMetadataDirectoryFile.isDirectory()) {
throw new DataException("Qortal metadata folder isn't a directory");
}
File qortalPatchMetadataFile = Paths.get(this.filePath.toString(), ".qortal", "patch").toFile();
if (!qortalPatchMetadataFile.exists()) {
throw new DataException("Qortal patch metadata file doesn't exist in patch");
}
if (!qortalPatchMetadataFile.isFile()) {
throw new DataException("Qortal patch metadata file isn't a file");
}
}
private void compress() throws InterruptedException, DataException {
// Compress the data if requested
if (this.compression != Compression.NONE) {
this.compressedPath = Paths.get(this.workingPath.toString(), "data.zip");
try {
if (this.compression == Compression.ZIP) {
LOGGER.info("Compressing...");
String enclosingFolderName = "data";
ZipUtils.zip(this.filePath.toString(), this.compressedPath.toString(), enclosingFolderName);
}
else {
throw new DataException(String.format("Unknown compression type specified: %s", compression.toString()));
}
// FUTURE: other compression types
// Delete the input directory
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
File directory = new File(this.filePath.toString());
FileUtils.deleteDirectory(directory);
}
// Replace filePath pointer with the zipped file path
this.filePath = this.compressedPath;
} catch (IOException | DataException e) {
throw new DataException("Unable to zip directory", e);
}
}
}
private void encrypt() throws DataException {
this.encryptedPath = Paths.get(this.workingPath.toString(), "data.zip.encrypted");
try {
// Encrypt the file with AES
LOGGER.info("Encrypting...");
this.aesKey = AES.generateKey(256);
AES.encryptFile("AES", this.aesKey, this.filePath.toString(), this.encryptedPath.toString());
// Delete the input file
if (FilesystemUtils.pathInsideDataOrTempPath(this.filePath)) {
Files.delete(this.filePath);
}
// Replace filePath pointer with the encrypted file path
this.filePath = this.encryptedPath;
} catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException | NoSuchPaddingException
| BadPaddingException | IllegalBlockSizeException | IOException | InvalidKeyException e) {
throw new DataException(String.format("Unable to encrypt file %s: %s", this.filePath, e.getMessage()));
}
}
private void split() throws IOException, DataException {
// We don't have a signature yet, so use null to put the file in a generic folder
this.arbitraryDataFile = ArbitraryDataFile.fromPath(this.filePath, null);
if (this.arbitraryDataFile == null) {
throw new IOException("No file available when trying to split");
}
int chunkCount = this.arbitraryDataFile.split(this.chunkSize);
if (chunkCount > 0) {
LOGGER.info(String.format("Successfully split into %d chunk%s", chunkCount, (chunkCount == 1 ? "" : "s")));
}
else {
throw new DataException("Unable to split file into chunks");
}
}
private void createMetadataFile() throws IOException, DataException {
// If we have at least one chunk, we need to create an index file containing their hashes
if (this.arbitraryDataFile.chunkCount() > 1) {
// Create the JSON file
Path chunkFilePath = Paths.get(this.workingPath.toString(), "metadata.json");
ArbitraryDataTransactionMetadata chunkMetadata = new ArbitraryDataTransactionMetadata(chunkFilePath);
chunkMetadata.setChunks(this.arbitraryDataFile.chunkHashList());
chunkMetadata.write();
// Create an ArbitraryDataFile from the JSON file (we don't have a signature yet)
ArbitraryDataFile metadataFile = ArbitraryDataFile.fromPath(chunkFilePath, null);
this.arbitraryDataFile.setMetadataFile(metadataFile);
}
}
private void validate() throws IOException, DataException {
if (this.arbitraryDataFile == null) {
throw new DataException("No file available when validating");
}
this.arbitraryDataFile.setSecret(this.aesKey.getEncoded());
// Validate the file
ValidationResult validationResult = this.arbitraryDataFile.isValid();
if (validationResult != ValidationResult.OK) {
throw new DataException(String.format("File %s failed validation: %s", this.arbitraryDataFile, validationResult));
}
LOGGER.info("Whole file hash is valid: {}", this.arbitraryDataFile.digest58());
// Validate each chunk
for (ArbitraryDataFileChunk chunk : this.arbitraryDataFile.getChunks()) {
validationResult = chunk.isValid();
if (validationResult != ValidationResult.OK) {
throw new DataException(String.format("Chunk %s failed validation: %s", chunk, validationResult));
}
}
LOGGER.info("Chunk hashes are valid");
// Validate chunks metadata file
if (this.arbitraryDataFile.chunkCount() > 1) {
ArbitraryDataFile metadataFile = this.arbitraryDataFile.getMetadataFile();
if (metadataFile == null || !metadataFile.exists()) {
throw new DataException("No metadata file available, but there are multiple chunks");
}
// Read the file
ArbitraryDataTransactionMetadata metadata = new ArbitraryDataTransactionMetadata(metadataFile.getFilePath());
metadata.read();
// Check all chunks exist
for (byte[] chunk : this.arbitraryDataFile.chunkHashList()) {
if (!metadata.containsChunk(chunk)) {
throw new DataException(String.format("Missing chunk %s in metadata file", Base58.encode(chunk)));
}
}
}
}
private void cleanupFilesystem() throws IOException {
// Clean up
if (FilesystemUtils.pathInsideDataOrTempPath(this.compressedPath)) {
File zippedFile = new File(this.compressedPath.toString());
if (zippedFile.exists()) {
zippedFile.delete();
}
}
if (FilesystemUtils.pathInsideDataOrTempPath(this.encryptedPath)) {
File encryptedFile = new File(this.encryptedPath.toString());
if (encryptedFile.exists()) {
encryptedFile.delete();
}
}
if (FilesystemUtils.pathInsideDataOrTempPath(this.workingPath)) {
FileUtils.deleteDirectory(new File(this.workingPath.toString()));
}
}
public ArbitraryDataFile getArbitraryDataFile() {
return this.arbitraryDataFile;
}
public void setChunkSize(int chunkSize) {
this.chunkSize = chunkSize;
}
}
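// Minimal usage sketch (path and name values are illustrative only):
//
//   Path path = Paths.get("/path/to/site");
//   ArbitraryDataWriter writer = new ArbitraryDataWriter(path, "MyRegisteredName",
//           Service.WEBSITE, null, Method.PUT, Compression.ZIP);
//   writer.setChunkSize(ArbitraryDataFile.CHUNK_SIZE);
//   writer.save();
//   ArbitraryDataFile dataFile = writer.getArbitraryDataFile();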

View File

@ -0,0 +1,20 @@
package org.qortal.arbitrary.exception;
public class MissingDataException extends Exception {
public MissingDataException() {
}
public MissingDataException(String message) {
super(message);
}
public MissingDataException(String message, Throwable cause) {
super(message, cause);
}
public MissingDataException(Throwable cause) {
super(cause);
}
}

View File

@ -0,0 +1,85 @@
package org.qortal.arbitrary.metadata;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.repository.DataException;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
* ArbitraryDataMetadata
*
* This is a base class to handle reading and writing JSON to the supplied filePath.
*
* It is not usable on its own; it must be subclassed, with two methods overridden:
*
 * readJson() - code to deserialize the JSON file
* buildJson() - code to serialize the JSON file
*
*/
public class ArbitraryDataMetadata {
protected static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMetadata.class);
protected Path filePath;
protected String jsonString;
public ArbitraryDataMetadata(Path filePath) {
this.filePath = filePath;
}
protected void readJson() throws DataException {
// To be overridden
}
protected void buildJson() {
// To be overridden
}
public void read() throws IOException, DataException {
this.loadJson();
this.readJson();
}
public void write() throws IOException, DataException {
this.buildJson();
this.createParentDirectories();
BufferedWriter writer = new BufferedWriter(new FileWriter(this.filePath.toString()));
writer.write(this.jsonString);
writer.newLine();
writer.close();
}
protected void loadJson() throws IOException {
File metadataFile = new File(this.filePath.toString());
if (!metadataFile.exists()) {
throw new IOException(String.format("Metadata file doesn't exist: %s", this.filePath.toString()));
}
this.jsonString = new String(Files.readAllBytes(this.filePath));
}
protected void createParentDirectories() throws DataException {
try {
Files.createDirectories(this.filePath.getParent());
} catch (IOException e) {
throw new DataException("Unable to create parent directories");
}
}
public String getJsonString() {
return this.jsonString;
}
}

View File

@ -0,0 +1,69 @@
package org.qortal.arbitrary.metadata;
import org.json.JSONObject;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
import java.nio.file.Path;
public class ArbitraryDataMetadataCache extends ArbitraryDataQortalMetadata {
private byte[] signature;
private long timestamp;
public ArbitraryDataMetadataCache(Path filePath) {
super(filePath);
}
@Override
protected String fileName() {
return "cache";
}
@Override
protected void readJson() throws DataException {
if (this.jsonString == null) {
throw new DataException("Patch JSON string is null");
}
JSONObject cache = new JSONObject(this.jsonString);
if (cache.has("signature")) {
String sig = cache.getString("signature");
if (sig != null) {
this.signature = Base58.decode(sig);
}
}
if (cache.has("timestamp")) {
this.timestamp = cache.getLong("timestamp");
}
}
@Override
protected void buildJson() {
JSONObject patch = new JSONObject();
patch.put("signature", Base58.encode(this.signature));
patch.put("timestamp", this.timestamp);
this.jsonString = patch.toString(2);
LOGGER.trace("Cache metadata: {}", this.jsonString);
}
public void setSignature(byte[] signature) {
this.signature = signature;
}
public byte[] getSignature() {
return this.signature;
}
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
}
public long getTimestamp() {
return this.timestamp;
}
}
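// Minimal usage sketch (variable values are illustrative only):
//
//   ArbitraryDataMetadataCache cache = new ArbitraryDataMetadataCache(resourcePath);
//   cache.setSignature(signature);
//   cache.setTimestamp(timestamp);
//   cache.write();   // serializes to <resourcePath>/.qortal/cache
//
//   cache.read();    // later: reloads signature and timestamp from disk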

View File

@ -0,0 +1,182 @@
package org.qortal.arbitrary.metadata;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.json.JSONArray;
import org.json.JSONObject;
import org.qortal.arbitrary.ArbitraryDataDiff.*;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
import java.lang.reflect.Field;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
public class ArbitraryDataMetadataPatch extends ArbitraryDataQortalMetadata {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataMetadataPatch.class);
private List<Path> addedPaths;
private List<ModifiedPath> modifiedPaths;
private List<Path> removedPaths;
private byte[] previousSignature;
private byte[] previousHash;
private byte[] currentHash;
public ArbitraryDataMetadataPatch(Path filePath) {
super(filePath);
this.addedPaths = new ArrayList<>();
this.modifiedPaths = new ArrayList<>();
this.removedPaths = new ArrayList<>();
}
@Override
protected String fileName() {
return "patch";
}
@Override
protected void readJson() throws DataException {
if (this.jsonString == null) {
throw new DataException("Patch JSON string is null");
}
JSONObject patch = new JSONObject(this.jsonString);
if (patch.has("prevSig")) {
String prevSig = patch.getString("prevSig");
if (prevSig != null) {
this.previousSignature = Base58.decode(prevSig);
}
}
if (patch.has("prevHash")) {
String prevHash = patch.getString("prevHash");
if (prevHash != null) {
this.previousHash = Base58.decode(prevHash);
}
}
if (patch.has("curHash")) {
String curHash = patch.getString("curHash");
if (curHash != null) {
this.currentHash = Base58.decode(curHash);
}
}
if (patch.has("added")) {
JSONArray added = (JSONArray) patch.get("added");
if (added != null) {
for (int i=0; i<added.length(); i++) {
String pathString = added.getString(i);
this.addedPaths.add(Paths.get(pathString));
}
}
}
if (patch.has("modified")) {
JSONArray modified = (JSONArray) patch.get("modified");
if (modified != null) {
for (int i=0; i<modified.length(); i++) {
JSONObject jsonObject = modified.getJSONObject(i);
ModifiedPath modifiedPath = new ModifiedPath(jsonObject);
this.modifiedPaths.add(modifiedPath);
}
}
}
if (patch.has("removed")) {
JSONArray removed = (JSONArray) patch.get("removed");
if (removed != null) {
for (int i=0; i<removed.length(); i++) {
String pathString = removed.getString(i);
this.removedPaths.add(Paths.get(pathString));
}
}
}
}
@Override
protected void buildJson() {
JSONObject patch = new JSONObject();
// Attempt to use a LinkedHashMap so that the order of fields is maintained
try {
Field changeMap = patch.getClass().getDeclaredField("map");
changeMap.setAccessible(true);
changeMap.set(patch, new LinkedHashMap<>());
changeMap.setAccessible(false);
} catch (IllegalAccessException | NoSuchFieldException e) {
// Don't worry about failures as this is for optional ordering only
}
patch.put("prevSig", Base58.encode(this.previousSignature));
patch.put("prevHash", Base58.encode(this.previousHash));
patch.put("curHash", Base58.encode(this.currentHash));
patch.put("added", new JSONArray(this.addedPaths));
patch.put("removed", new JSONArray(this.removedPaths));
JSONArray modifiedPaths = new JSONArray();
for (ModifiedPath modifiedPath : this.modifiedPaths) {
JSONObject modifiedPathJson = new JSONObject();
modifiedPathJson.put("path", modifiedPath.getPath());
modifiedPathJson.put("type", modifiedPath.getDiffType());
modifiedPaths.put(modifiedPathJson);
}
patch.put("modified", modifiedPaths);
this.jsonString = patch.toString(2);
LOGGER.debug("Patch metadata: {}", this.jsonString);
}
public void setAddedPaths(List<Path> addedPaths) {
this.addedPaths = addedPaths;
}
public List<Path> getAddedPaths() {
return this.addedPaths;
}
public void setModifiedPaths(List<ModifiedPath> modifiedPaths) {
this.modifiedPaths = modifiedPaths;
}
public List<ModifiedPath> getModifiedPaths() {
return this.modifiedPaths;
}
public void setRemovedPaths(List<Path> removedPaths) {
this.removedPaths = removedPaths;
}
public List<Path> getRemovedPaths() {
return this.removedPaths;
}
public void setPreviousSignature(byte[] previousSignature) {
this.previousSignature = previousSignature;
}
public byte[] getPreviousSignature() {
return this.previousSignature;
}
public void setPreviousHash(byte[] previousHash) {
this.previousHash = previousHash;
}
public byte[] getPreviousHash() {
return this.previousHash;
}
public void setCurrentHash(byte[] currentHash) {
this.currentHash = currentHash;
}
public byte[] getCurrentHash() {
return this.currentHash;
}
public int getFileDifferencesCount() {
return this.addedPaths.size() + this.modifiedPaths.size() + this.removedPaths.size();
}
}
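// The serialized patch metadata takes roughly this shape (values are illustrative only):
//
//   {
//     "prevSig": "<base58 signature of previous transaction>",
//     "prevHash": "<base58 hash of previous state>",
//     "curHash": "<base58 hash of current state>",
//     "added": ["assets/new.png"],
//     "removed": ["assets/old.png"],
//     "modified": [{ "path": "index.html", "type": "COMPLETE_FILE" }]
//   }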

View File

@ -0,0 +1,102 @@
package org.qortal.arbitrary.metadata;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.repository.DataException;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* ArbitraryDataQortalMetadata
*
* This is a base class to handle reading and writing JSON to a .qortal folder
* within the supplied filePath. This is used when storing data against an existing
* arbitrary data file structure.
*
* It is not usable on its own; it must be subclassed, with three methods overridden:
*
* fileName() - the file name to use within the .qortal folder
 * readJson() - code to deserialize the JSON file
* buildJson() - code to serialize the JSON file
*
*/
public class ArbitraryDataQortalMetadata extends ArbitraryDataMetadata {
protected static final Logger LOGGER = LogManager.getLogger(ArbitraryDataQortalMetadata.class);
protected Path filePath;
protected Path qortalDirectoryPath;
protected String jsonString;
public ArbitraryDataQortalMetadata(Path filePath) {
super(filePath);
this.qortalDirectoryPath = Paths.get(filePath.toString(), ".qortal");
}
protected String fileName() {
// To be overridden
return null;
}
protected void readJson() throws DataException {
// To be overridden
}
protected void buildJson() {
// To be overridden
}
@Override
public void read() throws IOException, DataException {
this.loadJson();
this.readJson();
}
@Override
public void write() throws IOException, DataException {
this.buildJson();
this.createParentDirectories();
this.createQortalDirectory();
Path patchPath = Paths.get(this.qortalDirectoryPath.toString(), this.fileName());
BufferedWriter writer = new BufferedWriter(new FileWriter(patchPath.toString()));
writer.write(this.jsonString);
writer.newLine();
writer.close();
}
@Override
protected void loadJson() throws IOException {
Path path = Paths.get(this.qortalDirectoryPath.toString(), this.fileName());
File patchFile = new File(path.toString());
if (!patchFile.exists()) {
throw new IOException(String.format("Patch file doesn't exist: %s", path.toString()));
}
this.jsonString = new String(Files.readAllBytes(path));
}
protected void createQortalDirectory() throws DataException {
try {
Files.createDirectories(this.qortalDirectoryPath);
} catch (IOException e) {
throw new DataException("Unable to create .qortal directory");
}
}
public String getJsonString() {
return this.jsonString;
}
}

View File

@ -0,0 +1,78 @@
package org.qortal.arbitrary.metadata;
import org.json.JSONArray;
import org.json.JSONObject;
import org.qortal.repository.DataException;
import org.qortal.utils.Base58;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class ArbitraryDataTransactionMetadata extends ArbitraryDataMetadata {
private List<byte[]> chunks;
public ArbitraryDataTransactionMetadata(Path filePath) {
super(filePath);
}
@Override
protected void readJson() throws DataException {
if (this.jsonString == null) {
throw new DataException("Transaction metadata JSON string is null");
}
List<byte[]> chunksList = new ArrayList<>();
JSONObject cache = new JSONObject(this.jsonString);
if (cache.has("chunks")) {
JSONArray chunks = cache.getJSONArray("chunks");
if (chunks != null) {
for (int i=0; i<chunks.length(); i++) {
String chunk = chunks.getString(i);
if (chunk != null) {
chunksList.add(Base58.decode(chunk));
}
}
}
this.chunks = chunksList;
}
}
@Override
protected void buildJson() {
JSONObject outer = new JSONObject();
JSONArray chunks = new JSONArray();
if (this.chunks != null) {
for (byte[] chunk : this.chunks) {
chunks.put(Base58.encode(chunk));
}
}
outer.put("chunks", chunks);
this.jsonString = outer.toString(2);
LOGGER.trace("Transaction metadata: {}", this.jsonString);
}
public void setChunks(List<byte[]> chunks) {
this.chunks = chunks;
}
public List<byte[]> getChunks() {
return this.chunks;
}
public boolean containsChunk(byte[] chunk) {
for (byte[] c : this.chunks) {
if (Arrays.equals(c, chunk)) {
return true;
}
}
return false;
}
}
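// The serialized transaction metadata is a simple JSON document listing base58 chunk
// hashes, for example (hash values are illustrative only):
//
//   {
//     "chunks": ["3nJc1P7x...", "9qWm4Rka..."]
//   }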

View File

@ -0,0 +1,131 @@
package org.qortal.arbitrary.misc;
import org.json.JSONObject;
import org.qortal.arbitrary.ArbitraryDataRenderer;
import org.qortal.transaction.Transaction;
import org.qortal.utils.FilesystemUtils;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;
public enum Service {
AUTO_UPDATE(1, false, null, null),
ARBITRARY_DATA(100, false, null, null),
WEBSITE(200, true, null, null) {
@Override
public ValidationResult validate(Path path) {
// Custom validation function to require an index HTML file in the root directory
List<String> fileNames = ArbitraryDataRenderer.indexFiles();
String[] files = path.toFile().list();
if (files != null) {
for (String file : files) {
Path fileName = Paths.get(file).getFileName();
if (fileName != null && fileNames.contains(fileName.toString())) {
return ValidationResult.OK;
}
}
}
return ValidationResult.MISSING_INDEX_FILE;
}
},
GIT_REPOSITORY(300, false, null, null),
IMAGE(400, true, 10*1024*1024L, null),
THUMBNAIL(410, true, 500*1024L, null),
VIDEO(500, false, null, null),
AUDIO(600, false, null, null),
BLOG(700, false, null, null),
BLOG_POST(777, false, null, null),
BLOG_COMMENT(778, false, null, null),
DOCUMENT(800, false, null, null),
LIST(900, true, null, null),
PLAYLIST(910, true, null, null),
APP(1000, false, null, null),
METADATA(1100, false, null, null),
QORTAL_METADATA(1111, true, 10*1024L, Arrays.asList("title", "description", "tags"));
public final int value;
private final boolean requiresValidation;
private final Long maxSize;
private final List<String> requiredKeys;
private static final Map<Integer, Service> map = stream(Service.values())
.collect(toMap(service -> service.value, service -> service));
Service(int value, boolean requiresValidation, Long maxSize, List<String> requiredKeys) {
this.value = value;
this.requiresValidation = requiresValidation;
this.maxSize = maxSize;
this.requiredKeys = requiredKeys;
}
public ValidationResult validate(Path path) throws IOException {
if (!this.isValidationRequired()) {
return ValidationResult.OK;
}
byte[] data = FilesystemUtils.getSingleFileContents(path);
long size = FilesystemUtils.getDirectorySize(path);
// Validate max size if needed
if (this.maxSize != null) {
if (size > this.maxSize) {
return ValidationResult.EXCEEDS_SIZE_LIMIT;
}
}
// Validate required keys if needed
if (this.requiredKeys != null) {
if (data == null) {
return ValidationResult.MISSING_KEYS;
}
JSONObject json = Service.toJsonObject(data);
for (String key : this.requiredKeys) {
if (!json.has(key)) {
return ValidationResult.MISSING_KEYS;
}
}
}
// Validation passed
return ValidationResult.OK;
}
public boolean isValidationRequired() {
return this.requiresValidation;
}
public static Service valueOf(int value) {
return map.get(value);
}
public static JSONObject toJsonObject(byte[] data) {
String dataString = new String(data);
return new JSONObject(dataString);
}
public enum ValidationResult {
OK(1),
MISSING_KEYS(2),
EXCEEDS_SIZE_LIMIT(3),
MISSING_INDEX_FILE(4);
public final int value;
private static final Map<Integer, ValidationResult> map = stream(ValidationResult.values()).collect(toMap(result -> result.value, result -> result));
ValidationResult(int value) {
this.value = value;
}
public static ValidationResult valueOf(int value) {
return map.get(value);
}
}
}
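// Minimal usage sketch (path value is illustrative only):
//
//   Service service = Service.valueOf(200);   // Service.WEBSITE
//   Service.ValidationResult result = service.validate(Paths.get("/path/to/site"));
//   if (result != Service.ValidationResult.OK) {
//       // reject the data before it is written or broadcast
//   }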

View File

@ -0,0 +1,229 @@
package org.qortal.arbitrary.patch;
import com.github.difflib.DiffUtils;
import com.github.difflib.UnifiedDiffUtils;
import com.github.difflib.patch.Patch;
import com.github.difflib.patch.PatchFailedException;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.crypto.Crypto;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.utils.FilesystemUtils;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
public class UnifiedDiffPatch {
private static final Logger LOGGER = LogManager.getLogger(UnifiedDiffPatch.class);
private final Path before;
private final Path after;
private final Path destination;
private String identifier;
private Path validationPath;
public UnifiedDiffPatch(Path before, Path after, Path destination) {
this.before = before;
this.after = after;
this.destination = destination;
}
/**
* Create a patch based on the differences in path "after"
* compared with base path "before", outputting the patch
* to the "destination" path.
*
* @throws IOException
*/
public void create() throws IOException {
if (!Files.exists(before)) {
throw new IOException(String.format("File not found (before): %s", before.toString()));
}
if (!Files.exists(after)) {
throw new IOException(String.format("File not found (after): %s", after.toString()));
}
// Ensure parent folders exist in the destination
File file = new File(destination.toString());
File parent = file.getParentFile();
if (parent != null) {
parent.mkdirs();
}
// Delete an existing file if it exists
File destFile = destination.toFile();
if (destFile.exists() && destFile.isFile()) {
Files.delete(destination);
}
// Load the two files into memory
List<String> original = FileUtils.readLines(before.toFile(), StandardCharsets.UTF_8);
List<String> revised = FileUtils.readLines(after.toFile(), StandardCharsets.UTF_8);
// Check if the original file ends with a newline
boolean endsWithNewline = FilesystemUtils.fileEndsWithNewline(before);
// Generate diff information
Patch<String> diff = DiffUtils.diff(original, revised);
// Generate unified diff format
String originalFileName = before.getFileName().toString();
String revisedFileName = after.getFileName().toString();
List<String> unifiedDiff = UnifiedDiffUtils.generateUnifiedDiff(originalFileName, revisedFileName, original, diff, 0);
// Write the diff to the destination directory
FileWriter fileWriter = new FileWriter(destination.toString(), true);
BufferedWriter writer = new BufferedWriter(fileWriter);
for (int i=0; i<unifiedDiff.size(); i++) {
String line = unifiedDiff.get(i);
writer.append(line);
// Add a newline if this isn't the last line, or the original ended with a newline
if (i < unifiedDiff.size()-1 || endsWithNewline) {
writer.newLine();
}
}
writer.flush();
writer.close();
}
/**
* Validate the patch at the "destination" path to ensure
* it works correctly and is smaller than the original file
*
* @return true if valid, false if invalid
*/
public boolean isValid() throws DataException {
this.createRandomIdentifier();
this.createTempValidationDirectory();
// Merge the patch with the original path
Path tempPath = Paths.get(this.validationPath.toString(), this.identifier);
try {
UnifiedDiffPatch unifiedDiffPatch = new UnifiedDiffPatch(before, destination, tempPath);
unifiedDiffPatch.apply(null);
byte[] inputDigest = Crypto.digest(after.toFile());
byte[] outputDigest = Crypto.digest(tempPath.toFile());
if (Arrays.equals(inputDigest, outputDigest)) {
// Patch is valid, but we might want to reject if it's larger than the original file
long originalSize = Files.size(after);
long patchSize = Files.size(destination);
if (patchSize < originalSize) {
// Patch file is smaller than the original file size, so treat it as valid
return true;
}
}
else {
LOGGER.info("Checksum mismatch when verifying patch for file {}", destination.toString());
return false;
}
}
catch (IOException e) {
LOGGER.info("Failed to compute merge for file {}: {}", destination.toString(), e.getMessage());
}
finally {
try {
Files.delete(tempPath);
} catch (IOException e) {
// Not important - will be cleaned up later
}
}
return false;
}
/**
* Apply a patch at path "after" on top of base path "before",
* outputting the combined results to the "destination" path.
* If before and after are directories, a relative path suffix
* can be used to specify the file within these folder structures.
*
* @param pathSuffix - a file path to append to the base paths, or null if the base paths are already files
* @throws IOException
*/
public void apply(Path pathSuffix) throws IOException, DataException {
Path originalPath = this.before;
Path patchPath = this.after;
Path mergePath = this.destination;
// If a path has been supplied, we need to append it to the base paths
if (pathSuffix != null) {
originalPath = Paths.get(this.before.toString(), pathSuffix.toString());
patchPath = Paths.get(this.after.toString(), pathSuffix.toString());
mergePath = Paths.get(this.destination.toString(), pathSuffix.toString());
}
if (!patchPath.toFile().exists()) {
throw new DataException("Patch file doesn't exist, but its path was included in modifiedPaths");
}
// Delete an existing file, as we are starting from a duplicate of pathBefore
File destFile = mergePath.toFile();
if (destFile.exists() && destFile.isFile()) {
Files.delete(mergePath);
}
List<String> originalContents = FileUtils.readLines(originalPath.toFile(), StandardCharsets.UTF_8);
List<String> patchContents = FileUtils.readLines(patchPath.toFile(), StandardCharsets.UTF_8);
// Check if the patch file (and therefore the original file) ends with a newline
boolean endsWithNewline = FilesystemUtils.fileEndsWithNewline(patchPath);
// First, parse the unified diff file and get the patch
Patch<String> patch = UnifiedDiffUtils.parseUnifiedDiff(patchContents);
// Then apply the computed patch to the given text
try {
List<String> patchedContents = DiffUtils.patch(originalContents, patch);
// Write the patched file to the merge directory
FileWriter fileWriter = new FileWriter(mergePath.toString(), true);
BufferedWriter writer = new BufferedWriter(fileWriter);
for (int i=0; i<patchedContents.size(); i++) {
String line = patchedContents.get(i);
writer.append(line);
// Add a newline if this isn't the last line, or the original ended with a newline
if (i < patchedContents.size()-1 || endsWithNewline) {
writer.newLine();
}
}
writer.flush();
writer.close();
} catch (PatchFailedException e) {
throw new DataException(String.format("Failed to apply patch for path %s: %s", pathSuffix, e.getMessage()));
}
}
private void createRandomIdentifier() {
this.identifier = UUID.randomUUID().toString();
}
private void createTempValidationDirectory() throws DataException {
// Use the user-specified temp dir, as it is deterministic, and is more likely to be located on reusable storage hardware
String baseDir = Settings.getInstance().getTempDataPath();
Path tempDir = Paths.get(baseDir, "diff", "validate");
try {
Files.createDirectories(tempDir);
} catch (IOException e) {
throw new DataException("Unable to create temp directory");
}
this.validationPath = tempDir;
}
}
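// Minimal usage sketch (path variables are illustrative only):
//
//   UnifiedDiffPatch diff = new UnifiedDiffPatch(beforeFile, afterFile, patchFile);
//   diff.create();                 // write a unified diff of afterFile vs beforeFile to patchFile
//   if (diff.isValid()) {
//       UnifiedDiffPatch merge = new UnifiedDiffPatch(beforeFile, patchFile, mergedFile);
//       merge.apply(null);         // re-apply the patch to reproduce afterFile at mergedFile
//   }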

View File

@ -476,6 +476,16 @@ public class Block {
return this.minter;
}
public void setRepository(Repository repository) throws DataException {
this.repository = repository;
for (Transaction transaction : this.getTransactions()) {
transaction.setRepository(repository);
}
}
// More information
/**
@ -524,8 +534,10 @@ public class Block {
long nonAtTransactionCount = transactionsData.stream().filter(transactionData -> transactionData.getType() != TransactionType.AT).count();
// The number of non-AT transactions fetched from repository should correspond with Block's transactionCount
if (nonAtTransactionCount != this.blockData.getTransactionCount())
if (nonAtTransactionCount != this.blockData.getTransactionCount()) {
LOGGER.error(() -> String.format("Block's transactions from repository (%d) do not match block's transaction count (%d)", nonAtTransactionCount, this.blockData.getTransactionCount()));
throw new IllegalStateException("Block's transactions from repository do not match block's transaction count");
}
this.transactions = new ArrayList<>();

View File

@ -1,91 +0,0 @@
package org.qortal.controller;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction.TransactionType;
public class ArbitraryDataManager extends Thread {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataManager.class);
private static final List<TransactionType> ARBITRARY_TX_TYPE = Arrays.asList(TransactionType.ARBITRARY);
private static ArbitraryDataManager instance;
private volatile boolean isStopping = false;
private ArbitraryDataManager() {
}
public static ArbitraryDataManager getInstance() {
if (instance == null)
instance = new ArbitraryDataManager();
return instance;
}
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Manager");
try {
while (!isStopping) {
Thread.sleep(2000);
// Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, ConfirmationStatus.BOTH, null, null, true);
if (signatures == null || signatures.isEmpty())
continue;
// Filter out those that already have local data
signatures.removeIf(signature -> hasLocalData(repository, signature));
if (signatures.isEmpty())
continue;
// Pick one at random
final int index = new Random().nextInt(signatures.size());
byte[] signature = signatures.get(index);
Controller.getInstance().fetchArbitraryData(signature);
} catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
}
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
}
}
public void shutdown() {
isStopping = true;
this.interrupt();
}
private boolean hasLocalData(final Repository repository, final byte[] signature) {
try {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
return true;
ArbitraryTransaction arbitraryTransaction = new ArbitraryTransaction(repository, transactionData);
return arbitraryTransaction.isDataLocal();
} catch (DataException e) {
LOGGER.error("Repository issue when checking arbitrary transaction's data is local", e);
return true;
}
}
}

View File

@ -17,7 +17,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
@ -41,18 +40,21 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.jsse.provider.BouncyCastleJsseProvider;
import com.google.common.primitives.Longs;
import org.qortal.account.Account;
import org.qortal.account.PrivateKeyAccount;
import org.qortal.account.PublicKeyAccount;
import org.qortal.api.ApiService;
import org.qortal.api.DomainMapService;
import org.qortal.api.GatewayService;
import org.qortal.block.Block;
import org.qortal.block.BlockChain;
import org.qortal.block.BlockChain.BlockTimingByHeight;
import org.qortal.controller.arbitrary.*;
import org.qortal.controller.Synchronizer.SynchronizationResult;
import org.qortal.controller.repository.PruneManager;
import org.qortal.controller.repository.NamesDatabaseIntegrityCheck;
import org.qortal.controller.tradebot.TradeBot;
import org.qortal.crypto.Crypto;
import org.qortal.data.account.MintingAccountData;
import org.qortal.data.account.RewardShareData;
import org.qortal.data.block.BlockData;
@ -60,45 +62,24 @@ import org.qortal.data.block.BlockSummaryData;
import org.qortal.data.network.OnlineAccountData;
import org.qortal.data.network.PeerChainTipData;
import org.qortal.data.network.PeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ChatTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.DataType;
import org.qortal.event.Event;
import org.qortal.event.EventBus;
import org.qortal.data.transaction.ChatTransactionData;
import org.qortal.globalization.Translator;
import org.qortal.gui.Gui;
import org.qortal.gui.SysTray;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.ArbitraryDataMessage;
import org.qortal.network.message.BlockSummariesMessage;
import org.qortal.network.message.CachedBlockMessage;
import org.qortal.network.message.GetArbitraryDataMessage;
import org.qortal.network.message.GetBlockMessage;
import org.qortal.network.message.GetBlockSummariesMessage;
import org.qortal.network.message.GetOnlineAccountsMessage;
import org.qortal.network.message.GetPeersMessage;
import org.qortal.network.message.GetSignaturesV2Message;
import org.qortal.network.message.GetTransactionMessage;
import org.qortal.network.message.GetUnconfirmedTransactionsMessage;
import org.qortal.network.message.HeightV2Message;
import org.qortal.network.message.Message;
import org.qortal.network.message.OnlineAccountsMessage;
import org.qortal.network.message.SignaturesMessage;
import org.qortal.network.message.TransactionMessage;
import org.qortal.network.message.TransactionSignaturesMessage;
import org.qortal.network.message.*;
import org.qortal.repository.*;
import org.qortal.repository.hsqldb.HSQLDBRepositoryFactory;
import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.transaction.Transaction.ValidationResult;
import org.qortal.utils.*;
import com.google.common.primitives.Longs;
public class Controller extends Thread {
static {
@ -109,14 +90,13 @@ public class Controller extends Thread {
/** Controller start-up time (ms) taken using <tt>System.currentTimeMillis()</tt>. */
public static final long startTime = System.currentTimeMillis();
public static final String VERSION_PREFIX = "qortal-";
public static final String VERSION_PREFIX = "qortaldata-";
private static final Logger LOGGER = LogManager.getLogger(Controller.class);
private static final long MISBEHAVIOUR_COOLOFF = 10 * 60 * 1000L; // ms
private static final int MAX_BLOCKCHAIN_TIP_AGE = 5; // blocks
private static final Object shutdownLock = new Object();
private static final String repositoryUrlTemplate = "jdbc:hsqldb:file:%s" + File.separator + "blockchain;create=true;hsqldb.full_log_replay=true";
private static final long ARBITRARY_REQUEST_TIMEOUT = 5 * 1000L; // ms
private static final long NTP_PRE_SYNC_CHECK_PERIOD = 5 * 1000L; // ms
private static final long NTP_POST_SYNC_CHECK_PERIOD = 5 * 60 * 1000L; // ms
private static final long DELETE_EXPIRED_INTERVAL = 5 * 60 * 1000L; // ms
@ -181,25 +161,6 @@ public class Controller extends Thread {
private boolean peersAvailable = true; // peersAvailable must default to true
private long timePeersLastAvailable = 0;
/**
* Map of recent requests for ARBITRARY transaction data payloads.
* <p>
* Key is original request's message ID<br>
* Value is Triple&lt;transaction signature in base58, first requesting peer, first request's timestamp&gt;
* <p>
* If peer is null then either:<br>
* <ul>
* <li>we are the original requesting peer</li>
* <li>we have already sent data payload to original requesting peer.</li>
* </ul>
* If signature is null then we have already received the data payload and either:<br>
* <ul>
* <li>we are the original requesting peer and have saved it locally</li>
* <li>we have forwarded the data payload (and maybe also saved it locally)</li>
* </ul>
*/
private Map<Integer, Triple<String, Peer, Long>> arbitraryDataRequests = Collections.synchronizedMap(new HashMap<>());
/** Lock for only allowing one blockchain-modifying codepath at a time. e.g. synchronization or newly minted block. */
private final ReentrantLock blockchainLock = new ReentrantLock();
@ -242,12 +203,30 @@ public class Controller extends Thread {
}
public GetBlockSignaturesV2Stats getBlockSignaturesV2Stats = new GetBlockSignaturesV2Stats();
public static class GetArbitraryDataFileMessageStats {
public AtomicLong requests = new AtomicLong();
public AtomicLong unknownFiles = new AtomicLong();
public GetArbitraryDataFileMessageStats() {
}
}
public GetArbitraryDataFileMessageStats getArbitraryDataFileMessageStats = new GetArbitraryDataFileMessageStats();
public static class GetArbitraryDataFileListMessageStats {
public AtomicLong requests = new AtomicLong();
public AtomicLong unknownFiles = new AtomicLong();
public GetArbitraryDataFileListMessageStats() {
}
}
public GetArbitraryDataFileListMessageStats getArbitraryDataFileListMessageStats = new GetArbitraryDataFileListMessageStats();
public AtomicLong latestBlocksCacheRefills = new AtomicLong();
public StatsSnapshot() {
}
}
private final StatsSnapshot stats = new StatsSnapshot();
public final StatsSnapshot stats = new StatsSnapshot();
// Constructors
@ -387,6 +366,8 @@ public class Controller extends Thread {
// Entry point
public static void main(String[] args) {
LoggingUtils.fixLegacyLog4j2Properties();
LOGGER.info("Starting up...");
// Potential GUI startup with splash screen, etc.
@ -493,9 +474,13 @@ public class Controller extends Thread {
LOGGER.info("Starting trade-bot");
TradeBot.getInstance();
// Arbitrary transaction data manager
// LOGGER.info("Starting arbitrary-transaction data manager");
// ArbitraryDataManager.getInstance().start();
// Arbitrary data controllers
LOGGER.info("Starting arbitrary-transaction controllers");
ArbitraryDataManager.getInstance().start();
ArbitraryDataBuildManager.getInstance().start();
ArbitraryDataCleanupManager.getInstance().start();
ArbitraryDataStorageManager.getInstance().start();
ArbitraryDataRenderManager.getInstance().start();
// Auto-update service?
if (Settings.getInstance().isAutoUpdateEnabled()) {
@ -514,6 +499,32 @@ public class Controller extends Thread {
return; // Not System.exit() so that GUI can display error
}
if (Settings.getInstance().isGatewayEnabled()) {
LOGGER.info(String.format("Starting gateway service on port %d", Settings.getInstance().getGatewayPort()));
try {
GatewayService gatewayService = GatewayService.getInstance();
gatewayService.start();
} catch (Exception e) {
LOGGER.error("Unable to start gateway service", e);
Controller.getInstance().shutdown();
Gui.getInstance().fatalError("Gateway service failure", e);
return; // Not System.exit() so that GUI can display error
}
}
if (Settings.getInstance().isDomainMapEnabled()) {
LOGGER.info(String.format("Starting domain map service on port %d", Settings.getInstance().getDomainMapPort()));
try {
DomainMapService domainMapService = DomainMapService.getInstance();
domainMapService.start();
} catch (Exception e) {
LOGGER.error("Unable to start domain map service", e);
Controller.getInstance().shutdown();
Gui.getInstance().fatalError("Domain map service failure", e);
return; // Not System.exit() so that GUI can display error
}
}
// If GUI is enabled, we're no longer starting up but actually running now
Gui.getInstance().notifyRunning();
}
@ -574,8 +585,9 @@ public class Controller extends Thread {
}
// Clean up arbitrary data request cache
final long requestMinimumTimestamp = now - ARBITRARY_REQUEST_TIMEOUT;
arbitraryDataRequests.entrySet().removeIf(entry -> entry.getValue().getC() < requestMinimumTimestamp);
ArbitraryDataManager.getInstance().cleanupRequestCache(now);
// Clean up arbitrary data queues and lists
ArbitraryDataBuildManager.getInstance().cleanupQueues(now);
// Time to 'checkpoint' uncommitted repository writes?
if (now >= repositoryCheckpointTimestamp + repositoryCheckpointInterval) {
@ -1054,9 +1066,13 @@ public class Controller extends Thread {
AutoUpdate.getInstance().shutdown();
}
// Arbitrary transaction data manager
// LOGGER.info("Shutting down arbitrary-transaction data manager");
// ArbitraryDataManager.getInstance().shutdown();
// Arbitrary data controllers
LOGGER.info("Shutting down arbitrary-transaction controllers");
ArbitraryDataManager.getInstance().shutdown();
ArbitraryDataBuildManager.getInstance().shutdown();
ArbitraryDataCleanupManager.getInstance().shutdown();
ArbitraryDataStorageManager.getInstance().shutdown();
ArbitraryDataRenderManager.getInstance().shutdown();
if (blockMinter != null) {
LOGGER.info("Shutting down block minter");
@ -1352,14 +1368,6 @@ public class Controller extends Thread {
onNetworkTransactionSignaturesMessage(peer, message);
break;
case GET_ARBITRARY_DATA:
onNetworkGetArbitraryDataMessage(peer, message);
break;
case ARBITRARY_DATA:
onNetworkArbitraryDataMessage(peer, message);
break;
case GET_ONLINE_ACCOUNTS:
onNetworkGetOnlineAccountsMessage(peer, message);
break;
@ -1368,6 +1376,26 @@ public class Controller extends Thread {
onNetworkOnlineAccountsMessage(peer, message);
break;
case GET_ARBITRARY_DATA:
// Not currently supported
break;
case ARBITRARY_DATA_FILE_LIST:
ArbitraryDataFileListManager.getInstance().onNetworkArbitraryDataFileListMessage(peer, message);
break;
case GET_ARBITRARY_DATA_FILE:
ArbitraryDataFileManager.getInstance().onNetworkGetArbitraryDataFileMessage(peer, message);
break;
case GET_ARBITRARY_DATA_FILE_LIST:
ArbitraryDataFileListManager.getInstance().onNetworkGetArbitraryDataFileListMessage(peer, message);
break;
case ARBITRARY_SIGNATURES:
ArbitraryDataManager.getInstance().onNetworkArbitrarySignaturesMessage(peer, message);
break;
default:
LOGGER.debug(() -> String.format("Unhandled %s message [ID %d] from peer %s", message.getType().name(), message.getId(), peer));
break;
@ -1733,103 +1761,6 @@ public class Controller extends Thread {
}
}
private void onNetworkGetArbitraryDataMessage(Peer peer, Message message) {
GetArbitraryDataMessage getArbitraryDataMessage = (GetArbitraryDataMessage) message;
byte[] signature = getArbitraryDataMessage.getSignature();
String signature58 = Base58.encode(signature);
Long timestamp = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, timestamp);
// If we've seen this request recently, then ignore
if (arbitraryDataRequests.putIfAbsent(message.getId(), newEntry) != null)
return;
// Do we even have this transaction?
try (final Repository repository = RepositoryManager.getRepository()) {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (transactionData == null || transactionData.getType() != TransactionType.ARBITRARY)
return;
ArbitraryTransaction transaction = new ArbitraryTransaction(repository, transactionData);
// If we have the data then send it
if (transaction.isDataLocal()) {
byte[] data = transaction.fetchData();
if (data == null)
return;
// Update requests map to reflect that we've sent it
newEntry = new Triple<>(signature58, null, timestamp);
arbitraryDataRequests.put(message.getId(), newEntry);
Message arbitraryDataMessage = new ArbitraryDataMessage(signature, data);
arbitraryDataMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataMessage))
peer.disconnect("failed to send arbitrary data");
return;
}
// Ask our other peers if they have it
Network.getInstance().broadcast(broadcastPeer -> broadcastPeer == peer ? null : message);
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data for peer %s", peer), e);
}
}
private void onNetworkArbitraryDataMessage(Peer peer, Message message) {
ArbitraryDataMessage arbitraryDataMessage = (ArbitraryDataMessage) message;
// Do we have a pending request for this data?
Triple<String, Peer, Long> request = arbitraryDataRequests.get(message.getId());
if (request == null || request.getA() == null)
return;
// Does this message's signature match what we're expecting?
byte[] signature = arbitraryDataMessage.getSignature();
String signature58 = Base58.encode(signature);
if (!request.getA().equals(signature58))
return;
byte[] data = arbitraryDataMessage.getData();
// Check transaction exists and payload hash is correct
try (final Repository repository = RepositoryManager.getRepository()) {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
return;
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
byte[] actualHash = Crypto.digest(data);
// "data" from repository will always be hash of actual raw data
if (!Arrays.equals(arbitraryTransactionData.getData(), actualHash))
return;
// Update requests map to reflect that we've received it
Triple<String, Peer, Long> newEntry = new Triple<>(null, null, request.getC());
arbitraryDataRequests.put(message.getId(), newEntry);
// Save payload locally
// TODO: storage policy
arbitraryTransactionData.setDataType(DataType.RAW_DATA);
arbitraryTransactionData.setData(data);
repository.getArbitraryRepository().save(arbitraryTransactionData);
repository.saveChanges();
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data for peer %s", peer), e);
}
Peer requestingPeer = request.getB();
if (requestingPeer != null) {
// Forward to requesting peer;
if (!requestingPeer.sendMessage(arbitraryDataMessage))
requestingPeer.disconnect("failed to forward arbitrary data");
}
}
private void onNetworkGetOnlineAccountsMessage(Peer peer, Message message) {
GetOnlineAccountsMessage getOnlineAccountsMessage = (GetOnlineAccountsMessage) message;
@ -2124,51 +2055,6 @@ public class Controller extends Thread {
}
}
public byte[] fetchArbitraryData(byte[] signature) throws InterruptedException {
// Build request
Message getArbitraryDataMessage = new GetArbitraryDataMessage(signature);
// Save our request into requests map
String signature58 = Base58.encode(signature);
Triple<String, Peer, Long> requestEntry = new Triple<>(signature58, null, NTP.getTime());
// Assign random ID to this message
int id;
do {
id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1;
// Put queue into map (keyed by message ID) so we can poll for a response
// If putIfAbsent() doesn't return null, then this ID is already taken
} while (arbitraryDataRequests.put(id, requestEntry) != null);
getArbitraryDataMessage.setId(id);
// Broadcast request
Network.getInstance().broadcast(peer -> getArbitraryDataMessage);
// Poll to see if data has arrived
final long singleWait = 100;
long totalWait = 0;
while (totalWait < ARBITRARY_REQUEST_TIMEOUT) {
Thread.sleep(singleWait);
requestEntry = arbitraryDataRequests.get(id);
if (requestEntry == null)
return null;
if (requestEntry.getA() == null)
break;
totalWait += singleWait;
}
try (final Repository repository = RepositoryManager.getRepository()) {
return repository.getArbitraryRepository().fetchData(signature);
} catch (DataException e) {
LOGGER.error("Repository issue while fetching arbitrary transaction data", e);
return null;
}
}
/** Returns a list of peers that are not misbehaving, and have a recent block. */
public List<Peer> getRecentBehavingPeers() {
final Long minLatestBlockTimestamp = getMinimumLatestBlockTimestamp();

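The request/poll pattern used by fetchArbitraryData() above (reserve a random message ID via putIfAbsent(), broadcast, then poll the request map until the responder clears the entry) recurs in the arbitrary-data managers added later in this commit. Below is a minimal, self-contained sketch of the same idea; the class and method names are illustrative only, and the entry is simply removed on completion rather than having its signature field nulled as the real code does.

import java.util.Map;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative sketch only - not part of the Qortal codebase.
public class DemoRequestTracker {
    private final Map<Integer, String> pendingRequests = new ConcurrentHashMap<>();
    private static final long REQUEST_TIMEOUT = 5000L; // milliseconds (assumed value)

    /** Reserve a unique, non-zero message ID by looping until putIfAbsent() succeeds. */
    public int register(String signature58) {
        Random random = new Random();
        int id;
        do {
            id = random.nextInt(Integer.MAX_VALUE - 1) + 1;
        } while (pendingRequests.putIfAbsent(id, signature58) != null);
        return id;
    }

    /** Poll until the responder removes the entry, or give up after the timeout. */
    public boolean awaitResponse(int id) throws InterruptedException {
        final long singleWait = 100;
        long totalWait = 0;
        while (totalWait < REQUEST_TIMEOUT) {
            Thread.sleep(singleWait);
            if (!pendingRequests.containsKey(id))
                return true; // entry cleared - response has been processed
            totalWait += singleWait;
        }
        return false; // timed out
    }
}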
View File

@ -37,12 +37,14 @@ import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import static org.qortal.network.Peer.FETCH_BLOCKS_TIMEOUT;
public class Synchronizer {
private static final Logger LOGGER = LogManager.getLogger(Synchronizer.class);
/** Max number of new blocks we aim to add to chain tip in each sync round */
private static final int SYNC_BATCH_SIZE = 200; // XXX move to Settings?
private static final int SYNC_BATCH_SIZE = 1000; // XXX move to Settings?
/** Initial jump back of block height when searching for common block with peer */
private static final int INITIAL_BLOCK_STEP = 8;
@ -56,6 +58,8 @@ public class Synchronizer {
private static final int MAXIMUM_REQUEST_SIZE = 200; // XXX move to Settings?
// Keep track of the size of the last re-org, so it can be logged
private int lastReorgSize;
@ -585,16 +589,7 @@ public class Synchronizer {
String syncString = String.format("Synchronizing with peer %s at height %d, sig %.8s, ts %d; our height %d, sig %.8s, ts %d", peer,
peerHeight, Base58.encode(peersLastBlockSignature), peer.getChainTipData().getLastBlockTimestamp(),
ourInitialHeight, Base58.encode(ourLastBlockSignature), ourLatestBlockData.getTimestamp());
// If our latest block is very old, we should log that we're attempting to sync with a peer
// Otherwise, it can appear as though nothing is happening for a while after launch
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (minLatestBlockTimestamp != null && ourLatestBlockData.getTimestamp() < minLatestBlockTimestamp) {
LOGGER.info(syncString);
}
else {
LOGGER.debug(syncString);
}
LOGGER.info(syncString);
// Reset last re-org size as we are starting a new sync round
this.lastReorgSize = 0;
@ -872,7 +867,7 @@ public class Synchronizer {
}
private SynchronizationResult syncToPeerChain(Repository repository, BlockData commonBlockData, int ourInitialHeight,
Peer peer, final int peerHeight, List<BlockSummaryData> peerBlockSummaries) throws DataException, InterruptedException {
final int commonBlockHeight = commonBlockData.getHeight();
final byte[] commonBlockSig = commonBlockData.getSignature();
String commonBlockSig58 = Base58.encode(commonBlockSig);
@ -902,19 +897,19 @@ public class Synchronizer {
if (Controller.isStopping())
return SynchronizationResult.SHUTTING_DOWN;
// Ensure we don't request more than MAXIMUM_REQUEST_SIZE
int numberRequested = Math.min(numberSignaturesRequired, MAXIMUM_REQUEST_SIZE);
// Do we need more signatures?
if (peerBlockSignatures.isEmpty() && numberRequested > 0) {
LOGGER.trace(String.format("Requesting %d signature%s after height %d, sig %.8s",
numberRequested, (numberRequested != 1 ? "s" : ""), height, Base58.encode(latestPeerSignature)));
LOGGER.trace(String.format("Requesting %d signature%s after height %d, sig %.8s",
numberRequested, (numberRequested != 1 ? "s" : ""), height, Base58.encode(latestPeerSignature)));
peerBlockSignatures = this.getBlockSignatures(peer, latestPeerSignature, numberRequested);
peerBlockSignatures = this.getBlockSignatures(peer, latestPeerSignature, numberRequested);
if (peerBlockSignatures == null || peerBlockSignatures.isEmpty()) {
LOGGER.info(String.format("Peer %s failed to respond with more block signatures after height %d, sig %.8s", peer,
height, Base58.encode(latestPeerSignature)));
if (peerBlockSignatures == null || peerBlockSignatures.isEmpty()) {
LOGGER.info(String.format("Peer %s failed to respond with more block signatures after height %d, sig %.8s", peer,
height, Base58.encode(latestPeerSignature)));
// Clear our cache of common block summaries for this peer, as they are likely to be invalid
CommonBlockData cachedCommonBlockData = peer.getCommonBlockData();
@ -924,7 +919,7 @@ public class Synchronizer {
// If we have already received newer blocks from this peer that what we have already, go ahead and apply them
if (peerBlocks.size() > 0) {
final BlockData ourLatestBlockData = repository.getBlockRepository().getLastBlock();
final Block peerLatestBlock = peerBlocks.get(peerBlocks.size() - 1);
final Long minLatestBlockTimestamp = Controller.getMinimumLatestBlockTimestamp();
if (ourLatestBlockData != null && peerLatestBlock != null && minLatestBlockTimestamp != null) {
@ -947,8 +942,8 @@ public class Synchronizer {
return SynchronizationResult.NO_REPLY;
}
numberSignaturesRequired = peerHeight - height - peerBlockSignatures.size();
LOGGER.trace(String.format("Received %s signature%s", peerBlockSignatures.size(), (peerBlockSignatures.size() != 1 ? "s" : "")));
}
if (peerBlockSignatures.isEmpty()) {
@ -1098,7 +1093,7 @@ public class Synchronizer {
}
private SynchronizationResult applyNewBlocks(Repository repository, BlockData commonBlockData, int ourInitialHeight,
Peer peer, int peerHeight, List<BlockSummaryData> peerBlockSummaries) throws InterruptedException, DataException {
Peer peer, int peerHeight, List<BlockSummaryData> peerBlockSummaries) throws InterruptedException, DataException {
LOGGER.debug(String.format("Fetching new blocks from peer %s", peer));
final int commonBlockHeight = commonBlockData.getHeight();

View File

@ -0,0 +1,185 @@
package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataBuildQueueItem;
import org.qortal.utils.NTP;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public class ArbitraryDataBuildManager extends Thread {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataBuildManager.class);
private static ArbitraryDataBuildManager instance;
private volatile boolean isStopping = false;
private boolean buildInProgress = false;
/**
* Map to keep track of arbitrary transaction resources currently being built (or queued).
*/
public Map<String, ArbitraryDataBuildQueueItem> arbitraryDataBuildQueue = Collections.synchronizedMap(new HashMap<>());
/**
* Map to keep track of failed arbitrary transaction builds.
*/
public Map<String, ArbitraryDataBuildQueueItem> arbitraryDataFailedBuilds = Collections.synchronizedMap(new HashMap<>());
public ArbitraryDataBuildManager() {
}
@Override
public void run() {
try {
// Use a fixed thread pool to execute the arbitrary data build actions (currently just a single thread)
// This can be expanded to have multiple threads processing the build queue when needed
ExecutorService arbitraryDataBuildExecutor = Executors.newFixedThreadPool(1);
arbitraryDataBuildExecutor.execute(new ArbitraryDataBuilderThread());
while (!isStopping) {
// Nothing to do yet
Thread.sleep(5000);
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
}
}
public static ArbitraryDataBuildManager getInstance() {
if (instance == null)
instance = new ArbitraryDataBuildManager();
return instance;
}
public void shutdown() {
isStopping = true;
this.interrupt();
}
public void cleanupQueues(Long now) {
if (now == null) {
return;
}
arbitraryDataBuildQueue.entrySet().removeIf(entry -> entry.getValue().hasReachedBuildTimeout(now));
arbitraryDataFailedBuilds.entrySet().removeIf(entry -> entry.getValue().hasReachedFailureTimeout(now));
}
// Build queue
public boolean addToBuildQueue(ArbitraryDataBuildQueueItem queueItem) {
String key = queueItem.getUniqueKey();
if (key == null) {
return false;
}
if (this.arbitraryDataBuildQueue == null) {
return false;
}
if (NTP.getTime() == null) {
// Can't use queues until we have synced the time
return false;
}
// Don't add builds that have failed recently
if (this.isInFailedBuildsList(queueItem)) {
return false;
}
if (this.arbitraryDataBuildQueue.put(key, queueItem) != null) {
// Already in queue
return true;
}
LOGGER.info("Added {} to build queue", queueItem);
// Added to queue
return true;
}
public boolean isInBuildQueue(ArbitraryDataBuildQueueItem queueItem) {
String key = queueItem.getUniqueKey();
if (key == null) {
return false;
}
if (this.arbitraryDataBuildQueue == null) {
return false;
}
if (this.arbitraryDataBuildQueue.containsKey(key)) {
// Already in queue
return true;
}
// Not in queue
return false;
}
// Failed builds
public boolean addToFailedBuildsList(ArbitraryDataBuildQueueItem queueItem) {
String key = queueItem.getUniqueKey();
if (key == null) {
return false;
}
if (this.arbitraryDataFailedBuilds == null) {
return false;
}
if (NTP.getTime() == null) {
// Can't use queues until we have synced the time
return false;
}
if (this.arbitraryDataFailedBuilds.put(key, queueItem) != null) {
// Already in list
return true;
}
LOGGER.info("Added {} to failed builds list", queueItem);
// Added to queue
return true;
}
public boolean isInFailedBuildsList(ArbitraryDataBuildQueueItem queueItem) {
String key = queueItem.getUniqueKey();
if (key == null) {
return false;
}
if (this.arbitraryDataFailedBuilds == null) {
return false;
}
if (this.arbitraryDataFailedBuilds.containsKey(key)) {
// Already in list
return true;
}
// Not in list
return false;
}
public void setBuildInProgress(boolean buildInProgress) {
this.buildInProgress = buildInProgress;
}
public boolean getBuildInProgress() {
return this.buildInProgress;
}
}

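A hedged usage sketch of the queue API above: how a caller might hand an item to the build queue. The construction of an ArbitraryDataBuildQueueItem isn't shown in this diff, so the item is passed in ready-made rather than created here.

import org.qortal.arbitrary.ArbitraryDataBuildQueueItem;
import org.qortal.controller.arbitrary.ArbitraryDataBuildManager;

// Illustrative caller only - not part of this commit.
public class BuildQueueUsageSketch {
    /** Returns true if the item is queued (or was already queued). */
    public static boolean requestBuild(ArbitraryDataBuildQueueItem queueItem) {
        ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance();
        if (buildManager.isInBuildQueue(queueItem)) {
            return true; // already queued; the builder thread will pick it up
        }
        // addToBuildQueue() refuses items that failed recently, or when NTP time isn't synced yet
        return buildManager.addToBuildQueue(queueItem);
    }
}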
View File

@ -0,0 +1,98 @@
package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataBuildQueueItem;
import org.qortal.arbitrary.exception.MissingDataException;
import org.qortal.controller.Controller;
import org.qortal.repository.DataException;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.util.Map;
public class ArbitraryDataBuilderThread implements Runnable {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataBuilderThread.class);
public ArbitraryDataBuilderThread() {
}
public void run() {
Thread.currentThread().setName("Arbitrary Data Build Manager");
ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance();
while (!Controller.isStopping()) {
try {
Thread.sleep(1000);
if (buildManager.arbitraryDataBuildQueue == null) {
continue;
}
if (buildManager.arbitraryDataBuildQueue.isEmpty()) {
continue;
}
// Find resources that are queued for building
Map.Entry<String, ArbitraryDataBuildQueueItem> next = buildManager.arbitraryDataBuildQueue
.entrySet().stream()
.filter(e -> e.getValue().isQueued())
.findFirst().orElse(null);
// orElse(null) avoids a NoSuchElementException when nothing in the queue is marked as queued
if (next == null) {
continue;
}
Long now = NTP.getTime();
if (now == null) {
continue;
}
ArbitraryDataBuildQueueItem queueItem = next.getValue();
if (queueItem == null) {
// Nothing usable in this entry - move on to the next queued item
continue;
}
// Ignore builds that have failed recently
if (buildManager.isInFailedBuildsList(queueItem)) {
continue;
}
try {
// Perform the build
LOGGER.info("Building {}...", queueItem);
queueItem.build();
this.removeFromQueue(queueItem);
LOGGER.info("Finished building {}", queueItem);
} catch (MissingDataException e) {
LOGGER.info("Missing data for {}: {}", queueItem, e.getMessage());
queueItem.setFailed(true);
this.removeFromQueue(queueItem);
// Don't add to the failed builds list, as we may want to retry sooner
} catch (IOException | DataException | RuntimeException e) {
LOGGER.info("Error building {}: {}", queueItem, e.getMessage());
// Something went wrong - so remove it from the queue, and add to failed builds list
queueItem.setFailed(true);
buildManager.addToFailedBuildsList(queueItem);
this.removeFromQueue(queueItem);
}
} catch (InterruptedException e) {
// Time to exit
}
}
}
private void removeFromQueue(ArbitraryDataBuildQueueItem queueItem) {
if (queueItem == null || queueItem.getUniqueKey() == null) {
return;
}
ArbitraryDataBuildManager.getInstance().arbitraryDataBuildQueue.remove(queueItem.getUniqueKey());
}
}

View File

@ -0,0 +1,544 @@
package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.SecureRandom;
import java.util.*;
import static org.qortal.controller.arbitrary.ArbitraryDataStorageManager.DELETION_THRESHOLD;
public class ArbitraryDataCleanupManager extends Thread {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataCleanupManager.class);
private static final List<TransactionType> ARBITRARY_TX_TYPE = Arrays.asList(TransactionType.ARBITRARY);
private static ArbitraryDataCleanupManager instance;
private volatile boolean isStopping = false;
/**
* The amount of time that must pass before a file is treated as stale / not recent.
We can safely delete files created/accessed longer ago than this, if we have a means of
* rebuilding them. The main purpose of this is to avoid deleting files that are currently
* being used by other parts of the system.
*/
private static final long STALE_FILE_TIMEOUT = 60*60*1000L; // 1 hour
/**
* The number of chunks to delete in a batch when over the capacity limit.
* Storage limits are re-checked after each batch, and there could be a significant
* delay between the processing of each batch as it only occurs after a complete
* cleanup cycle (to allow unwanted chunks to be deleted first).
*/
private static final int CHUNK_DELETION_BATCH_SIZE = 10;
/*
TODO:
- Delete files from the _misc folder once they reach a certain age
*/
private ArbitraryDataCleanupManager() {
}
public static ArbitraryDataCleanupManager getInstance() {
if (instance == null)
instance = new ArbitraryDataCleanupManager();
return instance;
}
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Cleanup Manager");
// Paginate queries when fetching arbitrary transactions
final int limit = 100;
int offset = 0;
try {
while (!isStopping) {
Thread.sleep(30000);
// Don't run if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
Thread.sleep(60 * 60 * 1000L);
continue;
}
Long now = NTP.getTime();
if (now == null) {
// Don't attempt to make decisions if we haven't synced our time yet
continue;
}
ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance();
// Wait until storage capacity has been calculated
if (!storageManager.isStorageCapacityCalculated()) {
continue;
}
// Periodically delete any unnecessary files from the temp directory
if (offset == 0 || offset % (limit * 10) == 0) {
this.cleanupTempDirectory(now);
}
// Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, null, null, ConfirmationStatus.BOTH, limit, offset, true);
// LOGGER.info("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
if (signatures == null || signatures.isEmpty()) {
offset = 0;
continue;
}
offset += limit;
now = NTP.getTime();
// Loop through the signatures in this batch
for (int i=0; i<signatures.size(); i++) {
byte[] signature = signatures.get(i);
if (signature == null) {
continue;
}
// Don't interfere with the filesystem whilst a build is in progress
if (ArbitraryDataBuildManager.getInstance().getBuildInProgress()) {
Thread.sleep(5000);
}
// Fetch the transaction data
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
if (arbitraryTransactionData == null) {
continue;
}
// Raw data doesn't have any associated files to clean up
if (arbitraryTransactionData.getDataType() == ArbitraryTransactionData.DataType.RAW_DATA) {
continue;
}
// Check if we have the complete file
boolean completeFileExists = ArbitraryTransactionUtils.completeFileExists(arbitraryTransactionData);
// Check if we have any of the chunks
boolean anyChunksExist = ArbitraryTransactionUtils.anyChunksExist(arbitraryTransactionData);
boolean transactionHasChunks = (arbitraryTransactionData.getMetadataHash() != null);
if (!completeFileExists && !anyChunksExist) {
// We don't have any files at all for this transaction - nothing to do
continue;
}
// We have at least 1 chunk or file for this transaction, so we might need to delete them...
// Check to see if we should be hosting data for this transaction at all
if (!storageManager.canStoreData(arbitraryTransactionData)) {
LOGGER.info("Deleting transaction {} because we can't host its data",
Base58.encode(arbitraryTransactionData.getSignature()));
ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
continue;
}
// Check to see if we have had a more recent PUT
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
if (hasMoreRecentPutTransaction) {
// There is a more recent PUT transaction than the one we are currently processing.
// When a PUT is issued, it replaces any layers that would have been there before.
// Therefore any data relating to this older transaction is no longer needed.
LOGGER.info(String.format("Newer PUT found for %s %s since transaction %s. " +
"Deleting all files associated with the earlier transaction.", arbitraryTransactionData.getService(),
arbitraryTransactionData.getName(), Base58.encode(signature)));
ArbitraryTransactionUtils.deleteCompleteFileAndChunks(arbitraryTransactionData);
// We should also remove peers for this transaction from the lookup table to save space
this.removePeersHostingTransactionData(repository, arbitraryTransactionData);
continue;
}
if (completeFileExists && !transactionHasChunks) {
// This file doesn't have any chunks because it is too small.
// We must not delete anything.
continue;
}
// Check if we have all of the chunks
boolean allChunksExist = ArbitraryTransactionUtils.allChunksExist(arbitraryTransactionData);
if (completeFileExists && allChunksExist) {
// We have the complete file and all the chunks, so we can delete
// the complete file if it has reached a certain age.
LOGGER.debug(String.format("Transaction %s has complete file and all chunks",
Base58.encode(arbitraryTransactionData.getSignature())));
ArbitraryTransactionUtils.deleteCompleteFile(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
continue;
}
if (completeFileExists && !allChunksExist) {
// We have the complete file but not the chunks, so let's convert it
LOGGER.info(String.format("Transaction %s has complete file but no chunks",
Base58.encode(arbitraryTransactionData.getSignature())));
ArbitraryTransactionUtils.convertFileToChunks(arbitraryTransactionData, now, STALE_FILE_TIMEOUT);
continue;
}
}
} catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
}
try (final Repository repository = RepositoryManager.getRepository()) {
// Check if there are any hosted files that don't have matching transactions
this.checkForExpiredTransactions(repository);
// Delete additional data at random if we're over our storage limit
// Use the DELETION_THRESHOLD so that we only start deleting once the hard limit is reached
// This also allows some headroom between the regular threshold (90%) and the hard
// limit, to avoid data getting into a fetch/delete loop.
if (!storageManager.isStorageSpaceAvailable(DELETION_THRESHOLD)) {
// Rate limit, to avoid repeated calls to calculateDirectorySize()
Thread.sleep(60000);
// Now delete some data at random
this.storageLimitReached(repository);
}
// Delete random data associated with name if we're over our storage limit for this name
// Use the DELETION_THRESHOLD, for the same reasons as above
for (String followedName : storageManager.followedNames()) {
if (!storageManager.isStorageSpaceAvailableForName(repository, followedName, DELETION_THRESHOLD)) {
this.storageLimitReachedForName(repository, followedName);
}
}
} catch (DataException e) {
LOGGER.error("Repository issue when cleaning up arbitrary transaction data", e);
}
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
}
}
public List<Path> findPathsWithNoAssociatedTransaction(Repository repository) {
List<Path> pathList = new ArrayList<>();
// Find all hosted paths
List<Path> allPaths = ArbitraryDataStorageManager.getInstance().findAllHostedPaths();
// Loop through each path and find those without matching signatures
for (Path path : allPaths) {
try {
String[] contents = path.toFile().list();
if (contents == null || contents.length == 0) {
// Ignore empty directories
continue;
}
String signature58 = path.getFileName().toString();
byte[] signature = Base58.decode(signature58);
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (transactionData == null) {
// No transaction data, and no DataException, so we can assume that this data relates to an expired transaction
pathList.add(path);
}
} catch (DataException e) {
continue;
}
}
return pathList;
}
private void checkForExpiredTransactions(Repository repository) {
List<Path> expiredPaths = this.findPathsWithNoAssociatedTransaction(repository);
for (Path expiredPath : expiredPaths) {
LOGGER.info("Found path with no associated transaction: {}", expiredPath.toString());
this.safeDeleteDirectory(expiredPath.toFile(), "no matching transaction");
}
}
private void storageLimitReached(Repository repository) throws InterruptedException {
// We think that the storage limit has been reached
// Now calculate the used/total storage again, as a safety precaution
Long now = NTP.getTime();
ArbitraryDataStorageManager.getInstance().calculateDirectorySize(now);
if (ArbitraryDataStorageManager.getInstance().isStorageSpaceAvailable(DELETION_THRESHOLD)) {
// We have space available, so don't delete anything
return;
}
// Delete a batch of random chunks
// This reduces the chance of too many nodes deleting the same chunk
// when they reach their storage limit
Path dataPath = Paths.get(Settings.getInstance().getDataPath());
for (int i=0; i<CHUNK_DELETION_BATCH_SIZE; i++) {
this.deleteRandomFile(repository, dataPath.toFile(), null);
}
// FUTURE: consider reducing the expiry time of the reader cache
}
public void storageLimitReachedForName(Repository repository, String name) throws InterruptedException {
// We think that the storage limit has been reached for supplied name - but we should double check
if (ArbitraryDataStorageManager.getInstance().isStorageSpaceAvailableForName(repository, name, DELETION_THRESHOLD)) {
// We have space available for this name, so don't delete anything
return;
}
// Delete a batch of random chunks associated with this name
// This reduces the chance of too many nodes deleting the same chunk
// when they reach their storage limit
Path dataPath = Paths.get(Settings.getInstance().getDataPath());
for (int i=0; i<CHUNK_DELETION_BATCH_SIZE; i++) {
this.deleteRandomFile(repository, dataPath.toFile(), name);
}
}
/**
* Iteratively walk through given directory and delete a single random file
*
* @param directory - the base directory
* @return boolean - whether a file was deleted
*/
private boolean deleteRandomFile(Repository repository, File directory, String name) {
Path tempDataPath = Paths.get(Settings.getInstance().getTempDataPath());
// Pick a random directory
final File[] contentsList = directory.listFiles();
if (contentsList != null) {
SecureRandom random = new SecureRandom();
// If the directory is empty, there's nothing to do
if (contentsList.length == 0) {
return false;
}
File randomItem = contentsList[random.nextInt(contentsList.length)];
// Skip anything relating to the temp directory
if (FilesystemUtils.isChild(randomItem.toPath(), tempDataPath)) {
return false;
}
// Make sure it exists
if (!randomItem.exists()) {
return false;
}
// If it's a directory, iteratively repeat the process
if (randomItem.isDirectory()) {
return this.deleteRandomFile(repository, randomItem, name);
}
// If it's a file, we might be able to delete it
if (randomItem.isFile()) {
// If the parent directory contains an ".original" file, don't delete anything
// This indicates that the content was originally updated by this node and so
// could be the only copy that exists.
Path originalCopyIndicatorPath = Paths.get(randomItem.getParent(), ".original");
if (Files.exists(originalCopyIndicatorPath)) {
// This is an original seed copy and so shouldn't be deleted
return false;
}
if (name != null) {
// A name has been specified, so we need to make sure this file relates to
// the name we want to delete. The signature should be the name of the parent directory.
try {
Path parentFileNamePath = randomItem.toPath().toAbsolutePath().getParent().getFileName();
if (parentFileNamePath != null) {
String signature58 = parentFileNamePath.toString();
byte[] signature = Base58.decode(signature58);
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (transactionData == null || transactionData.getType() != Transaction.TransactionType.ARBITRARY) {
// Not what we were expecting, so don't delete it
return false;
}
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
if (!Objects.equals(arbitraryTransactionData.getName(), name)) {
// Relates to a different name - don't delete it
return false;
}
}
} catch (DataException e) {
// Something went wrong and we weren't able to make a decision - so it's best not to delete this file
return false;
}
}
LOGGER.info("Deleting random file {} because we have reached max storage capacity...", randomItem.toString());
boolean success = randomItem.delete();
if (success) {
try {
FilesystemUtils.safeDeleteEmptyParentDirectories(randomItem.toPath().getParent());
} catch (IOException e) {
// Ignore cleanup failure
}
}
return success;
}
}
return false;
}
private void removePeersHostingTransactionData(Repository repository, ArbitraryTransactionData transactionData) {
byte[] signature = transactionData.getSignature();
try {
repository.getArbitraryRepository().deleteArbitraryPeersWithSignature(signature);
repository.saveChanges();
} catch (DataException e) {
LOGGER.debug("Unable to delete peers from lookup table for signature: {}", Base58.encode(signature));
}
}
private void cleanupTempDirectory(String folder, long now, long minAge) {
String baseDir = Settings.getInstance().getTempDataPath();
Path tempDir = Paths.get(baseDir, folder);
int contentsCount = 0;
// Loop through the contents and check each one
final File[] directories = tempDir.toFile().listFiles();
if (directories != null) {
for (final File directory : directories) {
contentsCount++;
// We're expecting the contents of each subfolder to be a directory
if (directory.isDirectory()) {
if (!ArbitraryTransactionUtils.isFileRecent(directory.toPath(), now, minAge)) {
// File isn't recent, so can be deleted
this.safeDeleteDirectory(directory, "not recent");
}
}
}
}
// If the directory is empty, we still need to delete its parent folder
if (contentsCount == 0 && tempDir.toFile().isDirectory() && tempDir.toFile().exists()) {
try {
LOGGER.debug("Parent directory {} is empty, so deleting it", tempDir);
FilesystemUtils.safeDeleteDirectory(tempDir, false);
} catch (IOException e) {
LOGGER.info("Unable to delete parent directory: {}", tempDir);
}
}
}
private void cleanupReaderCache(Long now) {
ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance();
String baseDir = Settings.getInstance().getTempDataPath();
Path readerCachePath = Paths.get(baseDir, "reader");
// Clean up names
Path readerCacheNamesPath = Paths.get(readerCachePath.toString(), "NAME");
// Loop through the contents and check each one
final File[] directories = readerCacheNamesPath.toFile().listFiles();
if (directories != null) {
for (final File directory : directories) {
// Delete data relating to blocked names
String name = directory.getName();
if (name != null && storageManager.isNameBlocked(name)) {
this.safeDeleteDirectory(directory, "blocked name");
}
// Delete cached reader data that has reached its expiry
this.cleanupReaderCacheForName(name, now);
}
}
}
private void cleanupReaderCacheForName(String name, Long now) {
if (name == null) {
return;
}
String baseDir = Settings.getInstance().getTempDataPath();
Path readerNameCachePath = Paths.get(baseDir, "reader", "NAME", name);
// Loop through the contents and check each one
final File[] directories = readerNameCachePath.toFile().listFiles();
if (directories != null) {
for (final File directory : directories) {
// Each directory is a "service" type
String service = directory.getName();
this.cleanupReaderCacheForNameAndService(name, service, now);
}
}
}
private void cleanupReaderCacheForNameAndService(String name, String service, Long now) {
if (name == null || service == null) {
return;
}
Path readerNameServiceCachePath = Paths.get("reader", "NAME", name, service);
Long expiry = Settings.getInstance().getBuiltDataExpiryInterval();
this.cleanupTempDirectory(readerNameServiceCachePath.toString(), now, expiry);
}
private void cleanupTempDirectory(long now) {
// Use the "stale file timeout" for the intermediate directories.
// These aren't used for serving content - only for building it.
// Once the files have become stale, it's safe to delete them.
this.cleanupTempDirectory("diff", now, STALE_FILE_TIMEOUT);
this.cleanupTempDirectory("join", now, STALE_FILE_TIMEOUT);
this.cleanupTempDirectory("merge", now, STALE_FILE_TIMEOUT);
this.cleanupTempDirectory("writer", now, STALE_FILE_TIMEOUT);
// Built resources are served out of the "reader" directory so these
// need to be kept around for much longer.
// Purging currently disabled, as it's not very helpful. Will revisit
// once we implement local storage limits.
this.cleanupReaderCache(now);
}
private boolean safeDeleteDirectory(File directory, String reason) {
LOGGER.info("Deleting directory {} due to reason: {}", directory, reason);
try {
FilesystemUtils.safeDeleteDirectory(directory.toPath(), true);
return true;
} catch (IOException e) {
LOGGER.debug("Unable to delete directory: {}", directory);
}
return false;
}
public void shutdown() {
isStopping = true;
this.interrupt();
}
}

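The cleanup logic above hinges on a "stale file" test (see STALE_FILE_TIMEOUT) and the isFileRecent() helper in ArbitraryTransactionUtils, which isn't included in this excerpt. Below is a minimal sketch of how such a last-modified check could look; it is an assumption about that helper's behaviour, not its actual implementation.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;

// Sketch only; the real ArbitraryTransactionUtils.isFileRecent() may differ.
public class StalenessCheckSketch {
    /** Returns true if the path was modified within the last minAge milliseconds. */
    public static boolean isRecent(Path path, long now, long minAge) {
        try {
            FileTime lastModified = Files.getLastModifiedTime(path);
            return (now - lastModified.toMillis()) < minAge;
        } catch (IOException e) {
            // If we can't read the timestamp, err on the side of keeping the file
            return true;
        }
    }
}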
View File

@ -0,0 +1,508 @@
package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataFileChunk;
import org.qortal.controller.Controller;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.ArbitraryDataFileListMessage;
import org.qortal.network.message.GetArbitraryDataFileListMessage;
import org.qortal.network.message.Message;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import org.qortal.utils.Triple;
import java.util.*;
public class ArbitraryDataFileListManager {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileListManager.class);
private static ArbitraryDataFileListManager instance;
/**
* Map of recent incoming requests for ARBITRARY transaction data file lists.
* <p>
* Key is original request's message ID<br>
* Value is Triple&lt;transaction signature in base58, first requesting peer, first request's timestamp&gt;
* <p>
* If peer is null then either:<br>
* <ul>
* <li>we are the original requesting peer</li>
* <li>we have already sent data payload to original requesting peer.</li>
* </ul>
* If signature is null then we have already received the file list and either:<br>
* <ul>
* <li>we are the original requesting peer and have processed it</li>
* <li>we have forwarded the file list</li>
* </ul>
*/
public Map<Integer, Triple<String, Peer, Long>> arbitraryDataFileListRequests = Collections.synchronizedMap(new HashMap<>());
/**
* Map to keep track of in progress arbitrary data signature requests
* Key: string - the signature encoded in base58
* Value: Triple<networkBroadcastCount, directPeerRequestCount, lastAttemptTimestamp>
*/
private Map<String, Triple<Integer, Integer, Long>> arbitraryDataSignatureRequests = Collections.synchronizedMap(new HashMap<>());
/** Maximum time (in milliseconds) that a file list relay request is able to exist on the network */
private static long RELAY_REQUEST_MAX_DURATION = 5000L;
/** Maximum number of hops that a file list relay request is allowed to make */
private static int RELAY_REQUEST_MAX_HOPS = 3;
private ArbitraryDataFileListManager() {
}
public static ArbitraryDataFileListManager getInstance() {
if (instance == null)
instance = new ArbitraryDataFileListManager();
return instance;
}
public void cleanupRequestCache(Long now) {
if (now == null) {
return;
}
final long requestMinimumTimestamp = now - ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT;
arbitraryDataFileListRequests.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < requestMinimumTimestamp);
}
// Track file list lookups by signature
private boolean shouldMakeFileListRequestForSignature(String signature58) {
Triple<Integer, Integer, Long> request = arbitraryDataSignatureRequests.get(signature58);
if (request == null) {
// Not attempted yet
return true;
}
// Extract the components
Integer networkBroadcastCount = request.getA();
// Integer directPeerRequestCount = request.getB();
Long lastAttemptTimestamp = request.getC();
if (lastAttemptTimestamp == null) {
// Not attempted yet
return true;
}
long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp;
if (timeSinceLastAttempt > 5 * 60 * 1000L) {
// We haven't tried for at least 5 minutes
if (networkBroadcastCount < 5) {
// We've made less than 5 total attempts
return true;
}
}
if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) {
// We haven't tried for at least 24 hours
return true;
}
return false;
}
private boolean shouldMakeDirectFileRequestsForSignature(String signature58) {
if (!Settings.getInstance().isDirectDataRetrievalEnabled()) {
// Direct connections are disabled in the settings
return false;
}
Triple<Integer, Integer, Long> request = arbitraryDataSignatureRequests.get(signature58);
if (request == null) {
// Not attempted yet
return true;
}
// Extract the components
//Integer networkBroadcastCount = request.getA();
Integer directPeerRequestCount = request.getB();
Long lastAttemptTimestamp = request.getC();
if (lastAttemptTimestamp == null) {
// Not attempted yet
return true;
}
if (directPeerRequestCount == 0) {
// We haven't tried asking peers directly yet, so we should
return true;
}
long timeSinceLastAttempt = NTP.getTime() - lastAttemptTimestamp;
if (timeSinceLastAttempt > 10 * 1000L) {
// We haven't tried for at least 10 seconds
if (directPeerRequestCount < 5) {
// We've made less than 5 total attempts
return true;
}
}
if (timeSinceLastAttempt > 5 * 60 * 1000L) {
// We haven't tried for at least 5 minutes
if (directPeerRequestCount < 10) {
// We've made less than 10 total attempts
return true;
}
}
if (timeSinceLastAttempt > 24 * 60 * 60 * 1000L) {
// We haven't tried for at least 24 hours
return true;
}
return false;
}
public boolean isSignatureRateLimited(byte[] signature) {
String signature58 = Base58.encode(signature);
return !this.shouldMakeFileListRequestForSignature(signature58)
&& !this.shouldMakeDirectFileRequestsForSignature(signature58);
}
public long lastRequestForSignature(byte[] signature) {
String signature58 = Base58.encode(signature);
Triple<Integer, Integer, Long> request = arbitraryDataSignatureRequests.get(signature58);
if (request == null) {
// Not attempted yet
return 0;
}
// Extract the components
Long lastAttemptTimestamp = request.getC();
if (lastAttemptTimestamp != null) {
return lastAttemptTimestamp;
}
return 0;
}
public void addToSignatureRequests(String signature58, boolean incrementNetworkRequests, boolean incrementPeerRequests) {
Triple<Integer, Integer, Long> request = arbitraryDataSignatureRequests.get(signature58);
Long now = NTP.getTime();
if (request == null) {
// No entry yet
Triple<Integer, Integer, Long> newRequest = new Triple<>(0, 0, now);
arbitraryDataSignatureRequests.put(signature58, newRequest);
}
else {
// There is an existing entry
if (incrementNetworkRequests) {
request.setA(request.getA() + 1);
}
if (incrementPeerRequests) {
request.setB(request.getB() + 1);
}
request.setC(now);
arbitraryDataSignatureRequests.put(signature58, request);
}
}
public void removeFromSignatureRequests(String signature58) {
arbitraryDataSignatureRequests.remove(signature58);
}
// Lookup file lists by signature
public boolean fetchArbitraryDataFileList(ArbitraryTransactionData arbitraryTransactionData) {
byte[] signature = arbitraryTransactionData.getSignature();
String signature58 = Base58.encode(signature);
// Require an NTP sync
Long now = NTP.getTime();
if (now == null) {
return false;
}
// If we've already tried too many times in a short space of time, make sure to give up
if (!this.shouldMakeFileListRequestForSignature(signature58)) {
// Check if we should make direct connections to peers
if (this.shouldMakeDirectFileRequestsForSignature(signature58)) {
return ArbitraryDataFileManager.getInstance().fetchDataFilesFromPeersForSignature(signature);
}
LOGGER.debug("Skipping file list request for signature {} due to rate limit", signature58);
return false;
}
this.addToSignatureRequests(signature58, true, false);
List<Peer> handshakedPeers = Network.getInstance().getHandshakedPeers();
LOGGER.debug(String.format("Sending data file list request for signature %s to %d peers...", signature58, handshakedPeers.size()));
// Build request
Message getArbitraryDataFileListMessage = new GetArbitraryDataFileListMessage(signature, now, 0);
// Save our request into requests map
Triple<String, Peer, Long> requestEntry = new Triple<>(signature58, null, NTP.getTime());
// Assign random ID to this message
int id;
do {
id = new Random().nextInt(Integer.MAX_VALUE - 1) + 1;
// Put queue into map (keyed by message ID) so we can poll for a response
// If putIfAbsent() doesn't return null, then this ID is already taken
} while (arbitraryDataFileListRequests.put(id, requestEntry) != null);
getArbitraryDataFileListMessage.setId(id);
// Broadcast request
Network.getInstance().broadcast(peer -> getArbitraryDataFileListMessage);
// Poll to see if data has arrived
final long singleWait = 100;
long totalWait = 0;
while (totalWait < ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT) {
try {
Thread.sleep(singleWait);
} catch (InterruptedException e) {
break;
}
requestEntry = arbitraryDataFileListRequests.get(id);
if (requestEntry == null)
return false;
if (requestEntry.getA() == null)
break;
totalWait += singleWait;
}
return true;
}
// Network handlers
public void onNetworkArbitraryDataFileListMessage(Peer peer, Message message) {
// Don't process if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}
ArbitraryDataFileListMessage arbitraryDataFileListMessage = (ArbitraryDataFileListMessage) message;
LOGGER.debug("Received hash list from peer {} with {} hashes", peer, arbitraryDataFileListMessage.getHashes().size());
// Do we have a pending request for this data? // TODO: might we want to relay all of them anyway?
Triple<String, Peer, Long> request = arbitraryDataFileListRequests.get(message.getId());
if (request == null || request.getA() == null) {
return;
}
boolean isRelayRequest = (request.getB() != null);
// Does this message's signature match what we're expecting?
byte[] signature = arbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
if (!request.getA().equals(signature58)) {
return;
}
List<byte[]> hashes = arbitraryDataFileListMessage.getHashes();
if (hashes == null || hashes.isEmpty()) {
return;
}
ArbitraryTransactionData arbitraryTransactionData = null;
ArbitraryDataFileManager arbitraryDataFileManager = ArbitraryDataFileManager.getInstance();
// Check transaction exists and hashes are correct
try (final Repository repository = RepositoryManager.getRepository()) {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
return;
arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
arbitraryDataFile.setMetadataHash(arbitraryTransactionData.getMetadataHash());
// // Check all hashes exist
// for (byte[] hash : hashes) {
// //LOGGER.debug("Received hash {}", Base58.encode(hash));
// if (!arbitraryDataFile.containsChunk(hash)) {
// // Check the hash against the complete file
// if (!Arrays.equals(arbitraryDataFile.getHash(), hash)) {
// LOGGER.info("Received non-matching chunk hash {} for signature {}. This could happen if we haven't obtained the metadata file yet.", Base58.encode(hash), signature58);
// return;
// }
// }
// }
// Update requests map to reflect that we've received it
Triple<String, Peer, Long> newEntry = new Triple<>(null, null, request.getC());
arbitraryDataFileListRequests.put(message.getId(), newEntry);
if (!isRelayRequest || !Settings.getInstance().isRelayModeEnabled()) {
// Go and fetch the actual data, since this isn't a relay request
arbitraryDataFileManager.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, hashes);
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while finding arbitrary transaction data list for peer %s", peer), e);
}
// Forwarding
if (isRelayRequest && Settings.getInstance().isRelayModeEnabled()) {
boolean isBlocked = (arbitraryTransactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(arbitraryTransactionData.getName()));
if (!isBlocked) {
Peer requestingPeer = request.getB();
if (requestingPeer != null) {
// Add each hash to our local mapping so we know who to ask later
Long now = NTP.getTime();
for (byte[] hash : hashes) {
String hash58 = Base58.encode(hash);
Triple<String, Peer, Long> value = new Triple<>(signature58, peer, now);
arbitraryDataFileManager.arbitraryRelayMap.put(hash58, value);
LOGGER.debug("Added {} to relay map: {}, {}, {}", hash58, signature58, peer, now);
}
// Forward to requesting peer
LOGGER.debug("Forwarding file list with {} hashes to requesting peer: {}", hashes.size(), requestingPeer);
if (!requestingPeer.sendMessage(arbitraryDataFileListMessage)) {
requestingPeer.disconnect("failed to forward arbitrary data file list");
}
}
}
}
}
public void onNetworkGetArbitraryDataFileListMessage(Peer peer, Message message) {
// Don't respond if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}
Controller.getInstance().stats.getArbitraryDataFileListMessageStats.requests.incrementAndGet();
GetArbitraryDataFileListMessage getArbitraryDataFileListMessage = (GetArbitraryDataFileListMessage) message;
byte[] signature = getArbitraryDataFileListMessage.getSignature();
String signature58 = Base58.encode(signature);
Long now = NTP.getTime();
Triple<String, Peer, Long> newEntry = new Triple<>(signature58, peer, now);
// If we've seen this request recently, then ignore
if (arbitraryDataFileListRequests.putIfAbsent(message.getId(), newEntry) != null) {
LOGGER.debug("Ignoring hash list request from peer {} for signature {}", peer, signature58);
return;
}
LOGGER.debug("Received hash list request from peer {} for signature {}", peer, signature58);
List<byte[]> hashes = new ArrayList<>();
ArbitraryTransactionData transactionData = null;
try (final Repository repository = RepositoryManager.getRepository()) {
// Firstly we need to lookup this file on chain to get a list of its hashes
// Check the type before casting, so a non-ARBITRARY transaction can't cause a ClassCastException
TransactionData maybeArbitraryTransaction = repository.getTransactionRepository().fromSignature(signature);
if (maybeArbitraryTransaction instanceof ArbitraryTransactionData) {
transactionData = (ArbitraryTransactionData) maybeArbitraryTransaction;
// Check if we're even allowed to serve data for this transaction
if (ArbitraryDataStorageManager.getInstance().canStoreData(transactionData)) {
byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
// Load file(s) and add any that exist to the list of hashes
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
if (metadataHash != null) {
arbitraryDataFile.setMetadataHash(metadataHash);
// If we have the metadata file, add its hash
if (arbitraryDataFile.getMetadataFile().exists()) {
hashes.add(arbitraryDataFile.getMetadataHash());
}
for (ArbitraryDataFileChunk chunk : arbitraryDataFile.getChunks()) {
if (chunk.exists()) {
hashes.add(chunk.getHash());
//LOGGER.trace("Added hash {}", chunk.getHash58());
} else {
LOGGER.debug("Couldn't add hash {} because it doesn't exist", chunk.getHash58());
}
}
} else {
// This transaction has no chunks, so include the complete file if we have it
if (arbitraryDataFile.exists()) {
hashes.add(arbitraryDataFile.getHash());
}
}
}
}
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while fetching arbitrary file list for peer %s", peer), e);
}
// We should only respond if we have at least one hash
if (hashes.size() > 0) {
// Update requests map to reflect that we've sent it
newEntry = new Triple<>(signature58, null, now);
arbitraryDataFileListRequests.put(message.getId(), newEntry);
ArbitraryDataFileListMessage arbitraryDataFileListMessage = new ArbitraryDataFileListMessage(signature, hashes);
arbitraryDataFileListMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileListMessage)) {
LOGGER.debug("Couldn't send list of hashes");
peer.disconnect("failed to send list of hashes");
}
LOGGER.debug("Sent list of hashes (count: {})", hashes.size());
}
else {
boolean isBlocked = (transactionData == null || ArbitraryDataStorageManager.getInstance().isNameBlocked(transactionData.getName()));
if (Settings.getInstance().isRelayModeEnabled() && !isBlocked) {
// In relay mode - so ask our other peers if they have it
long requestTime = getArbitraryDataFileListMessage.getRequestTime();
int requestHops = getArbitraryDataFileListMessage.getRequestHops();
getArbitraryDataFileListMessage.setRequestHops(++requestHops);
long totalRequestTime = now - requestTime;
if (totalRequestTime < RELAY_REQUEST_MAX_DURATION) {
// Relay request hasn't timed out yet, so can potentially be rebroadcast
if (requestHops < RELAY_REQUEST_MAX_HOPS) {
// Relay request hasn't reached the maximum number of hops yet, so can be rebroadcast
LOGGER.info("Rebroadcasting hash list request from peer {} for signature {} to our other peers... totalRequestTime: {}, requestHops: {}", peer, Base58.encode(signature), totalRequestTime, requestHops);
Network.getInstance().broadcast(
broadcastPeer -> broadcastPeer == peer ||
Objects.equals(broadcastPeer.getPeerData().getAddress().getHost(), peer.getPeerData().getAddress().getHost())
? null : getArbitraryDataFileListMessage);
}
else {
// This relay request has reached the maximum number of allowed hops
}
}
else {
// This relay request has timed out
}
}
}
}
}

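The Triple stored in arbitraryDataFileListRequests effectively encodes the three states described in the class Javadoc above. The small sketch below makes those states explicit; the helper names are illustrative only, and the fields are simply (signature58, requestingPeer, timestamp).

import org.qortal.network.Peer;
import org.qortal.utils.Triple;

// Illustrative helper - shows the three entry states used by arbitraryDataFileListRequests.
public class FileListRequestStates {
    /** Incoming relay request: remember which peer to forward the eventual file list to. */
    static Triple<String, Peer, Long> incoming(String signature58, Peer peer, long now) {
        return new Triple<>(signature58, peer, now);
    }
    /** Request we originated ourselves (or have already answered): no peer to forward to. */
    static Triple<String, Peer, Long> ours(String signature58, long now) {
        return new Triple<>(signature58, null, now);
    }
    /** File list received/forwarded: signature cleared so later replies for this ID are ignored. */
    static Triple<String, Peer, Long> completed(long originalTimestamp) {
        return new Triple<>(null, null, originalTimestamp);
    }
}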
View File

@ -0,0 +1,403 @@
package org.qortal.controller.arbitrary;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.controller.Controller;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.data.network.PeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
import org.qortal.utils.Triple;
import java.security.SecureRandom;
import java.util.*;
import java.util.stream.Collectors;
public class ArbitraryDataFileManager {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataFileManager.class);
private static ArbitraryDataFileManager instance;
/**
* Map to keep track of our in progress (outgoing) arbitrary data file requests
*/
private Map<String, Long> arbitraryDataFileRequests = Collections.synchronizedMap(new HashMap<>());
/**
* Map to keep track of hashes that we might need to relay, keyed by the hash of the file (base58 encoded).
Value is composed of the base58-encoded signature, the peer that is hosting it, and the timestamp at which it was added
*/
public Map<String, Triple<String, Peer, Long>> arbitraryRelayMap = Collections.synchronizedMap(new HashMap<>());
private ArbitraryDataFileManager() {
}
public static ArbitraryDataFileManager getInstance() {
if (instance == null)
instance = new ArbitraryDataFileManager();
return instance;
}
public void cleanupRequestCache(Long now) {
if (now == null) {
return;
}
final long requestMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_REQUEST_TIMEOUT;
arbitraryDataFileRequests.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < requestMinimumTimestamp);
final long relayMinimumTimestamp = now - ArbitraryDataManager.getInstance().ARBITRARY_RELAY_TIMEOUT;
arbitraryRelayMap.entrySet().removeIf(entry -> entry.getValue().getC() == null || entry.getValue().getC() < relayMinimumTimestamp);
}
// Fetch data files by hash
public boolean fetchAllArbitraryDataFiles(Repository repository, Peer peer, byte[] signature) {
try {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
return false;
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
// We use null to represent all hashes associated with this transaction
return this.fetchArbitraryDataFiles(repository, peer, signature, arbitraryTransactionData, null);
} catch (DataException e) {}
return false;
}
public boolean fetchArbitraryDataFiles(Repository repository,
Peer peer,
byte[] signature,
ArbitraryTransactionData arbitraryTransactionData,
List<byte[]> hashes) throws DataException {
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(arbitraryTransactionData.getData(), signature);
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
arbitraryDataFile.setMetadataHash(metadataHash);
// If hashes are null, we will treat this to mean all data hashes associated with this file
if (hashes == null) {
if (metadataHash == null) {
// This transaction has no metadata/chunks, so use the main file hash
hashes = Arrays.asList(arbitraryDataFile.getHash());
}
else if (!arbitraryDataFile.getMetadataFile().exists()) {
// We don't have the metadata file yet, so request it
hashes = Arrays.asList(arbitraryDataFile.getMetadataFile().getHash());
}
else {
// Add the chunk hashes
hashes = arbitraryDataFile.getChunkHashes();
}
}
boolean receivedAtLeastOneFile = false;
// Now fetch actual data from this peer
for (byte[] hash : hashes) {
if (!arbitraryDataFile.chunkExists(hash)) {
// Only request the file if we aren't already requesting it from someone else
if (!arbitraryDataFileRequests.containsKey(Base58.encode(hash))) {
ArbitraryDataFileMessage receivedArbitraryDataFileMessage = fetchArbitraryDataFile(peer, null, signature, hash, null);
if (receivedArbitraryDataFileMessage != null) {
LOGGER.debug("Received data file {} from peer {}", receivedArbitraryDataFileMessage.getArbitraryDataFile().getHash58(), peer);
receivedAtLeastOneFile = true;
}
else {
LOGGER.debug("Peer {} didn't respond with data file {} for signature {}", peer, Base58.encode(hash), Base58.encode(signature));
}
}
else {
LOGGER.debug("Already requesting data file {} for signature {}", arbitraryDataFile, Base58.encode(signature));
}
}
}
if (receivedAtLeastOneFile) {
// Update our lookup table to indicate that this peer holds data for this signature
String peerAddress = peer.getPeerData().getAddress().toString();
ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peer);
repository.discardChanges();
if (arbitraryPeerData.isPeerAddressValid()) {
LOGGER.debug("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature));
repository.getArbitraryRepository().save(arbitraryPeerData);
repository.saveChanges();
}
// Invalidate the hosted transactions cache as we are now hosting something new
ArbitraryDataStorageManager.getInstance().invalidateHostedTransactionsCache();
}
// Check if we have all the files we need for this transaction
if (arbitraryDataFile.allFilesExist()) {
// We have all the chunks for this transaction, so we should invalidate the transaction's name's
// data cache so that it is rebuilt the next time we serve it
ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData);
// We may also need to broadcast to the network that we are now hosting files for this transaction,
// but only if these files are in accordance with our storage policy
if (ArbitraryDataStorageManager.getInstance().canStoreData(arbitraryTransactionData)) {
// Use a null peer address to indicate our own
Message newArbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, Arrays.asList(signature));
Network.getInstance().broadcast(broadcastPeer -> newArbitrarySignatureMessage);
}
}
return receivedAtLeastOneFile;
}
private ArbitraryDataFileMessage fetchArbitraryDataFile(Peer peer, Peer requestingPeer, byte[] signature, byte[] hash, Message originalMessage) throws DataException {
ArbitraryDataFile existingFile = ArbitraryDataFile.fromHash(hash, signature);
boolean fileAlreadyExists = existingFile.exists();
Message message = null;
// Fetch the file if it doesn't exist locally
if (!fileAlreadyExists) {
String hash58 = Base58.encode(hash);
LOGGER.debug(String.format("Fetching data file %.8s from peer %s", hash58, peer));
arbitraryDataFileRequests.put(hash58, NTP.getTime());
Message getArbitraryDataFileMessage = new GetArbitraryDataFileMessage(signature, hash);
try {
message = peer.getResponseWithTimeout(getArbitraryDataFileMessage, (int) ArbitraryDataManager.ARBITRARY_REQUEST_TIMEOUT);
} catch (InterruptedException e) {
// Will return below due to null message
}
arbitraryDataFileRequests.remove(hash58);
LOGGER.trace(String.format("Removed hash %.8s from arbitraryDataFileRequests", hash58));
if (message == null || message.getType() != Message.MessageType.ARBITRARY_DATA_FILE) {
return null;
}
}
ArbitraryDataFileMessage arbitraryDataFileMessage = (ArbitraryDataFileMessage) message;
// We might want to forward the request to the peer that originally requested it
this.handleArbitraryDataFileForwarding(requestingPeer, message, originalMessage);
boolean isRelayRequest = (requestingPeer != null);
if (isRelayRequest) {
if (!fileAlreadyExists) {
// File didn't exist locally before the request, and it's a forwarding request, so delete it
LOGGER.debug("Deleting file {} because it was needed for forwarding only", Base58.encode(hash));
ArbitraryDataFile dataFile = arbitraryDataFileMessage.getArbitraryDataFile();
dataFile.delete();
}
}
return arbitraryDataFileMessage;
}
public void handleArbitraryDataFileForwarding(Peer requestingPeer, Message message, Message originalMessage) {
// Return if there is no originally requesting peer to forward to, or nothing to forward
if (requestingPeer == null || message == null || originalMessage == null) {
return;
}
// Return if we're not in relay mode or if this request doesn't need forwarding
if (!Settings.getInstance().isRelayModeEnabled()) {
return;
}
LOGGER.debug("Received arbitrary data file - forwarding is needed");
// The ID needs to match that of the original request
message.setId(originalMessage.getId());
if (!requestingPeer.sendMessage(message)) {
LOGGER.debug("Failed to forward arbitrary data file to peer {}", requestingPeer);
requestingPeer.disconnect("failed to forward arbitrary data file");
}
else {
LOGGER.debug("Forwarded arbitrary data file to peer {}", requestingPeer);
}
}
// Fetch data directly from peers
public boolean fetchDataFilesFromPeersForSignature(byte[] signature) {
String signature58 = Base58.encode(signature);
ArbitraryDataFileListManager.getInstance().addToSignatureRequests(signature58, false, true);
// Firstly fetch peers that claim to be hosting files for this signature
try (final Repository repository = RepositoryManager.getRepository()) {
List<ArbitraryPeerData> peers = repository.getArbitraryRepository().getArbitraryPeerDataForSignature(signature);
if (peers == null || peers.isEmpty()) {
LOGGER.debug("No peers found for signature {}", signature58);
return false;
}
LOGGER.debug("Attempting a direct peer connection for signature {}...", signature58);
// Peers found, so pick a random one and request data from it
int index = new SecureRandom().nextInt(peers.size());
ArbitraryPeerData arbitraryPeerData = peers.get(index);
String peerAddressString = arbitraryPeerData.getPeerAddress();
boolean success = Network.getInstance().requestDataFromPeer(peerAddressString, signature);
// Parse the peer address to find the host and port
String host = null;
int port = -1;
String[] parts = peerAddressString.split(":");
if (parts.length > 1) {
host = parts[0];
port = Integer.parseInt(parts[1]);
}
// If unsuccessful, and using a non-standard port, try a second connection with the default listen port,
// since almost all nodes use that. This is a workaround to account for any ephemeral ports that may
// have made it into the dataset.
if (!success) {
if (host != null && port > 0) {
int defaultPort = Settings.getInstance().getDefaultListenPort();
if (port != defaultPort) {
String newPeerAddressString = String.format("%s:%d", host, defaultPort);
success = Network.getInstance().requestDataFromPeer(newPeerAddressString, signature);
}
}
}
// If _still_ unsuccessful, try matching the peer's IP address with some known peers, and then connect
// to each of those in turn until one succeeds.
if (!success) {
if (host != null) {
final String finalHost = host;
List<PeerData> knownPeers = Network.getInstance().getAllKnownPeers().stream()
.filter(knownPeerData -> knownPeerData.getAddress().getHost().equals(finalHost))
.collect(Collectors.toList());
// Loop through each match and attempt a connection
for (PeerData matchingPeer : knownPeers) {
String matchingPeerAddress = matchingPeer.getAddress().toString();
success = Network.getInstance().requestDataFromPeer(matchingPeerAddress, signature);
if (success) {
// Successfully connected, so stop making connections
break;
}
}
}
}
// Keep track of the success or failure
arbitraryPeerData.markAsAttempted();
if (success) {
arbitraryPeerData.markAsRetrieved();
arbitraryPeerData.incrementSuccesses();
}
else {
arbitraryPeerData.incrementFailures();
}
repository.discardChanges();
repository.getArbitraryRepository().save(arbitraryPeerData);
repository.saveChanges();
return success;
} catch (DataException e) {
LOGGER.debug("Unable to fetch peer list from repository");
}
return false;
}
// Network handlers
public void onNetworkGetArbitraryDataFileMessage(Peer peer, Message message) {
// Don't respond if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}
GetArbitraryDataFileMessage getArbitraryDataFileMessage = (GetArbitraryDataFileMessage) message;
byte[] hash = getArbitraryDataFileMessage.getHash();
String hash58 = Base58.encode(hash);
byte[] signature = getArbitraryDataFileMessage.getSignature();
Controller.getInstance().stats.getArbitraryDataFileMessageStats.requests.incrementAndGet();
LOGGER.debug("Received GetArbitraryDataFileMessage from peer {} for hash {}", peer, Base58.encode(hash));
try {
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
Triple<String, Peer, Long> relayInfo = this.arbitraryRelayMap.get(hash58);
if (arbitraryDataFile.exists()) {
LOGGER.trace("Hash {} exists", hash58);
// We can serve the file directly as we already have it
ArbitraryDataFileMessage arbitraryDataFileMessage = new ArbitraryDataFileMessage(signature, arbitraryDataFile);
arbitraryDataFileMessage.setId(message.getId());
if (!peer.sendMessage(arbitraryDataFileMessage)) {
LOGGER.debug("Couldn't send file {}", arbitraryDataFile);
peer.disconnect("failed to send file");
}
else {
LOGGER.debug("Sent file {}", arbitraryDataFile);
}
}
else if (relayInfo != null) {
LOGGER.debug("We have relay info for hash {}", Base58.encode(hash));
// We need to ask this peer for the file
Peer peerToAsk = relayInfo.getB();
if (peerToAsk != null) {
// Forward the message to this peer
LOGGER.debug("Asking peer {} for hash {}", peerToAsk, hash58);
this.fetchArbitraryDataFile(peerToAsk, peer, signature, hash, message);
// Remove from the map regardless of outcome, as the relay attempt is now considered complete
arbitraryRelayMap.remove(hash58);
}
else {
LOGGER.debug("Peer {} not found in relay info", peer);
}
}
else {
LOGGER.debug("Hash {} doesn't exist and we don't have relay info", hash58);
// We don't have this file
Controller.getInstance().stats.getArbitraryDataFileMessageStats.unknownFiles.getAndIncrement();
// Send valid, yet unexpected message type in response, so peer's synchronizer doesn't have to wait for timeout
LOGGER.debug(String.format("Sending 'file unknown' response to peer %s for GET_FILE request for unknown file %s", peer, arbitraryDataFile));
// We'll send an empty block summaries message as it's very short
// TODO: use a different message type here
Message fileUnknownMessage = new BlockSummariesMessage(Collections.emptyList());
fileUnknownMessage.setId(message.getId());
if (!peer.sendMessage(fileUnknownMessage)) {
LOGGER.debug("Couldn't sent file-unknown response");
peer.disconnect("failed to send file-unknown response");
}
else {
LOGGER.debug("Sent file-unknown response for file {}", arbitraryDataFile);
}
}
}
catch (DataException e) {
LOGGER.debug("Unable to handle request for arbitrary data file: {}", hash58);
}
}
}
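The default-port retry in fetchDataFilesFromPeersForSignature() can be summarised in isolation. The sketch below is illustrative only: the class name is hypothetical and the port constant is a stand-in for Settings.getInstance().getDefaultListenPort().

import java.util.Optional;

// Illustrative sketch of the "retry on the default listen port" idea used above
public class PeerAddressFallbackSketch {

    private static final int DEFAULT_LISTEN_PORT = 12392; // stand-in for Settings.getDefaultListenPort()

    /** Returns an alternative "host:defaultPort" address to retry, or empty if no retry makes sense. */
    static Optional<String> fallbackAddress(String peerAddress) {
        String[] parts = peerAddress.split(":");
        if (parts.length != 2)
            return Optional.empty();

        int port;
        try {
            port = Integer.parseInt(parts[1]);
        } catch (NumberFormatException e) {
            return Optional.empty();
        }

        // Only retry when the recorded port differs from the default listen port,
        // e.g. when an ephemeral outbound port made it into the dataset
        if (port == DEFAULT_LISTEN_PORT)
            return Optional.empty();

        return Optional.of(parts[0] + ":" + DEFAULT_LISTEN_PORT);
    }

    public static void main(String[] args) {
        System.out.println(fallbackAddress("203.0.113.5:54321")); // Optional[203.0.113.5:12392]
        System.out.println(fallbackAddress("203.0.113.5:12392")); // Optional.empty
    }
}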

View File

@ -0,0 +1,451 @@
package org.qortal.controller.arbitrary;
import java.io.IOException;
import java.util.*;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.arbitrary.ArbitraryDataResource;
import org.qortal.arbitrary.misc.Service;
import org.qortal.controller.Controller;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.network.Network;
import org.qortal.network.Peer;
import org.qortal.network.message.*;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction.TransactionType;
import org.qortal.utils.ArbitraryTransactionUtils;
import org.qortal.utils.Base58;
import org.qortal.utils.NTP;
public class ArbitraryDataManager extends Thread {
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataManager.class);
private static final List<TransactionType> ARBITRARY_TX_TYPE = Arrays.asList(TransactionType.ARBITRARY);
/** Difficulty (leading zero bits) used in arbitrary data transactions
* Set here so that it can be more easily reduced when running unit tests */
private int powDifficulty = 14; // Must not be final, as unit tests need to reduce this value
/** Request timeout when transferring arbitrary data */
public static final long ARBITRARY_REQUEST_TIMEOUT = 10 * 1000L; // ms
/** Maximum time to hold information about an in-progress relay */
public static final long ARBITRARY_RELAY_TIMEOUT = 30 * 1000L; // ms
private static ArbitraryDataManager instance;
private final Object peerDataLock = new Object();
private volatile boolean isStopping = false;
/**
* Map to keep track of cached arbitrary transaction resources.
* When an item is present in this map with a timestamp in the future, we won't invalidate
* its cache when serving that data. This reduces the number of database lookups that are needed.
*/
private Map<String, Long> arbitraryDataCachedResources = Collections.synchronizedMap(new HashMap<>());
/**
* The amount of time to cache a data resource before it is invalidated
*/
private static long ARBITRARY_DATA_CACHE_TIMEOUT = 60 * 60 * 1000L; // 60 minutes
private ArbitraryDataManager() {
}
public static ArbitraryDataManager getInstance() {
if (instance == null)
instance = new ArbitraryDataManager();
return instance;
}
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Manager");
try {
while (!isStopping) {
Thread.sleep(2000);
// Don't run if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
Thread.sleep(60 * 60 * 1000L);
continue;
}
List<Peer> peers = Network.getInstance().getHandshakedPeers();
// Disregard peers that have "misbehaved" recently
peers.removeIf(Controller.hasMisbehaved);
// Don't fetch data if we don't have enough up-to-date peers
if (peers.size() < Settings.getInstance().getMinBlockchainPeers()) {
continue;
}
// Fetch data according to storage policy
switch (Settings.getInstance().getStoragePolicy()) {
case FOLLOWED:
case FOLLOWED_AND_VIEWED:
this.processNames();
break;
case ALL:
this.processAll();
break;
case NONE:
case VIEWED:
default:
// Nothing to fetch in advance
Thread.sleep(60000);
break;
}
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
}
}
public void shutdown() {
isStopping = true;
this.interrupt();
}
private void processNames() {
// Fetch latest list of followed names
List<String> followedNames = ResourceListManager.getInstance().getStringsInList("followedNames");
if (followedNames == null || followedNames.isEmpty()) {
return;
}
// Loop through the names in the list and fetch transactions for each
for (String name : followedNames) {
this.fetchAndProcessTransactions(name);
}
}
private void processAll() {
this.fetchAndProcessTransactions(null);
}
private void fetchAndProcessTransactions(String name) {
ArbitraryDataStorageManager storageManager = ArbitraryDataStorageManager.getInstance();
// Paginate queries when fetching arbitrary transactions
final int limit = 100;
int offset = 0;
while (!isStopping) {
// Any arbitrary transactions we want to fetch data for?
try (final Repository repository = RepositoryManager.getRepository()) {
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(null, null, null, ARBITRARY_TX_TYPE, null, name, null, ConfirmationStatus.BOTH, limit, offset, true);
// LOGGER.trace("Found {} arbitrary transactions at offset: {}, limit: {}", signatures.size(), offset, limit);
if (signatures == null || signatures.isEmpty()) {
offset = 0;
break;
}
offset += limit;
// Loop through signatures and remove ones we don't need to process
Iterator<byte[]> iterator = signatures.iterator();
while (iterator.hasNext()) {
byte[] signature = iterator.next();
ArbitraryTransaction arbitraryTransaction = fetchTransaction(repository, signature);
if (arbitraryTransaction == null) {
// Best not to process this one
iterator.remove();
continue;
}
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) arbitraryTransaction.getTransactionData();
// Skip transactions that we don't need to proactively store data for
if (!storageManager.shouldPreFetchData(repository, arbitraryTransactionData)) {
iterator.remove();
continue;
}
// Remove transactions that we already have local data for
if (hasLocalData(arbitraryTransaction)) {
iterator.remove();
continue;
}
}
if (signatures.isEmpty()) {
continue;
}
// Pick one at random
final int index = new Random().nextInt(signatures.size());
byte[] signature = signatures.get(index);
if (signature == null) {
continue;
}
// Check to see if we have had a more recent PUT
ArbitraryTransactionData arbitraryTransactionData = ArbitraryTransactionUtils.fetchTransactionData(repository, signature);
boolean hasMoreRecentPutTransaction = ArbitraryTransactionUtils.hasMoreRecentPutTransaction(repository, arbitraryTransactionData);
if (hasMoreRecentPutTransaction) {
// There is a more recent PUT transaction than the one we are currently processing.
// When a PUT is issued, it replaces any layers that would have been there before.
// Therefore any data relating to this older transaction is no longer needed and we
// shouldn't fetch it from the network.
continue;
}
// Ask our connected peers if they have files for this signature
// This process automatically then fetches the files themselves if a peer is found
fetchData(arbitraryTransactionData);
} catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data", e);
}
}
}
private ArbitraryTransaction fetchTransaction(final Repository repository, byte[] signature) {
try {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (!(transactionData instanceof ArbitraryTransactionData))
return null;
return new ArbitraryTransaction(repository, transactionData);
} catch (DataException e) {
return null;
}
}
private boolean hasLocalData(ArbitraryTransaction arbitraryTransaction) {
try {
return arbitraryTransaction.isDataLocal();
} catch (DataException e) {
LOGGER.error("Repository issue when checking arbitrary transaction's data is local", e);
return true;
}
}
// Entrypoint to request new data from peers
public boolean fetchData(ArbitraryTransactionData arbitraryTransactionData) {
return ArbitraryDataFileListManager.getInstance().fetchArbitraryDataFileList(arbitraryTransactionData);
}
// Useful methods used by other parts of the app
public boolean isSignatureRateLimited(byte[] signature) {
return ArbitraryDataFileListManager.getInstance().isSignatureRateLimited(signature);
}
public long lastRequestForSignature(byte[] signature) {
return ArbitraryDataFileListManager.getInstance().lastRequestForSignature(signature);
}
// Arbitrary data resource cache
public void cleanupRequestCache(Long now) {
if (now == null) {
return;
}
// Cleanup file list request caches
ArbitraryDataFileListManager.getInstance().cleanupRequestCache(now);
// Cleanup file request caches
ArbitraryDataFileManager.getInstance().cleanupRequestCache(now);
}
public boolean isResourceCached(ArbitraryDataResource resource) {
if (resource == null) {
return false;
}
String key = resource.getUniqueKey();
// We don't have an entry for this resource ID, it is not cached
if (this.arbitraryDataCachedResources == null) {
return false;
}
if (!this.arbitraryDataCachedResources.containsKey(key)) {
return false;
}
Long timestamp = this.arbitraryDataCachedResources.get(key);
if (timestamp == null) {
return false;
}
// If the timestamp has reached the timeout, we should remove it from the cache
Long now = NTP.getTime();
if (now == null)
return false;
if (now > timestamp) {
this.arbitraryDataCachedResources.remove(key);
return false;
}
// Current time hasn't reached the timeout, so treat it as cached
return true;
}
public void addResourceToCache(ArbitraryDataResource resource) {
if (resource == null) {
return;
}
String key = resource.getUniqueKey();
// Just in case
if (this.arbitraryDataCachedResources == null) {
this.arbitraryDataCachedResources = Collections.synchronizedMap(new HashMap<>());
}
Long now = NTP.getTime();
if (now == null) {
return;
}
// Set the timestamp to now + the timeout
Long timestamp = now + ARBITRARY_DATA_CACHE_TIMEOUT;
this.arbitraryDataCachedResources.put(key, timestamp);
}
public void invalidateCache(ArbitraryTransactionData arbitraryTransactionData) {
String signature58 = Base58.encode(arbitraryTransactionData.getSignature());
if (arbitraryTransactionData.getName() != null) {
String resourceId = arbitraryTransactionData.getName().toLowerCase();
Service service = arbitraryTransactionData.getService();
String identifier = arbitraryTransactionData.getIdentifier();
ArbitraryDataResource resource =
new ArbitraryDataResource(resourceId, ArbitraryDataFile.ResourceIdType.NAME, service, identifier);
String key = resource.getUniqueKey();
LOGGER.info("Clearing cache for {}...", resource);
this.arbitraryDataCachedResources.remove(key);
// Also remove from the failed builds queue in case it previously failed due to missing chunks
ArbitraryDataBuildManager buildManager = ArbitraryDataBuildManager.getInstance();
buildManager.arbitraryDataFailedBuilds.remove(key);
// Remove from the signature requests list now that we have all files for this signature
ArbitraryDataFileListManager.getInstance().removeFromSignatureRequests(signature58);
// Delete cached files themselves
try {
resource.deleteCache();
} catch (IOException e) {
LOGGER.info("Unable to delete cache for resource {}: {}", resource, e.getMessage());
}
}
}
// Broadcast list of hosted signatures
public void broadcastHostedSignatureList() {
try (final Repository repository = RepositoryManager.getRepository()) {
List<ArbitraryTransactionData> hostedTransactions = ArbitraryDataStorageManager.getInstance().listAllHostedTransactions(repository);
List<byte[]> hostedSignatures = hostedTransactions.stream().map(ArbitraryTransactionData::getSignature).collect(Collectors.toList());
// Broadcast the list, using null to represent our peer address
LOGGER.info("Broadcasting list of hosted signatures...");
Message arbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, hostedSignatures);
Network.getInstance().broadcast(broadcastPeer -> arbitrarySignatureMessage);
} catch (DataException e) {
LOGGER.error("Repository issue when fetching arbitrary transaction data for broadcast", e);
}
}
// Handle incoming arbitrary signatures messages
public void onNetworkArbitrarySignaturesMessage(Peer peer, Message message) {
// Don't process if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
return;
}
LOGGER.debug("Received arbitrary signature list from peer {}", peer);
ArbitrarySignaturesMessage arbitrarySignaturesMessage = (ArbitrarySignaturesMessage) message;
List<byte[]> signatures = arbitrarySignaturesMessage.getSignatures();
String peerAddress = peer.getPeerData().getAddress().toString();
if (arbitrarySignaturesMessage.getPeerAddress() != null) {
// This message is about a different peer than the one that sent it
peerAddress = arbitrarySignaturesMessage.getPeerAddress();
}
boolean containsNewEntry = false;
// Synchronize peer data lookups to make this process thread safe. Otherwise we could broadcast
// the same data multiple times, due to more than one thread processing the same message from different peers
synchronized (this.peerDataLock) {
try (final Repository repository = RepositoryManager.getRepository()) {
for (byte[] signature : signatures) {
// Check if a record already exists for this hash/host combination
// The port is not checked here - only the host/ip - in order to avoid duplicates
// from filling up the db due to dynamic/ephemeral ports
ArbitraryPeerData existingEntry = repository.getArbitraryRepository()
.getArbitraryPeerDataForSignatureAndHost(signature, peer.getPeerData().getAddress().getHost());
if (existingEntry == null) {
// We haven't got a record of this mapping yet, so add it
ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(signature, peerAddress);
repository.discardChanges();
if (arbitraryPeerData.isPeerAddressValid()) {
LOGGER.debug("Adding arbitrary peer: {} for signature {}", peerAddress, Base58.encode(signature));
repository.getArbitraryRepository().save(arbitraryPeerData);
repository.saveChanges();
// Remember that this data is new, so that it can be rebroadcast later
containsNewEntry = true;
}
}
}
// If at least one signature in this batch was new to us, we should rebroadcast the message to the
// network in case some peers haven't received it yet
if (containsNewEntry) {
LOGGER.debug("Rebroadcasting arbitrary signature list for peer {}", peerAddress);
Network.getInstance().broadcast(broadcastPeer -> broadcastPeer == peer ? null : arbitrarySignaturesMessage);
} else {
// Don't rebroadcast as otherwise we could get into a loop
}
// If anything needed saving, it would already have called saveChanges() above
repository.discardChanges();
} catch (DataException e) {
LOGGER.error(String.format("Repository issue while processing arbitrary transaction signature list from peer %s", peer), e);
}
}
}
public int getPowDifficulty() {
return this.powDifficulty;
}
}
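The resource cache above boils down to a map from unique key to expiry timestamp. Below is a self-contained sketch of that pattern, using System.currentTimeMillis() in place of NTP.getTime() and a hypothetical class name.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the timestamp-based cache used by ArbitraryDataManager above
public class ExpiringKeyCacheSketch {

    private static final long CACHE_TIMEOUT = 60 * 60 * 1000L; // 60 minutes, as above

    private final Map<String, Long> expiryByKey = Collections.synchronizedMap(new HashMap<>());

    public void add(String key) {
        // Store the time at which the entry stops being considered cached
        expiryByKey.put(key, System.currentTimeMillis() + CACHE_TIMEOUT);
    }

    public boolean isCached(String key) {
        Long expiry = expiryByKey.get(key);
        if (expiry == null)
            return false;

        if (System.currentTimeMillis() > expiry) {
            // Expired entries are removed lazily, mirroring the invalidation above
            expiryByKey.remove(key);
            return false;
        }
        return true;
    }
}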

View File

@ -0,0 +1,86 @@
package org.qortal.controller.arbitrary;
import org.qortal.arbitrary.ArbitraryDataResource;
import org.qortal.utils.NTP;
import java.util.*;
public class ArbitraryDataRenderManager extends Thread {
private static ArbitraryDataRenderManager instance;
private volatile boolean isStopping = false;
/**
* Map to keep track of authorized resources for rendering.
* Keyed by resource ID, with the authorization time as the value.
*/
private Map<String, Long> authorizedResources = Collections.synchronizedMap(new HashMap<>());
private static long AUTHORIZATION_TIMEOUT = 60 * 60 * 1000L; // 1 hour
public ArbitraryDataRenderManager() {
}
public static ArbitraryDataRenderManager getInstance() {
if (instance == null)
instance = new ArbitraryDataRenderManager();
return instance;
}
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Manager");
try {
while (!isStopping) {
Thread.sleep(60000);
Long now = NTP.getTime();
this.cleanup(now);
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
}
}
public void shutdown() {
isStopping = true;
this.interrupt();
}
public void cleanup(Long now) {
if (now == null) {
return;
}
final long minimumTimestamp = now - AUTHORIZATION_TIMEOUT;
this.authorizedResources.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue() < minimumTimestamp);
}
public boolean isAuthorized(ArbitraryDataResource resource) {
if (resource == null) {
return false;
}
ArbitraryDataResource broadResource = new ArbitraryDataResource(resource.getResourceId(), null, null, null);
for (String authorizedResourceKey : this.authorizedResources.keySet()) {
if (authorizedResourceKey != null) {
// Check for exact match
if (Objects.equals(authorizedResourceKey, resource.getUniqueKey())) {
return true;
}
// Check for a broad authorization (which applies to all services and identifiers under an authorized name)
if (Objects.equals(authorizedResourceKey, broadResource.getUniqueKey())) {
return true;
}
}
}
return false;
}
public void addToAuthorizedResources(ArbitraryDataResource resource) {
if (!this.isAuthorized(resource)) {
this.authorizedResources.put(resource.getUniqueKey(), NTP.getTime());
}
}
}

View File

@ -0,0 +1,495 @@
package org.qortal.controller.arbitrary;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.list.ResourceListManager;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.settings.Settings;
import org.qortal.transaction.Transaction;
import org.qortal.utils.Base58;
import org.qortal.utils.FilesystemUtils;
import org.qortal.utils.NTP;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class ArbitraryDataStorageManager extends Thread {
public enum StoragePolicy {
FOLLOWED_AND_VIEWED,
FOLLOWED,
VIEWED,
ALL,
NONE
}
private static final Logger LOGGER = LogManager.getLogger(ArbitraryDataStorageManager.class);
private static ArbitraryDataStorageManager instance;
private volatile boolean isStopping = false;
private Long storageCapacity = null;
private long totalDirectorySize = 0L;
private long lastDirectorySizeCheck = 0;
private List<ArbitraryTransactionData> hostedTransactions;
private static final long DIRECTORY_SIZE_CHECK_INTERVAL = 10 * 60 * 1000L; // 10 minutes
/** Treat storage as full at 90% usage, to reduce risk of going over the limit.
* This is necessary because we don't calculate total storage values before every write.
* It also helps avoid a fetch/delete loop, as we will stop fetching before the hard limit.
* This must be lower than DELETION_THRESHOLD. */
private static final double STORAGE_FULL_THRESHOLD = 0.90f; // 90%
/** Start deleting files once we reach 98% usage.
* This must be higher than STORAGE_FULL_THRESHOLD in order to avoid a fetch/delete loop. */
public static final double DELETION_THRESHOLD = 0.98f; // 98%
public ArbitraryDataStorageManager() {
}
public static ArbitraryDataStorageManager getInstance() {
if (instance == null)
instance = new ArbitraryDataStorageManager();
return instance;
}
@Override
public void run() {
Thread.currentThread().setName("Arbitrary Data Storage Manager");
try {
while (!isStopping) {
Thread.sleep(1000);
// Don't run if QDN is disabled
if (!Settings.getInstance().isQdnEnabled()) {
Thread.sleep(60 * 60 * 1000L);
continue;
}
Long now = NTP.getTime();
if (now == null) {
continue;
}
// Check the total directory size if we haven't in a while
if (this.shouldCalculateDirectorySize(now)) {
this.calculateDirectorySize(now);
}
Thread.sleep(59000);
}
} catch (InterruptedException e) {
// Fall-through to exit thread...
}
}
public void shutdown() {
isStopping = true;
this.interrupt();
instance = null;
}
/**
* Check if data relating to a transaction is allowed to
* exist on this node, therefore making it a mirror for this data.
*
* @param arbitraryTransactionData - the transaction
* @return boolean - whether to prefetch or not
*/
public boolean canStoreData(ArbitraryTransactionData arbitraryTransactionData) {
String name = arbitraryTransactionData.getName();
// We already have RAW_DATA on chain, so we only need to store data associated with hashes
if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) {
return false;
}
// Don't store data unless it's an allowed type (public/private)
if (!this.isDataTypeAllowed(arbitraryTransactionData)) {
return false;
}
// Don't check for storage limits here, as it can cause the cleanup manager to delete existing data
// Check if our storage policy and lists allow us to host data for this name
switch (Settings.getInstance().getStoragePolicy()) {
case FOLLOWED_AND_VIEWED:
case ALL:
case VIEWED:
// If the policy includes viewed data, we can host it as long as it's not blocked
return !this.isNameBlocked(name);
case FOLLOWED:
// If the policy is for followed data only, we have to be following it
return this.isFollowingName(name);
// For NONE or all else, we shouldn't host this data
case NONE:
default:
return false;
}
}
/**
* Check if data relating to a transaction should be downloaded
* automatically, making this node a mirror for that data.
*
* @param arbitraryTransactionData - the transaction
* @return boolean - whether to prefetch or not
*/
public boolean shouldPreFetchData(Repository repository, ArbitraryTransactionData arbitraryTransactionData) {
String name = arbitraryTransactionData.getName();
// Only fetch data associated with hashes, as we already have RAW_DATA
if (arbitraryTransactionData.getDataType() != ArbitraryTransactionData.DataType.DATA_HASH) {
return false;
}
// Don't fetch anything more if we're (nearly) out of space
// Make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
// avoid a fetch/delete loop
if (!this.isStorageSpaceAvailable(STORAGE_FULL_THRESHOLD)) {
return false;
}
// Don't fetch anything if we're (nearly) out of space for this name
// Again, make sure to keep STORAGE_FULL_THRESHOLD considerably less than 1, to
// avoid a fetch/delete loop
if (!this.isStorageSpaceAvailableForName(repository, arbitraryTransactionData.getName(), STORAGE_FULL_THRESHOLD)) {
return false;
}
// Don't store data unless it's an allowed type (public/private)
if (!this.isDataTypeAllowed(arbitraryTransactionData)) {
return false;
}
// Handle transactions without names differently
if (name == null) {
return this.shouldPreFetchDataWithoutName();
}
// Never fetch data from blocked names, even if they are followed
if (this.isNameBlocked(name)) {
return false;
}
switch (Settings.getInstance().getStoragePolicy()) {
case FOLLOWED:
case FOLLOWED_AND_VIEWED:
return this.isFollowingName(name);
case ALL:
return true;
case NONE:
case VIEWED:
default:
return false;
}
}
/**
* Don't call this method directly.
* Use the wrapper method shouldPreFetchData() instead, as it contains
* additional checks.
*
* @return boolean - whether the storage policy allows for unnamed data
*/
private boolean shouldPreFetchDataWithoutName() {
switch (Settings.getInstance().getStoragePolicy()) {
case ALL:
return true;
case NONE:
case VIEWED:
case FOLLOWED:
case FOLLOWED_AND_VIEWED:
default:
return false;
}
}
private boolean isDataTypeAllowed(ArbitraryTransactionData arbitraryTransactionData) {
byte[] secret = arbitraryTransactionData.getSecret();
boolean hasSecret = (secret != null && secret.length == 32);
if (!Settings.getInstance().isPrivateDataEnabled() && !hasSecret) {
// Private data isn't enabled so we can't store data without a valid secret
return false;
}
if (!Settings.getInstance().isPublicDataEnabled() && hasSecret) {
// Public data isn't enabled so we can't store data with a secret
return false;
}
return true;
}
public boolean isNameBlocked(String name) {
return ResourceListManager.getInstance().listContains("blockedNames", name, false);
}
private boolean isFollowingName(String name) {
return ResourceListManager.getInstance().listContains("followedNames", name, false);
}
public List<String> followedNames() {
return ResourceListManager.getInstance().getStringsInList("followedNames");
}
private int followedNamesCount() {
return ResourceListManager.getInstance().getItemCountForList("followedNames");
}
// Hosted data
public List<ArbitraryTransactionData> listAllHostedTransactions(Repository repository) {
// Load from cache if we can, to avoid disk reads
if (this.hostedTransactions != null) {
return this.hostedTransactions;
}
List<ArbitraryTransactionData> arbitraryTransactionDataList = new ArrayList<>();
// Find all hosted paths
List<Path> allPaths = this.findAllHostedPaths();
// Loop through each path and attempt to match it to a signature
for (Path path : allPaths) {
try {
String[] contents = path.toFile().list();
if (contents == null || contents.length == 0) {
// Ignore empty directories
continue;
}
String signature58 = path.getFileName().toString();
byte[] signature = Base58.decode(signature58);
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);
if (transactionData == null || transactionData.getType() != Transaction.TransactionType.ARBITRARY) {
continue;
}
arbitraryTransactionDataList.add((ArbitraryTransactionData) transactionData);
} catch (DataException e) {
continue;
}
}
// Update cache
this.hostedTransactions = arbitraryTransactionDataList;
return arbitraryTransactionDataList;
}
/**
* Warning: this method will walk through the entire data directory
* Do not call it too frequently as it could create high disk load
* in environments with a large amount of hosted data.
* @return a list of paths that are being hosted
*/
public List<Path> findAllHostedPaths() {
Path dataPath = Paths.get(Settings.getInstance().getDataPath());
Path tempPath = Paths.get(Settings.getInstance().getTempDataPath());
// Walk through 3 levels of the file tree and find directories that are greater than 32 characters in length
// Also exclude the _temp and _misc paths if present
List<Path> allPaths = new ArrayList<>();
// Close the stream when done, as Files.walk() holds open directory handles
try (Stream<Path> pathStream = Files.walk(dataPath, 3)) {
allPaths = pathStream.filter(Files::isDirectory)
.filter(path -> !path.toAbsolutePath().toString().contains(tempPath.toAbsolutePath().toString())
&& !path.toString().contains("_misc")
&& path.getFileName().toString().length() > 32)
.collect(Collectors.toList());
}
catch (IOException e) {
LOGGER.info("Unable to walk through hosted data: {}", e.getMessage());
}
return allPaths;
}
public void invalidateHostedTransactionsCache() {
this.hostedTransactions = null;
}
// Size limits
/**
* Rate limit to reduce IO load
*/
public boolean shouldCalculateDirectorySize(Long now) {
if (now == null) {
return false;
}
// If storage capacity is null, we need to calculate it
if (this.storageCapacity == null) {
return true;
}
// If we haven't checked for a while, we need to check it now
if (now - lastDirectorySizeCheck > DIRECTORY_SIZE_CHECK_INTERVAL) {
return true;
}
// We shouldn't check this time, as we want to reduce IO load on the SSD/HDD
return false;
}
public void calculateDirectorySize(Long now) {
if (now == null) {
return;
}
long totalSize = 0;
long remainingCapacity = 0;
// Calculate remaining capacity
try {
remainingCapacity = this.getRemainingUsableStorageCapacity();
} catch (IOException e) {
LOGGER.info("Unable to calculate remaining storage capacity: {}", e.getMessage());
return;
}
// Calculate total size of data directory
LOGGER.trace("Calculating data directory size...");
Path dataDirectoryPath = Paths.get(Settings.getInstance().getDataPath());
if (dataDirectoryPath.toFile().exists()) {
totalSize += FileUtils.sizeOfDirectory(dataDirectoryPath.toFile());
}
// Add total size of temp directory, if it's not already inside the data directory
Path tempDirectoryPath = Paths.get(Settings.getInstance().getTempDataPath());
if (tempDirectoryPath.toFile().exists()) {
if (!FilesystemUtils.isChild(tempDirectoryPath, dataDirectoryPath)) {
LOGGER.trace("Calculating temp directory size...");
totalSize += FileUtils.sizeOfDirectory(tempDirectoryPath.toFile());
}
}
this.totalDirectorySize = totalSize;
this.lastDirectorySizeCheck = now;
// It's essential that used space (this.totalDirectorySize) is included in the storage capacity
LOGGER.trace("Calculating total storage capacity...");
long storageCapacity = remainingCapacity + this.totalDirectorySize;
// Make sure to limit the storage capacity if the user is overriding it in the settings
if (Settings.getInstance().getMaxStorageCapacity() != null) {
storageCapacity = Math.min(storageCapacity, Settings.getInstance().getMaxStorageCapacity());
}
this.storageCapacity = storageCapacity;
LOGGER.info("Total used: {} bytes, Total capacity: {} bytes", this.totalDirectorySize, this.storageCapacity);
}
private long getRemainingUsableStorageCapacity() throws IOException {
// Create data directory if it doesn't exist so that we can perform calculations on it
Path dataDirectoryPath = Paths.get(Settings.getInstance().getDataPath());
if (!dataDirectoryPath.toFile().exists()) {
Files.createDirectories(dataDirectoryPath);
}
return dataDirectoryPath.toFile().getUsableSpace();
}
public long getTotalDirectorySize() {
return this.totalDirectorySize;
}
public boolean isStorageSpaceAvailable(double threshold) {
if (!this.isStorageCapacityCalculated()) {
return false;
}
long maxStorageCapacity = (long)((double)this.storageCapacity * threshold);
if (this.totalDirectorySize >= maxStorageCapacity) {
return false;
}
return true;
}
public boolean isStorageSpaceAvailableForName(Repository repository, String name, double threshold) {
if (!this.isStorageSpaceAvailable(threshold)) {
// No storage space available at all, so no need to check this name
return false;
}
if (name == null) {
// This transaction doesn't have a name, so fall back to total space limitations
return true;
}
int followedNamesCount = this.followedNamesCount();
if (followedNamesCount == 0) {
// Not following any names, so we have space
return true;
}
long totalSizeForName = 0;
long maxStoragePerName = this.storageCapacityPerName(threshold);
// Fetch all hosted transactions
List<ArbitraryTransactionData> hostedTransactions = this.listAllHostedTransactions(repository);
for (ArbitraryTransactionData transactionData : hostedTransactions) {
String transactionName = transactionData.getName();
if (!Objects.equals(name, transactionName)) {
// Transaction relates to a different name
continue;
}
totalSizeForName += transactionData.getSize();
}
// Have we reached the limit for this name?
if (totalSizeForName > maxStoragePerName) {
return false;
}
return true;
}
public long storageCapacityPerName(double threshold) {
int followedNamesCount = this.followedNamesCount();
if (followedNamesCount == 0) {
// Not following any names, so we have the total space available
return this.getStorageCapacityIncludingThreshold(threshold);
}
double maxStorageCapacity = (double)this.storageCapacity * threshold;
long maxStoragePerName = (long)(maxStorageCapacity / (double)followedNamesCount);
return maxStoragePerName;
}
public boolean isStorageCapacityCalculated() {
return (this.storageCapacity != null);
}
public Long getStorageCapacity() {
return this.storageCapacity;
}
public Long getStorageCapacityIncludingThreshold(double threshold) {
if (this.storageCapacity == null) {
return null;
}
return (long)(this.storageCapacity * threshold);
}
}
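A short worked example of the arithmetic in storageCapacityPerName(): the total capacity is scaled by the threshold and then split evenly across followed names. The figures below are arbitrary and the class name is hypothetical.

// Illustrative sketch of the per-name storage split performed above
public class PerNameCapacitySketch {
    public static void main(String[] args) {
        long storageCapacity = 100L * 1024 * 1024 * 1024; // 100 GiB total capacity (example)
        double threshold = 0.90;                          // STORAGE_FULL_THRESHOLD above
        int followedNamesCount = 3;                       // example number of followed names

        double usableCapacity = storageCapacity * threshold;
        long maxStoragePerName = (long) (usableCapacity / followedNamesCount);

        // 100 GiB * 0.90 / 3 names = 30 GiB per followed name
        System.out.printf("Max storage per name: %d bytes (~%.1f GiB)%n",
                maxStoragePerName, maxStoragePerName / (1024.0 * 1024 * 1024));
    }
}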

View File

@ -310,7 +310,7 @@ public class NamesDatabaseIntegrityCheck {
// Fetch all the confirmed REGISTER_NAME transaction signatures
List<byte[]> signatures = repository.getTransactionRepository().getSignaturesMatchingCriteria(
null, null, null, ALL_NAME_TX_TYPE, null, null,
ConfirmationStatus.CONFIRMED, null, null, false);
null, ConfirmationStatus.CONFIRMED, null, null, false);
for (byte[] signature : signatures) {
TransactionData transactionData = repository.getTransactionRepository().fromSignature(signature);

View File

@ -0,0 +1,205 @@
/*
* MIT License
*
* Copyright (c) 2017 Eugen Paraschiv
* Modified in 2021 by CalDescent
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package org.qortal.crypto;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import javax.crypto.BadPaddingException;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKeyFactory;
import javax.crypto.SealedObject;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.Serializable;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.KeySpec;
import java.util.Base64;
public class AES {
public static String encrypt(String algorithm, String input, SecretKey key, IvParameterSpec iv)
throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException,
InvalidKeyException, BadPaddingException, IllegalBlockSizeException {
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.ENCRYPT_MODE, key, iv);
byte[] cipherText = cipher.doFinal(input.getBytes());
return Base64.getEncoder()
.encodeToString(cipherText);
}
public static String decrypt(String algorithm, String cipherText, SecretKey key, IvParameterSpec iv)
throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException,
InvalidKeyException, BadPaddingException, IllegalBlockSizeException {
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.DECRYPT_MODE, key, iv);
byte[] plainText = cipher.doFinal(Base64.getDecoder()
.decode(cipherText));
return new String(plainText);
}
public static SecretKey generateKey(int n) throws NoSuchAlgorithmException {
KeyGenerator keyGenerator = KeyGenerator.getInstance("AES");
keyGenerator.init(n);
SecretKey key = keyGenerator.generateKey();
return key;
}
public static SecretKey getKeyFromPassword(String password, String salt)
throws NoSuchAlgorithmException, InvalidKeySpecException {
SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256");
KeySpec spec = new PBEKeySpec(password.toCharArray(), salt.getBytes(), 65536, 256);
SecretKey secret = new SecretKeySpec(factory.generateSecret(spec)
.getEncoded(), "AES");
return secret;
}
public static IvParameterSpec generateIv() {
byte[] iv = new byte[16];
new SecureRandom().nextBytes(iv);
return new IvParameterSpec(iv);
}
public static void encryptFile(String algorithm, SecretKey key,
String inputFilePath, String outputFilePath) throws IOException,
NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, InvalidKeyException,
BadPaddingException, IllegalBlockSizeException {
File inputFile = new File(inputFilePath);
File outputFile = new File(outputFilePath);
IvParameterSpec iv = AES.generateIv();
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.ENCRYPT_MODE, key, iv);
FileInputStream inputStream = new FileInputStream(inputFile);
FileOutputStream outputStream = new FileOutputStream(outputFile);
// Prepend the output stream with the 16 byte initialization vector
outputStream.write(iv.getIV());
byte[] buffer = new byte[1024];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
byte[] output = cipher.update(buffer, 0, bytesRead);
if (output != null) {
outputStream.write(output);
}
}
byte[] outputBytes = cipher.doFinal();
if (outputBytes != null) {
outputStream.write(outputBytes);
}
inputStream.close();
outputStream.close();
}
public static void decryptFile(String algorithm, SecretKey key, String encryptedFilePath,
String decryptedFilePath) throws IOException, NoSuchPaddingException,
NoSuchAlgorithmException, InvalidAlgorithmParameterException, InvalidKeyException,
BadPaddingException, IllegalBlockSizeException {
File encryptedFile = new File(encryptedFilePath);
File decryptedFile = new File(decryptedFilePath);
File parent = decryptedFile.getParentFile();
if (!parent.isDirectory() && !parent.mkdirs()) {
throw new IOException("Failed to create directory " + parent);
}
FileInputStream inputStream = new FileInputStream(encryptedFile);
FileOutputStream outputStream = new FileOutputStream(decryptedFile);
// Read the initialization vector from the first 16 bytes of the file
byte[] iv = new byte[16];
if (inputStream.read(iv) != iv.length)
throw new IOException("Unable to read initialization vector from encrypted file");
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
byte[] buffer = new byte[64];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
byte[] output = cipher.update(buffer, 0, bytesRead);
if (output != null) {
outputStream.write(output);
}
}
byte[] output = cipher.doFinal();
if (output != null) {
outputStream.write(output);
}
inputStream.close();
outputStream.close();
}
public static SealedObject encryptObject(String algorithm, Serializable object, SecretKey key,
IvParameterSpec iv) throws NoSuchPaddingException, NoSuchAlgorithmException,
InvalidAlgorithmParameterException, InvalidKeyException, IOException, IllegalBlockSizeException {
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.ENCRYPT_MODE, key, iv);
SealedObject sealedObject = new SealedObject(object, cipher);
return sealedObject;
}
public static Serializable decryptObject(String algorithm, SealedObject sealedObject, SecretKey key,
IvParameterSpec iv) throws NoSuchPaddingException, NoSuchAlgorithmException,
InvalidAlgorithmParameterException, InvalidKeyException, ClassNotFoundException,
BadPaddingException, IllegalBlockSizeException, IOException {
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.DECRYPT_MODE, key, iv);
Serializable unsealObject = (Serializable) sealedObject.getObject(cipher);
return unsealObject;
}
public static String encryptPasswordBased(String plainText, SecretKey key, IvParameterSpec iv)
throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException,
InvalidKeyException, BadPaddingException, IllegalBlockSizeException {
Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
cipher.init(Cipher.ENCRYPT_MODE, key, iv);
return Base64.getEncoder()
.encodeToString(cipher.doFinal(plainText.getBytes()));
}
public static String decryptPasswordBased(String cipherText, SecretKey key, IvParameterSpec iv)
throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException,
InvalidKeyException, BadPaddingException, IllegalBlockSizeException {
Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5PADDING");
cipher.init(Cipher.DECRYPT_MODE, key, iv);
return new String(cipher.doFinal(Base64.getDecoder()
.decode(cipherText)));
}
}
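A minimal usage sketch of the helpers above: derive a key from a password, then encrypt and decrypt a file. The password, salt and file paths are placeholders; the transformation string matches the CBC/PKCS5 mode already used in this class, and the wrapper class name is made up.

import javax.crypto.SecretKey;
import org.qortal.crypto.AES;

// Illustrative usage of the AES helpers defined above
public class AESUsageSketch {
    public static void main(String[] args) throws Exception {
        // Derive a 256-bit key from a password and salt (placeholders)
        SecretKey key = AES.getKeyFromPassword("example-password", "example-salt");

        // Encrypt a file; the 16-byte IV is generated internally and prepended to the output
        AES.encryptFile("AES/CBC/PKCS5Padding", key, "/tmp/input.dat", "/tmp/input.dat.enc");

        // Decrypt it again; the IV is read back from the first 16 bytes of the encrypted file
        AES.decryptFile("AES/CBC/PKCS5Padding", key, "/tmp/input.dat.enc", "/tmp/input.decrypted");
    }
}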

View File

@ -0,0 +1,19 @@
package org.qortal.data.arbitrary;
import org.qortal.arbitrary.misc.Service;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class ArbitraryResourceInfo {
public String name;
public Service service;
public String identifier;
public ArbitraryResourceStatus status;
public ArbitraryResourceInfo() {
}
}

View File

@ -0,0 +1,17 @@
package org.qortal.data.arbitrary;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import java.util.ArrayList;
import java.util.List;
@XmlAccessorType(XmlAccessType.FIELD)
public class ArbitraryResourceNameInfo {
public String name;
public List<ArbitraryResourceInfo> resources = new ArrayList<>();
public ArbitraryResourceNameInfo() {
}
}

View File

@ -0,0 +1,42 @@
package org.qortal.data.arbitrary;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@XmlAccessorType(XmlAccessType.FIELD)
public class ArbitraryResourceStatus {
public enum Status {
NOT_STARTED("Not started", "Downloading not yet started"),
DOWNLOADING("Downloading", "Locating and downloading files..."),
DOWNLOADED("Downloaded", "Files downloaded"),
BUILDING("Building", "Building..."),
READY("Ready", "Ready"),
MISSING_DATA("Missing data", "Unable to locate all files. Please try again later"),
BUILD_FAILED("Build failed", "Build failed. Please try again later"),
UNSUPPORTED("Unsupported", "Unsupported request"),
BLOCKED("Blocked", "Name is blocked so content cannot be served");
private String title;
private String description;
Status(String title, String description) {
this.title = title;
this.description = description;
}
}
private String id;
private String title;
private String description;
public ArbitraryResourceStatus() {
}
public ArbitraryResourceStatus(Status status) {
this.id = status.toString();
this.title = status.title;
this.description = status.description;
}
}

View File

@ -9,7 +9,10 @@ import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import org.qortal.block.BlockChain;
import org.qortal.settings.Settings;
import org.qortal.crypto.Crypto;
import org.qortal.utils.NTP;
// All properties to be converted to JSON via JAX-RS
@XmlAccessorType(XmlAccessType.FIELD)
@ -208,6 +211,13 @@ public class BlockData implements Serializable {
this.onlineAccountsSignatures = onlineAccountsSignatures;
}
public boolean isTrimmed() {
Long now = NTP.getTime();
if (now == null)
return false;
long onlineAccountSignaturesTrimmedTimestamp = now - BlockChain.getInstance().getOnlineAccountSignaturesMaxLifetime();
long currentTrimmableTimestamp = now - Settings.getInstance().getAtStatesMaxLifetime();
long blockTimestamp = this.getTimestamp();
return blockTimestamp < onlineAccountSignaturesTrimmedTimestamp && blockTimestamp < currentTrimmableTimestamp;
}
// JAXB special
@XmlElement(name = "minterAddress")

View File

@ -0,0 +1,112 @@
package org.qortal.data.network;
import com.google.common.net.InetAddresses;
import org.qortal.crypto.Crypto;
import org.qortal.network.Peer;
import org.qortal.utils.NTP;
import java.net.InetAddress;
import java.net.UnknownHostException;
public class ArbitraryPeerData {
private final byte[] hash;
private final String peerAddress;
private Integer successes;
private Integer failures;
private Long lastAttempted;
private Long lastRetrieved;
public ArbitraryPeerData(byte[] hash, String peerAddress, Integer successes,
Integer failures, Long lastAttempted, Long lastRetrieved) {
this.hash = hash;
this.peerAddress = peerAddress;
this.successes = successes;
this.failures = failures;
this.lastAttempted = lastAttempted;
this.lastRetrieved = lastRetrieved;
}
public ArbitraryPeerData(byte[] signature, Peer peer) {
this(Crypto.digest(signature), peer.getPeerData().getAddress().toString(),
0, 0, 0L, 0L);
}
public ArbitraryPeerData(byte[] signature, String peerAddress) {
this(Crypto.digest(signature), peerAddress, 0, 0, 0L, 0L);
}
public boolean isPeerAddressValid() {
// Validate the peer address to prevent arbitrary values being added to the db
String[] parts = this.peerAddress.split(":");
if (parts.length != 2) {
// Invalid format
return false;
}
String host = parts[0];
if (!InetAddresses.isInetAddress(host)) {
// Invalid host
return false;
}
int port;
try {
port = Integer.parseInt(parts[1]);
} catch (NumberFormatException e) {
port = -1;
}
if (port <= 0 || port > 65535) {
// Invalid port
return false;
}
// Make sure that it's not a local address
try {
InetAddress addr = InetAddress.getByName(host);
if (addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isSiteLocalAddress()) {
// Ignore local addresses
return false;
}
} catch (UnknownHostException e) {
return false;
}
// Valid host/port combination
return true;
}
public void incrementSuccesses() {
this.successes++;
}
public void incrementFailures() {
this.failures++;
}
public void markAsAttempted() {
this.lastAttempted = NTP.getTime();
}
public void markAsRetrieved() {
this.lastRetrieved = NTP.getTime();
}
public byte[] getHash() {
return this.hash;
}
public String getPeerAddress() {
return this.peerAddress;
}
public Integer getSuccesses() {
return this.successes;
}
public Integer getFailures() {
return this.failures;
}
public Long getLastAttempted() {
return this.lastAttempted;
}
public Long getLastRetrieved() {
return this.lastRetrieved;
}
}
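To illustrate what isPeerAddressValid() accepts and rejects, here is a small sketch using placeholder signature bytes; the expected results are shown in comments and the class name is hypothetical.

import org.qortal.data.network.ArbitraryPeerData;

// Illustrative behaviour of the peer address validation above
public class ArbitraryPeerDataSketch {
    public static void main(String[] args) {
        byte[] dummySignature = new byte[64]; // placeholder signature bytes

        // Public IP literal with explicit port: accepted
        System.out.println(new ArbitraryPeerData(dummySignature, "203.0.113.5:12392").isPeerAddressValid());   // true

        // Loopback and site-local addresses are rejected
        System.out.println(new ArbitraryPeerData(dummySignature, "127.0.0.1:12392").isPeerAddressValid());     // false
        System.out.println(new ArbitraryPeerData(dummySignature, "192.168.1.10:12392").isPeerAddressValid());  // false

        // Hostnames and missing ports are rejected, as only literal IP:port is accepted
        System.out.println(new ArbitraryPeerData(dummySignature, "example.com:12392").isPeerAddressValid());   // false
        System.out.println(new ArbitraryPeerData(dummySignature, "203.0.113.5").isPeerAddressValid());         // false
    }
}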

View File

@ -13,6 +13,8 @@ import io.swagger.v3.oas.annotations.media.Schema;
@XmlAccessorType(XmlAccessType.FIELD)
public class PeerData {
public static final int MAX_PEER_ADDRESS_SIZE = 255;
// Properties
// Don't expose this via JAXB - use pretty getter instead

View File

@ -1,17 +1,22 @@
package org.qortal.data.transaction;
import java.util.List;
import java.util.Map;
import javax.xml.bind.Unmarshaller;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import org.eclipse.persistence.oxm.annotations.XmlDiscriminatorValue;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.PaymentData;
import org.qortal.transaction.Transaction.TransactionType;
import io.swagger.v3.oas.annotations.media.Schema;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.toMap;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
@Schema(allOf = { TransactionData.class })
@ -25,17 +30,65 @@ public class ArbitraryTransactionData extends TransactionData {
DATA_HASH;
}
// Methods
public enum Method {
PUT(0), // A complete replacement of a resource
PATCH(1); // An update / partial replacement of a resource
public final int value;
private static final Map<Integer, Method> map = stream(Method.values())
.collect(toMap(method -> method.value, method -> method));
Method(int value) {
this.value = value;
}
public static Method valueOf(int value) {
return map.get(value);
}
}
// Compression types
public enum Compression {
NONE(0),
ZIP(1);
public final int value;
private static final Map<Integer, Compression> map = stream(Compression.values())
.collect(toMap(compression -> compression.value, compression -> compression));
Compression(int value) {
this.value = value;
}
public static Compression valueOf(int value) {
return map.get(value);
}
}
// Properties
private int version;
@Schema(example = "sender_public_key")
private byte[] senderPublicKey;
private int service;
private Service service;
private int nonce;
private int size;
private String name;
private String identifier;
private Method method;
private byte[] secret;
private Compression compression;
@Schema(example = "raw_data_in_base58")
private byte[] data;
private DataType dataType;
@Schema(example = "metadata_file_hash_in_base58")
private byte[] metadataHash;
private List<PaymentData> payments;
// Constructors
@ -50,14 +103,24 @@ public class ArbitraryTransactionData extends TransactionData {
}
public ArbitraryTransactionData(BaseTransactionData baseTransactionData,
int version, int service, byte[] data, DataType dataType, List<PaymentData> payments) {
int version, Service service, int nonce, int size,
String name, String identifier, Method method, byte[] secret, Compression compression,
byte[] data, DataType dataType, byte[] metadataHash, List<PaymentData> payments) {
super(TransactionType.ARBITRARY, baseTransactionData);
this.senderPublicKey = baseTransactionData.creatorPublicKey;
this.version = version;
this.service = service;
this.nonce = nonce;
this.size = size;
this.name = name;
this.identifier = identifier;
this.method = method;
this.secret = secret;
this.compression = compression;
this.data = data;
this.dataType = dataType;
this.metadataHash = metadataHash;
this.payments = payments;
}
@ -71,10 +134,42 @@ public class ArbitraryTransactionData extends TransactionData {
return this.version;
}
public int getService() {
public Service getService() {
return this.service;
}
public int getNonce() {
return this.nonce;
}
public void setNonce(int nonce) {
this.nonce = nonce;
}
public int getSize() {
return this.size;
}
public String getName() {
return this.name;
}
public String getIdentifier() {
return (this.identifier != "") ? this.identifier : null;
}
public Method getMethod() {
return this.method;
}
public byte[] getSecret() {
return this.secret;
}
public Compression getCompression() {
return this.compression;
}
public byte[] getData() {
return this.data;
}
@ -91,6 +186,14 @@ public class ArbitraryTransactionData extends TransactionData {
this.dataType = dataType;
}
public byte[] getMetadataHash() {
return this.metadataHash;
}
public void setMetadataHash(byte[] metadataHash) {
this.metadataHash = metadataHash;
}
public List<PaymentData> getPayments() {
return this.payments;
}

View File

@ -290,8 +290,8 @@ public class SysTray {
}
public void setTrayIcon(int iconid) {
if (trayIcon != null) {
try {
try {
if (trayIcon != null) {
switch (iconid) {
case 1:
this.trayIcon.setImage(Gui.loadImage("icons/qortal_ui_tray_syncing_time-alt.png"));
@ -306,9 +306,9 @@ public class SysTray {
this.trayIcon.setImage(Gui.loadImage("icons/qortal_ui_tray_synced.png"));
break;
}
} catch (NullPointerException e) {
LOGGER.info("Unable to set tray icon");
}
} catch (Exception e) {
LOGGER.info("Unable to set tray icon: {}", e.getMessage());
}
}

View File

@ -19,8 +19,7 @@ public class ResourceList {
private static final Logger LOGGER = LogManager.getLogger(ResourceList.class);
private String category;
private String resourceName;
private String name;
private List<String> list = new ArrayList<>();
/**
@ -29,13 +28,11 @@ public class ResourceList {
* This can be used for local blocking, or even for curating and sharing content lists
* Lists are backed off to JSON files (in the lists folder) to ease sharing between nodes and users
*
* @param category - for instance "blacklist", "whitelist", or "userlist"
* @param resourceName - for instance "address", "poll", or "group"
* @param name - the name of the list, for instance "blockedAddresses"
* @throws IOException
*/
public ResourceList(String category, String resourceName) throws IOException {
this.category = category;
this.resourceName = resourceName;
public ResourceList(String name) throws IOException {
this.name = name;
this.load();
}
@ -43,17 +40,13 @@ public class ResourceList {
/* Filesystem */
private Path getFilePath() {
String pathString = String.format("%s%s%s_%s.json", Settings.getInstance().getListsPath(),
File.separator, this.category, this.resourceName);
String pathString = String.format("%s.json", Paths.get(Settings.getInstance().getListsPath(), this.name));
return Paths.get(pathString);
}
public void save() throws IOException {
if (this.resourceName == null) {
throw new IllegalStateException("Can't save list with missing resource name");
}
if (this.category == null) {
throw new IllegalStateException("Can't save list with missing category");
if (this.name == null) {
throw new IllegalStateException("Can't save list with missing name");
}
String jsonString = ResourceList.listToJSONString(this.list);
Path filePath = this.getFilePath();
@ -91,7 +84,7 @@ public class ResourceList {
try {
return this.load();
} catch (IOException e) {
LOGGER.info("Unable to revert {} {}", this.resourceName, this.category);
LOGGER.info("Unable to revert list {}: {}", this.name, e.getMessage());
}
return false;
}
@ -103,7 +96,7 @@ public class ResourceList {
if (resource == null || this.list == null) {
return;
}
if (!this.contains(resource)) {
if (!this.contains(resource, true)) {
this.list.add(resource);
}
}
@ -115,11 +108,17 @@ public class ResourceList {
this.list.remove(resource);
}
public boolean contains(String resource) {
public boolean contains(String resource, boolean caseSensitive) {
if (resource == null || this.list == null) {
return false;
}
return this.list.contains(resource);
if (caseSensitive) {
return this.list.contains(resource);
}
else {
return this.list.stream().anyMatch(resource::equalsIgnoreCase);
}
}
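For reference, a standalone sketch (not part of this diff) contrasting the two matching modes used by contains() above: exact List.contains() versus the stream-based equalsIgnoreCase match; the list entry is purely hypothetical:
import java.util.List;
public class CaseMatchDemo {
    public static void main(String[] args) {
        List<String> list = List.of("QortalDemoName"); // hypothetical list entry
        String lookup = "qortaldemoname";
        boolean caseSensitiveHit = list.contains(lookup); // false - exact match only
        boolean caseInsensitiveHit = list.stream().anyMatch(lookup::equalsIgnoreCase); // true
        System.out.println(caseSensitiveHit + " / " + caseInsensitiveHit);
    }
}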
@ -153,16 +152,16 @@ public class ResourceList {
return ResourceList.listToJSONString(this.list);
}
public String getCategory() {
return this.category;
public String getName() {
return this.name;
}
public String getResourceName() {
return this.resourceName;
public List<String> getList() {
return this.list;
}
public String toString() {
return String.format("%s %s", this.category, this.resourceName);
return this.name;
}
}

View File

@ -26,10 +26,9 @@ public class ResourceListManager {
return instance;
}
private ResourceList getList(String category, String resourceName) {
private ResourceList getList(String listName) {
for (ResourceList list : this.lists) {
if (Objects.equals(list.getCategory(), category) &&
Objects.equals(list.getResourceName(), resourceName)) {
if (Objects.equals(list.getName(), listName)) {
return list;
}
}
@ -37,19 +36,19 @@ public class ResourceListManager {
// List doesn't exist in array yet, so create it
// This will load any existing data from the filesystem
try {
ResourceList list = new ResourceList(category, resourceName);
ResourceList list = new ResourceList(listName);
this.lists.add(list);
return list;
} catch (IOException e) {
LOGGER.info("Unable to load or create list {} {}: {}", category, resourceName, e.getMessage());
LOGGER.info("Unable to load or create list {}: {}", listName, e.getMessage());
return null;
}
}
public boolean addToList(String category, String resourceName, String item, boolean save) {
ResourceList list = this.getList(category, resourceName);
public boolean addToList(String listName, String item, boolean save) {
ResourceList list = this.getList(listName);
if (list == null) {
return false;
}
@ -67,8 +66,8 @@ public class ResourceListManager {
}
}
public boolean removeFromList(String category, String resourceName, String item, boolean save) {
ResourceList list = this.getList(category, resourceName);
public boolean removeFromList(String listName, String item, boolean save) {
ResourceList list = this.getList(listName);
if (list == null) {
return false;
}
@ -87,16 +86,16 @@ public class ResourceListManager {
}
}
public boolean listContains(String category, String resourceName, String address) {
ResourceList list = this.getList(category, resourceName);
public boolean listContains(String listName, String item, boolean caseSensitive) {
ResourceList list = this.getList(listName);
if (list == null) {
return false;
}
return list.contains(address);
return list.contains(item, caseSensitive);
}
public void saveList(String category, String resourceName) {
ResourceList list = this.getList(category, resourceName);
public void saveList(String listName) {
ResourceList list = this.getList(listName);
if (list == null) {
return;
}
@ -109,20 +108,36 @@ public class ResourceListManager {
}
}
public void revertList(String category, String resourceName) {
ResourceList list = this.getList(category, resourceName);
public void revertList(String listName) {
ResourceList list = this.getList(listName);
if (list == null) {
return;
}
list.revert();
}
public String getJSONStringForList(String category, String resourceName) {
ResourceList list = this.getList(category, resourceName);
public String getJSONStringForList(String listName) {
ResourceList list = this.getList(listName);
if (list == null) {
return null;
}
return list.getJSONString();
}
public List<String> getStringsInList(String listName) {
ResourceList list = this.getList(listName);
if (list == null) {
return null;
}
return list.getList();
}
public int getItemCountForList(String listName) {
ResourceList list = this.getList(listName);
if (list == null) {
return 0;
}
return list.getList().size();
}
}

View File

@ -48,6 +48,9 @@ public enum Handshake {
return null;
}
// Make a note of the senderPeerAddress, as this should be our public IP
Network.getInstance().ourPeerAddressUpdated(helloMessage.getSenderPeerAddress());
String versionString = helloMessage.getVersionString();
Matcher matcher = peer.VERSION_PATTERN.matcher(versionString);
@ -87,8 +90,9 @@ public enum Handshake {
public void action(Peer peer) {
String versionString = Controller.getInstance().getVersionString();
long timestamp = NTP.getTime();
String senderPeerAddress = peer.getPeerData().getAddress().toString();
Message helloMessage = new HelloMessage(timestamp, versionString);
Message helloMessage = new HelloMessage(timestamp, versionString, senderPeerAddress);
if (!peer.sendMessage(helloMessage))
peer.disconnect("failed to send HELLO");
}

View File

@ -6,6 +6,8 @@ import org.bouncycastle.crypto.params.Ed25519PrivateKeyParameters;
import org.bouncycastle.crypto.params.Ed25519PublicKeyParameters;
import org.qortal.block.BlockChain;
import org.qortal.controller.Controller;
import org.qortal.controller.arbitrary.ArbitraryDataFileManager;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.crypto.Crypto;
import org.qortal.data.block.BlockData;
import org.qortal.data.network.PeerData;
@ -15,6 +17,7 @@ import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
import org.qortal.settings.Settings;
import org.qortal.utils.Base58;
import org.qortal.utils.ExecuteProduceConsume;
import org.qortal.utils.ExecuteProduceConsume.StatsSnapshot;
import org.qortal.utils.NTP;
@ -114,6 +117,9 @@ public class Network {
private final Lock mergePeersLock = new ReentrantLock();
private List<String> ourExternalIpAddressHistory = new ArrayList<>();
private String ourExternalIpAddress = null;
// Constructors
private Network() {
@ -234,6 +240,81 @@ public class Network {
}
}
public boolean requestDataFromPeer(String peerAddressString, byte[] signature) {
if (peerAddressString != null) {
PeerAddress peerAddress = PeerAddress.fromString(peerAddressString);
// Reuse an existing PeerData instance if it's already in the known peers list
PeerData peerData = this.allKnownPeers.stream()
.filter(knownPeerData -> knownPeerData.getAddress().equals(peerAddress))
.findFirst()
.orElse(null);
if (peerData == null) {
// Not a known peer, so we need to create one
Long addedWhen = NTP.getTime();
String addedBy = "requestDataFromPeer";
peerData = new PeerData(peerAddress, addedWhen, addedBy);
}
if (peerData == null) {
LOGGER.info("PeerData is null when trying to request data from peer {}", peerAddressString);
return false;
}
// Check if we're already connected to and handshaked with this peer
Peer connectedPeer = this.connectedPeers.stream()
.filter(p -> p.getPeerData().getAddress().equals(peerAddress))
.findFirst()
.orElse(null);
boolean isConnected = (connectedPeer != null);
boolean isHandshaked = this.getHandshakedPeers().stream()
.anyMatch(p -> p.getPeerData().getAddress().equals(peerAddress));
if (isConnected && isHandshaked) {
// Already connected
return this.requestDataFromConnectedPeer(connectedPeer, signature);
}
else {
// We need to connect to this peer before we can request data
try {
if (!isConnected) {
// Add this signature to the list of pending requests for this peer
LOGGER.info("Making connection to peer {} to request files for signature {}...", peerAddressString, Base58.encode(signature));
Peer peer = new Peer(peerData);
peer.addPendingSignatureRequest(signature);
return this.connectPeer(peer);
// If connection (and handshake) is successful, data will automatically be requested
}
else if (!isHandshaked) {
LOGGER.info("Peer {} is connected but not handshaked. Not attempting a new connection.", peerAddress);
return false;
}
} catch (InterruptedException e) {
LOGGER.info("Interrupted when connecting to peer {}", peerAddress);
return false;
}
}
}
return false;
}
private boolean requestDataFromConnectedPeer(Peer connectedPeer, byte[] signature) {
if (signature == null) {
// Nothing to do
return false;
}
try (final Repository repository = RepositoryManager.getRepository()) {
return ArbitraryDataFileManager.getInstance().fetchAllArbitraryDataFiles(repository, connectedPeer, signature);
} catch (DataException e) {
LOGGER.info("Unable to fetch arbitrary data files");
}
return false;
}
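Purely as a usage illustration (not part of this diff), requestDataFromPeer() could be driven as below; the peer address and signature are placeholders, and Network is assumed to live in org.qortal.network:
import org.qortal.network.Network;
public class RequestDataDemo {
    public static void main(String[] args) {
        String peerAddress = "203.0.113.10:12392"; // placeholder peer address
        byte[] signature = new byte[64]; // placeholder 64-byte transaction signature
        // Connects (and handshakes) first if necessary, then fetches all data files for the signature
        boolean requested = Network.getInstance().requestDataFromPeer(peerAddress, signature);
        System.out.println("Request initiated: " + requested);
    }
}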
/**
* Returns list of connected peers that have completed handshaking.
*/
@ -648,14 +729,14 @@ public class Network {
}
}
private void connectPeer(Peer newPeer) throws InterruptedException {
private boolean connectPeer(Peer newPeer) throws InterruptedException {
SocketChannel socketChannel = newPeer.connect(this.channelSelector);
if (socketChannel == null) {
return;
return false;
}
if (Thread.currentThread().isInterrupted()) {
return;
return false;
}
synchronized (this.connectedPeers) {
@ -663,6 +744,8 @@ public class Network {
}
this.onPeerReady(newPeer);
return true;
}
private Peer getPeerFromChannel(SocketChannel socketChannel) {
@ -913,6 +996,17 @@ public class Network {
}
}
// Process any pending signature requests, as this peer may have been connected for this purpose only
List<byte[]> pendingSignatureRequests = new ArrayList<>(peer.getPendingSignatureRequests());
if (pendingSignatureRequests != null && !pendingSignatureRequests.isEmpty()) {
for (byte[] signature : pendingSignatureRequests) {
this.requestDataFromConnectedPeer(peer, signature);
peer.removePendingSignatureRequest(signature);
}
}
// FUTURE: we may want to disconnect from this peer if we've finished requesting data from it
// Start regular pings
peer.startPings();
@ -1011,6 +1105,66 @@ public class Network {
return new GetUnconfirmedTransactionsMessage();
}
// External IP / peerAddress tracking
public void ourPeerAddressUpdated(String peerAddress) {
if (peerAddress == null) {
return;
}
String[] parts = peerAddress.split(":");
if (parts.length != 2) {
return;
}
String host = parts[0];
try {
InetAddress addr = InetAddress.getByName(host);
if (addr.isAnyLocalAddress() || addr.isSiteLocalAddress()) {
// Ignore local addresses
return;
}
} catch (UnknownHostException e) {
return;
}
this.ourExternalIpAddressHistory.add(host);
// Limit to 10 entries
while (this.ourExternalIpAddressHistory.size() > 10) {
this.ourExternalIpAddressHistory.remove(0);
}
// If we've had 3 consecutive matching addresses, and they're different from
// our stored IP address value, treat it as updated.
int size = this.ourExternalIpAddressHistory.size();
if (size < 3) {
// Need at least 3 readings
return;
}
String ip1 = this.ourExternalIpAddressHistory.get(size - 1);
String ip2 = this.ourExternalIpAddressHistory.get(size - 2);
String ip3 = this.ourExternalIpAddressHistory.get(size - 3);
if (!Objects.equals(ip1, this.ourExternalIpAddress)) {
// Latest reading doesn't match our known value
if (Objects.equals(ip1, ip2) && Objects.equals(ip1, ip3)) {
// Last 3 readings were the same - i.e. more than one peer agreed on the new IP address
this.ourExternalIpAddress = ip1;
this.onExternalIpUpdate(ip1);
}
}
}
public void onExternalIpUpdate(String ipAddress) {
LOGGER.info("External IP address updated to {}", ipAddress);
ArbitraryDataManager.getInstance().broadcastHostedSignatureList();
}
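To make the consensus rule above concrete, a standalone sketch (not part of this diff) of the "three consecutive matching readings" check, using a plain list in place of ourExternalIpAddressHistory; the addresses are placeholders:
import java.util.List;
import java.util.Objects;
public class ExternalIpConsensusDemo {
    public static void main(String[] args) {
        // Simulated reading history, newest last
        List<String> history = List.of("198.51.100.7", "203.0.113.25", "203.0.113.25", "203.0.113.25");
        String knownAddress = "198.51.100.7";
        int size = history.size();
        String ip1 = history.get(size - 1);
        String ip2 = history.get(size - 2);
        String ip3 = history.get(size - 3);
        // Update only when the newest reading differs from the stored value
        // and the last three readings all agree with each other
        if (!Objects.equals(ip1, knownAddress) && Objects.equals(ip1, ip2) && Objects.equals(ip1, ip3)) {
            System.out.println("External IP updated to " + ip1);
        }
    }
}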
// Peer-management calls
public void noteToSelf(Peer peer) {

View File

@ -47,6 +47,11 @@ public class Peer {
*/
private static final int RESPONSE_TIMEOUT = 3000; // ms
/**
* Maximum time to wait for a peer to respond with blocks (ms)
*/
public static final int FETCH_BLOCKS_TIMEOUT = 10000;
/**
* Interval between PING messages to a peer. (ms)
* <p>
@ -99,6 +104,11 @@ public class Peer {
private boolean syncInProgress = false;
/* Pending signature requests */
private List<byte[]> pendingSignatureRequests = Collections.synchronizedList(new ArrayList<>());
// Versioning
public static final Pattern VERSION_PATTERN = Pattern.compile(Controller.VERSION_PREFIX
+ "(\\d{1,3})\\.(\\d{1,5})\\.(\\d{1,5})");
@ -350,6 +360,34 @@ public class Peer {
this.syncInProgress = syncInProgress;
}
// Pending signature requests
public void addPendingSignatureRequest(byte[] signature) {
// Check if we already have this signature in the list
for (byte[] existingSignature : this.pendingSignatureRequests) {
if (Arrays.equals(existingSignature, signature)) {
return;
}
}
this.pendingSignatureRequests.add(signature);
}
public void removePendingSignatureRequest(byte[] signature) {
Iterator<byte[]> iterator = this.pendingSignatureRequests.iterator();
while (iterator.hasNext()) {
byte[] existingSignature = iterator.next();
if (Arrays.equals(existingSignature, signature)) {
iterator.remove();
}
}
}
public List<byte[]> getPendingSignatureRequests() {
return this.pendingSignatureRequests;
}
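Worth noting: the duplicate checks above go through Arrays.equals because byte[] uses reference equality; a short illustration (outside this diff):
import java.util.Arrays;
public class SignatureEqualityDemo {
    public static void main(String[] args) {
        byte[] a = {1, 2, 3};
        byte[] b = {1, 2, 3};
        System.out.println(a.equals(b)); // false - reference comparison only
        System.out.println(Arrays.equals(a, b)); // true - element-wise comparison
    }
}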
@Override
public String toString() {
// Easier, and nicer output, than peer.getRemoteSocketAddress()
@ -544,12 +582,22 @@ public class Peer {
}
/**
* Attempt to send Message to peer.
* Attempt to send Message to peer, using default RESPONSE_TIMEOUT.
*
* @param message message to be sent
* @return <code>true</code> if message successfully sent; <code>false</code> otherwise
*/
public boolean sendMessage(Message message) {
return this.sendMessageWithTimeout(message, RESPONSE_TIMEOUT);
}
/**
* Attempt to send Message to peer, using custom timeout.
*
* @param message message to be sent
* @return <code>true</code> if message successfully sent; <code>false</code> otherwise
*/
public boolean sendMessageWithTimeout(Message message, int timeout) {
if (!this.socketChannel.isOpen()) {
return false;
}
@ -563,12 +611,14 @@ public class Peer {
synchronized (this.socketChannel) {
final long sendStart = System.currentTimeMillis();
long totalBytes = 0;
while (outputBuffer.hasRemaining()) {
int bytesWritten = this.socketChannel.write(outputBuffer);
totalBytes += bytesWritten;
LOGGER.trace("[{}] Sent {} bytes of {} message with ID {} to peer {}", this.peerConnectionId,
bytesWritten, message.getType().name(), message.getId(), this);
LOGGER.trace("[{}] Sent {} bytes of {} message with ID {} to peer {} ({} total)", this.peerConnectionId,
bytesWritten, message.getType().name(), message.getId(), this, totalBytes);
if (bytesWritten == 0) {
// Underlying socket's internal buffer probably full,
@ -583,7 +633,7 @@ public class Peer {
*/
Thread.sleep(1L); //NOSONAR squid:S2276
if (System.currentTimeMillis() - sendStart > RESPONSE_TIMEOUT) {
if (System.currentTimeMillis() - sendStart > timeout) {
// We've taken too long to send this message
return false;
}
@ -604,7 +654,7 @@ public class Peer {
}
/**
* Send message to peer and await response.
* Send message to peer and await response, using default RESPONSE_TIMEOUT.
* <p>
* Message is assigned a random ID and sent.
* If a response with matching ID is received then it is returned to caller.
@ -618,6 +668,24 @@ public class Peer {
* @throws InterruptedException if interrupted while waiting
*/
public Message getResponse(Message message) throws InterruptedException {
return getResponseWithTimeout(message, RESPONSE_TIMEOUT);
}
/**
* Send message to peer and await response.
* <p>
* Message is assigned a random ID and sent.
* If a response with matching ID is received then it is returned to caller.
* <p>
* If no response with matching ID within timeout, or some other error/exception occurs,
* then return <code>null</code>.<br>
* (Assume peer will be rapidly disconnected after this).
*
* @param message message to send
* @return <code>Message</code> if valid response received; <code>null</code> if not or error/exception occurs
* @throws InterruptedException if interrupted while waiting
*/
public Message getResponseWithTimeout(Message message, int timeout) throws InterruptedException {
BlockingQueue<Message> blockingQueue = new ArrayBlockingQueue<>(1);
// Assign random ID to this message
@ -632,13 +700,13 @@ public class Peer {
message.setId(id);
// Try to send message
if (!this.sendMessage(message)) {
if (!this.sendMessageWithTimeout(message, timeout)) {
this.replyQueues.remove(id);
return null;
}
try {
return blockingQueue.poll(RESPONSE_TIMEOUT, TimeUnit.MILLISECONDS);
return blockingQueue.poll(timeout, TimeUnit.MILLISECONDS);
} finally {
this.replyQueues.remove(id);
}
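For illustration (not part of this diff), a caller needing a longer wait, such as a large data-file fetch, could pass the extended timeout explicitly; this sketch assumes a handshaked Peer instance, that Peer lives in org.qortal.network, and uses the GetArbitraryDataFileMessage added elsewhere in this change:
import org.qortal.network.Peer;
import org.qortal.network.message.GetArbitraryDataFileMessage;
import org.qortal.network.message.Message;
public class TimeoutDemo {
    static Message fetchFile(Peer peer, byte[] signature, byte[] hash) throws InterruptedException {
        Message request = new GetArbitraryDataFileMessage(signature, hash);
        // The default RESPONSE_TIMEOUT (3s) is too short for large transfers,
        // so use the longer FETCH_BLOCKS_TIMEOUT (10s) introduced above
        return peer.getResponseWithTimeout(request, Peer.FETCH_BLOCKS_TIMEOUT);
    }
}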

View File

@ -0,0 +1,90 @@
package org.qortal.network.message;
import com.google.common.primitives.Ints;
import org.qortal.transform.TransformationException;
import org.qortal.transform.Transformer;
import org.qortal.utils.Serialization;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
public class ArbitraryDataFileListMessage extends Message {
private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH;
private static final int HASH_LENGTH = Transformer.SHA256_LENGTH;
private final byte[] signature;
private final List<byte[]> hashes;
public ArbitraryDataFileListMessage(byte[] signature, List<byte[]> hashes) {
super(MessageType.ARBITRARY_DATA_FILE_LIST);
this.signature = signature;
this.hashes = hashes;
}
public ArbitraryDataFileListMessage(int id, byte[] signature, List<byte[]> hashes) {
super(id, MessageType.ARBITRARY_DATA_FILE_LIST);
this.signature = signature;
this.hashes = hashes;
}
public List<byte[]> getHashes() {
return this.hashes;
}
public byte[] getSignature() {
return this.signature;
}
public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException, TransformationException {
byte[] signature = new byte[SIGNATURE_LENGTH];
bytes.get(signature);
int count = bytes.getInt();
if (bytes.remaining() != count * HASH_LENGTH)
return null;
List<byte[]> hashes = new ArrayList<>();
for (int i = 0; i < count; ++i) {
byte[] hash = new byte[HASH_LENGTH];
bytes.get(hash);
hashes.add(hash);
}
return new ArbitraryDataFileListMessage(id, signature, hashes);
}
@Override
protected byte[] toData() {
try {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
bytes.write(this.signature);
bytes.write(Ints.toByteArray(this.hashes.size()));
for (byte[] hash : this.hashes) {
bytes.write(hash);
}
return bytes.toByteArray();
} catch (IOException e) {
return null;
}
}
public ArbitraryDataFileListMessage cloneWithNewId(int newId) {
ArbitraryDataFileListMessage clone = new ArbitraryDataFileListMessage(this.signature, this.hashes);
clone.setId(newId);
return clone;
}
}
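For reference (not part of this diff), a sketch of building this message from locally held chunk hashes; the signature and hashes are placeholders sized to match SIGNATURE_LENGTH and HASH_LENGTH above:
import java.util.List;
import org.qortal.network.message.ArbitraryDataFileListMessage;
public class FileListMessageDemo {
    public static void main(String[] args) {
        byte[] signature = new byte[64]; // placeholder transaction signature
        byte[] chunkHash1 = new byte[32]; // placeholder SHA-256 chunk hashes
        byte[] chunkHash2 = new byte[32];
        ArbitraryDataFileListMessage message =
                new ArbitraryDataFileListMessage(signature, List.of(chunkHash1, chunkHash2));
        System.out.println("Hashes advertised: " + message.getHashes().size()); // 2
    }
}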

View File

@ -0,0 +1,91 @@
package org.qortal.network.message;
import com.google.common.primitives.Ints;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.repository.DataException;
import org.qortal.transform.Transformer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
public class ArbitraryDataFileMessage extends Message {
private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH;
private final byte[] signature;
private final ArbitraryDataFile arbitraryDataFile;
public ArbitraryDataFileMessage(byte[] signature, ArbitraryDataFile arbitraryDataFile) {
super(MessageType.ARBITRARY_DATA_FILE);
this.signature = signature;
this.arbitraryDataFile = arbitraryDataFile;
}
public ArbitraryDataFileMessage(int id, byte[] signature, ArbitraryDataFile arbitraryDataFile) {
super(id, MessageType.ARBITRARY_DATA_FILE);
this.signature = signature;
this.arbitraryDataFile = arbitraryDataFile;
}
public ArbitraryDataFile getArbitraryDataFile() {
return this.arbitraryDataFile;
}
public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) throws UnsupportedEncodingException {
byte[] signature = new byte[SIGNATURE_LENGTH];
byteBuffer.get(signature);
int dataLength = byteBuffer.getInt();
if (byteBuffer.remaining() != dataLength)
return null;
byte[] data = new byte[dataLength];
byteBuffer.get(data);
try {
ArbitraryDataFile arbitraryDataFile = new ArbitraryDataFile(data, signature);
return new ArbitraryDataFileMessage(id, signature, arbitraryDataFile);
}
catch (DataException e) {
return null;
}
}
@Override
protected byte[] toData() {
if (this.arbitraryDataFile == null) {
return null;
}
byte[] data = this.arbitraryDataFile.getBytes();
if (data == null) {
return null;
}
try {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
bytes.write(signature);
bytes.write(Ints.toByteArray(data.length));
bytes.write(data);
return bytes.toByteArray();
} catch (IOException e) {
return null;
}
}
public ArbitraryDataFileMessage cloneWithNewId(int newId) {
ArbitraryDataFileMessage clone = new ArbitraryDataFileMessage(this.signature, this.arbitraryDataFile);
clone.setId(newId);
return clone;
}
}

View File

@ -0,0 +1,79 @@
package org.qortal.network.message;
import com.google.common.primitives.Ints;
import org.qortal.data.network.PeerData;
import org.qortal.transaction.DeployAtTransaction;
import org.qortal.transform.TransformationException;
import org.qortal.transform.Transformer;
import org.qortal.utils.Serialization;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
public class ArbitrarySignaturesMessage extends Message {
private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH;
private String peerAddress;
private List<byte[]> signatures;
public ArbitrarySignaturesMessage(String peerAddress, List<byte[]> signatures) {
this(-1, peerAddress, signatures);
}
private ArbitrarySignaturesMessage(int id, String peerAddress, List<byte[]> signatures) {
super(id, MessageType.ARBITRARY_SIGNATURES);
this.peerAddress = peerAddress;
this.signatures = signatures;
}
public String getPeerAddress() {
return this.peerAddress;
}
public List<byte[]> getSignatures() {
return this.signatures;
}
public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException, TransformationException {
String peerAddress = Serialization.deserializeSizedString(bytes, PeerData.MAX_PEER_ADDRESS_SIZE);
int signatureCount = bytes.getInt();
if (bytes.remaining() != signatureCount * SIGNATURE_LENGTH)
return null;
List<byte[]> signatures = new ArrayList<>();
for (int i = 0; i < signatureCount; ++i) {
byte[] signature = new byte[SIGNATURE_LENGTH];
bytes.get(signature);
signatures.add(signature);
}
return new ArbitrarySignaturesMessage(id, peerAddress, signatures);
}
@Override
protected byte[] toData() {
try {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
Serialization.serializeSizedString(bytes, this.peerAddress);
bytes.write(Ints.toByteArray(this.signatures.size()));
for (byte[] signature : this.signatures)
bytes.write(signature);
return bytes.toByteArray();
} catch (IOException e) {
return null;
}
}
}

View File

@ -0,0 +1,82 @@
package org.qortal.network.message;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import org.qortal.transform.Transformer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import static org.qortal.transform.Transformer.INT_LENGTH;
import static org.qortal.transform.Transformer.LONG_LENGTH;
public class GetArbitraryDataFileListMessage extends Message {
private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH;
private final byte[] signature;
private final long requestTime;
private int requestHops;
public GetArbitraryDataFileListMessage(byte[] signature, long requestTime, int requestHops) {
this(-1, signature, requestTime, requestHops);
}
private GetArbitraryDataFileListMessage(int id, byte[] signature, long requestTime, int requestHops) {
super(id, MessageType.GET_ARBITRARY_DATA_FILE_LIST);
this.signature = signature;
this.requestTime = requestTime;
this.requestHops = requestHops;
}
public byte[] getSignature() {
return this.signature;
}
public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException {
if (bytes.remaining() != SIGNATURE_LENGTH + LONG_LENGTH + INT_LENGTH)
return null;
byte[] signature = new byte[SIGNATURE_LENGTH];
bytes.get(signature);
long requestTime = bytes.getLong();
int requestHops = bytes.getInt();
return new GetArbitraryDataFileListMessage(id, signature, requestTime, requestHops);
}
@Override
protected byte[] toData() {
try {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
bytes.write(this.signature);
bytes.write(Longs.toByteArray(this.requestTime));
bytes.write(Ints.toByteArray(this.requestHops));
return bytes.toByteArray();
} catch (IOException e) {
return null;
}
}
public long getRequestTime() {
return this.requestTime;
}
public int getRequestHops() {
return this.requestHops;
}
public void setRequestHops(int requestHops) {
this.requestHops = requestHops;
}
}

View File

@ -0,0 +1,66 @@
package org.qortal.network.message;
import org.qortal.transform.Transformer;
import org.qortal.transform.transaction.TransactionTransformer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
public class GetArbitraryDataFileMessage extends Message {
private static final int SIGNATURE_LENGTH = Transformer.SIGNATURE_LENGTH;
private static final int HASH_LENGTH = TransactionTransformer.SHA256_LENGTH;
private final byte[] signature;
private final byte[] hash;
public GetArbitraryDataFileMessage(byte[] signature, byte[] hash) {
this(-1, signature, hash);
}
private GetArbitraryDataFileMessage(int id, byte[] signature, byte[] hash) {
super(id, MessageType.GET_ARBITRARY_DATA_FILE);
this.signature = signature;
this.hash = hash;
}
public byte[] getSignature() {
return this.signature;
}
public byte[] getHash() {
return this.hash;
}
public static Message fromByteBuffer(int id, ByteBuffer bytes) throws UnsupportedEncodingException {
if (bytes.remaining() != HASH_LENGTH + SIGNATURE_LENGTH)
return null;
byte[] signature = new byte[SIGNATURE_LENGTH];
bytes.get(signature);
byte[] hash = new byte[HASH_LENGTH];
bytes.get(hash);
return new GetArbitraryDataFileMessage(id, signature, hash);
}
@Override
protected byte[] toData() {
try {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
bytes.write(this.signature);
bytes.write(this.hash);
return bytes.toByteArray();
} catch (IOException e) {
return null;
}
}
}

View File

@ -13,16 +13,18 @@ public class HelloMessage extends Message {
private final long timestamp;
private final String versionString;
private final String senderPeerAddress;
private HelloMessage(int id, long timestamp, String versionString) {
private HelloMessage(int id, long timestamp, String versionString, String senderPeerAddress) {
super(id, MessageType.HELLO);
this.timestamp = timestamp;
this.versionString = versionString;
this.senderPeerAddress = senderPeerAddress;
}
public HelloMessage(long timestamp, String versionString) {
this(-1, timestamp, versionString);
public HelloMessage(long timestamp, String versionString, String senderPeerAddress) {
this(-1, timestamp, versionString, senderPeerAddress);
}
public long getTimestamp() {
@ -33,12 +35,22 @@ public class HelloMessage extends Message {
return this.versionString;
}
public String getSenderPeerAddress() {
return this.senderPeerAddress;
}
public static Message fromByteBuffer(int id, ByteBuffer byteBuffer) throws TransformationException {
long timestamp = byteBuffer.getLong();
String versionString = Serialization.deserializeSizedString(byteBuffer, 255);
return new HelloMessage(id, timestamp, versionString);
// Sender peer address added in v3.0, so is an optional field. Older versions won't send it.
String senderPeerAddress = null;
if (byteBuffer.hasRemaining()) {
senderPeerAddress = Serialization.deserializeSizedString(byteBuffer, 255);
}
return new HelloMessage(id, timestamp, versionString, senderPeerAddress);
}
@Override
@ -49,6 +61,8 @@ public class HelloMessage extends Message {
Serialization.serializeSizedString(bytes, this.versionString);
Serialization.serializeSizedString(bytes, this.senderPeerAddress);
return bytes.toByteArray();
}
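A standalone sketch (not part of this diff) of why the optional field is safe to deserialize: a HELLO from an older peer leaves nothing in the buffer after the version string, so hasRemaining() is false and senderPeerAddress simply stays null; the payload below is simulated rather than a real Qortal serialization:
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
public class OptionalFieldDemo {
    public static void main(String[] args) {
        // Simulated pre-3.0 HELLO payload: timestamp + sized version string, no sender address
        byte[] version = "placeholder-version".getBytes(StandardCharsets.UTF_8);
        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES + Integer.BYTES + version.length);
        buffer.putLong(System.currentTimeMillis());
        buffer.putInt(version.length);
        buffer.put(version);
        buffer.flip();
        buffer.getLong(); // timestamp
        byte[] versionBytes = new byte[buffer.getInt()];
        buffer.get(versionBytes); // version string
        String senderPeerAddress = buffer.hasRemaining() ? "would deserialize here" : null;
        System.out.println(senderPeerAddress); // null for older peers
    }
}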

View File

@ -25,7 +25,7 @@ public abstract class Message {
private static final int MAGIC_LENGTH = 4;
private static final int CHECKSUM_LENGTH = 4;
private static final int MAX_DATA_SIZE = 1024 * 1024; // 1MB
private static final int MAX_DATA_SIZE = 10 * 1024 * 1024; // 10MB
@SuppressWarnings("serial")
public static class MessageException extends Exception {
@ -80,7 +80,18 @@ public abstract class Message {
GET_ONLINE_ACCOUNTS(81),
ARBITRARY_DATA(90),
GET_ARBITRARY_DATA(91);
GET_ARBITRARY_DATA(91),
BLOCKS(100),
GET_BLOCKS(101),
ARBITRARY_DATA_FILE(110),
GET_ARBITRARY_DATA_FILE(111),
ARBITRARY_DATA_FILE_LIST(120),
GET_ARBITRARY_DATA_FILE_LIST(121),
ARBITRARY_SIGNATURES(130);
public final int value;
public final Method fromByteBufferMethod;

View File

@ -40,8 +40,9 @@ public class Payment {
public ValidationResult isValid(byte[] senderPublicKey, List<PaymentData> payments, long fee, boolean isZeroAmountValid) throws DataException {
AssetRepository assetRepository = this.repository.getAssetRepository();
// Check fee is positive
if (fee <= 0)
// Check fee is positive or zero
// We have already checked that the fee is correct in the Transaction superclass
if (fee < 0)
return ValidationResult.NEGATIVE_FEE;
// Total up payment amounts by assetId

View File

@ -1,6 +1,13 @@
package org.qortal.repository;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceInfo;
import org.qortal.data.arbitrary.ArbitraryResourceNameInfo;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import java.util.List;
public interface ArbitraryRepository {
@ -12,4 +19,26 @@ public interface ArbitraryRepository {
public void delete(ArbitraryTransactionData arbitraryTransactionData) throws DataException;
public List<ArbitraryTransactionData> getArbitraryTransactions(String name, Service service, String identifier, long since) throws DataException;
public ArbitraryTransactionData getLatestTransaction(String name, Service service, Method method, String identifier) throws DataException;
public List<ArbitraryResourceInfo> getArbitraryResources(Service service, String identifier, String name, boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException;
public List<ArbitraryResourceNameInfo> getArbitraryResourceCreatorNames(Service service, String identifier, boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException;
public List<ArbitraryPeerData> getArbitraryPeerDataForSignature(byte[] signature) throws DataException;
public ArbitraryPeerData getArbitraryPeerDataForSignatureAndPeer(byte[] signature, String peerAddress) throws DataException;
public ArbitraryPeerData getArbitraryPeerDataForSignatureAndHost(byte[] signature, String host) throws DataException;
public void save(ArbitraryPeerData arbitraryPeerData) throws DataException;
public void delete(ArbitraryPeerData arbitraryPeerData) throws DataException;
public void deleteArbitraryPeersWithSignature(byte[] signature) throws DataException;
}

View File

@ -5,6 +5,7 @@ import java.util.List;
import java.util.Map;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.group.GroupApprovalData;
import org.qortal.data.transaction.GroupApprovalTransactionData;
import org.qortal.data.transaction.TransactionData;
@ -70,8 +71,8 @@ public interface TransactionRepository {
* @throws DataException
*/
public List<byte[]> getSignaturesMatchingCriteria(Integer startBlock, Integer blockLimit, Integer txGroupId,
List<TransactionType> txTypes, Integer service, String address,
ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException;
List<TransactionType> txTypes, Service service, String name, String address,
ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException;
/**
* Returns signatures for transactions that match search criteria.

View File

@ -1,58 +1,39 @@
package org.qortal.repository.hsqldb;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.arbitrary.ArbitraryResourceInfo;
import org.qortal.crypto.Crypto;
import org.qortal.data.arbitrary.ArbitraryResourceNameInfo;
import org.qortal.data.network.ArbitraryPeerData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.*;
import org.qortal.data.transaction.BaseTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.DataType;
import org.qortal.repository.ArbitraryRepository;
import org.qortal.repository.DataException;
import org.qortal.settings.Settings;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.transaction.Transaction.ApprovalStatus;
import org.qortal.utils.Base58;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class HSQLDBArbitraryRepository implements ArbitraryRepository {
private static final Logger LOGGER = LogManager.getLogger(HSQLDBArbitraryRepository.class);
private static final int MAX_RAW_DATA_SIZE = 255; // size of VARBINARY
protected HSQLDBRepository repository;
public HSQLDBArbitraryRepository(HSQLDBRepository repository) {
this.repository = repository;
}
/**
* Returns pathname for saving arbitrary transaction data payloads.
* <p>
* Format: <tt>arbitrary/<sender>/<service><tx-sig>.raw</tt>
*
* @param arbitraryTransactionData
* @return
*/
public static String buildPathname(ArbitraryTransactionData arbitraryTransactionData) {
String senderAddress = Crypto.toAddress(arbitraryTransactionData.getSenderPublicKey());
StringBuilder stringBuilder = new StringBuilder(1024);
stringBuilder.append(Settings.getInstance().getUserPath());
stringBuilder.append("arbitrary");
stringBuilder.append(File.separator);
stringBuilder.append(senderAddress);
stringBuilder.append(File.separator);
stringBuilder.append(arbitraryTransactionData.getService());
stringBuilder.append(File.separator);
stringBuilder.append(Base58.encode(arbitraryTransactionData.getSignature()));
stringBuilder.append(".raw");
return stringBuilder.toString();
}
private ArbitraryTransactionData getTransactionData(byte[] signature) throws DataException {
TransactionData transactionData = this.repository.getTransactionRepository().fromSignature(signature);
if (transactionData == null)
@ -64,99 +45,529 @@ public class HSQLDBArbitraryRepository implements ArbitraryRepository {
@Override
public boolean isDataLocal(byte[] signature) throws DataException {
ArbitraryTransactionData transactionData = getTransactionData(signature);
if (transactionData == null)
if (transactionData == null) {
return false;
}
// Raw data is always available
if (transactionData.getDataType() == DataType.RAW_DATA)
if (transactionData.getDataType() == DataType.RAW_DATA) {
return true;
}
String dataPathname = buildPathname(transactionData);
// Load hashes
byte[] hash = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
Path dataPath = Paths.get(dataPathname);
return Files.exists(dataPath);
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
// Check if we already have the complete data file or all chunks
if (arbitraryDataFile.allFilesExist()) {
return true;
}
return false;
}
@Override
public byte[] fetchData(byte[] signature) throws DataException {
ArbitraryTransactionData transactionData = getTransactionData(signature);
if (transactionData == null)
return null;
// Raw data is always available
if (transactionData.getDataType() == DataType.RAW_DATA)
return transactionData.getData();
String dataPathname = buildPathname(transactionData);
Path dataPath = Paths.get(dataPathname);
public byte[] fetchData(byte[] signature) {
try {
return Files.readAllBytes(dataPath);
} catch (IOException e) {
ArbitraryTransactionData transactionData = getTransactionData(signature);
if (transactionData == null) {
return null;
}
// Raw data is always available
if (transactionData.getDataType() == DataType.RAW_DATA) {
return transactionData.getData();
}
// Load hashes
byte[] digest = transactionData.getData();
byte[] metadataHash = transactionData.getMetadataHash();
// Load data file(s)
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(digest, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
// If we have the complete data file, return it
if (arbitraryDataFile.exists()) {
// Ensure the file's size matches the size reported by the transaction (throws a DataException if not)
arbitraryDataFile.validateFileSize(transactionData.getSize());
return arbitraryDataFile.getBytes();
}
// Alternatively, if we have all the chunks, combine them into a single file
if (arbitraryDataFile.allChunksExist()) {
arbitraryDataFile.join();
// Verify that the combined hash matches the expected hash
if (!Arrays.equals(digest, arbitraryDataFile.digest())) {
LOGGER.info(String.format("Hash mismatch for transaction: %s", Base58.encode(signature)));
return null;
}
// Ensure the file's size matches the size reported by the transaction
arbitraryDataFile.validateFileSize(transactionData.getSize());
return arbitraryDataFile.getBytes();
}
} catch (DataException e) {
LOGGER.info("Unable to fetch data for transaction {}: {}", Base58.encode(signature), e.getMessage());
return null;
}
return null;
}
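Purely as a usage illustration (not part of this diff), fetching the reassembled payload for a known transaction might look like the sketch below; the signature is a placeholder and it assumes Repository exposes a getArbitraryRepository() accessor:
import org.qortal.repository.Repository;
import org.qortal.repository.RepositoryManager;
public class FetchDataDemo {
    public static void main(String[] args) throws Exception {
        byte[] signature = new byte[64]; // placeholder transaction signature
        try (Repository repository = RepositoryManager.getRepository()) {
            byte[] payload = repository.getArbitraryRepository().fetchData(signature);
            System.out.println(payload == null ? "Data not held locally" : payload.length + " bytes");
        }
    }
}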
@Override
public void save(ArbitraryTransactionData arbitraryTransactionData) throws DataException {
// Already hashed? Nothing to do
if (arbitraryTransactionData.getDataType() == DataType.DATA_HASH)
if (arbitraryTransactionData.getDataType() == DataType.DATA_HASH) {
return;
}
// Trivial-sized payloads can remain in raw form
if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA && arbitraryTransactionData.getData().length <= MAX_RAW_DATA_SIZE)
if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA && arbitraryTransactionData.getData().length <= MAX_RAW_DATA_SIZE) {
return;
// Store non-trivial payloads in filesystem and convert transaction's data to hash form
byte[] rawData = arbitraryTransactionData.getData();
// Calculate hash of data and update our transaction to use that
byte[] dataHash = Crypto.digest(rawData);
arbitraryTransactionData.setData(dataHash);
arbitraryTransactionData.setDataType(DataType.DATA_HASH);
String dataPathname = buildPathname(arbitraryTransactionData);
Path dataPath = Paths.get(dataPathname);
// Make sure directory structure exists
try {
Files.createDirectories(dataPath.getParent());
} catch (IOException e) {
throw new DataException("Unable to create arbitrary transaction directory", e);
}
// Output actual transaction data
try (OutputStream dataOut = Files.newOutputStream(dataPath)) {
dataOut.write(rawData);
} catch (IOException e) {
throw new DataException("Unable to store arbitrary transaction data", e);
}
throw new IllegalStateException(String.format("Supplied data is larger than maximum size (%d bytes). Please use ArbitraryDataWriter.", MAX_RAW_DATA_SIZE));
}
@Override
public void delete(ArbitraryTransactionData arbitraryTransactionData) throws DataException {
// No need to do anything if we still only have raw data, and hence nothing saved in filesystem
if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA)
if (arbitraryTransactionData.getDataType() == DataType.RAW_DATA) {
return;
}
String dataPathname = buildPathname(arbitraryTransactionData);
Path dataPath = Paths.get(dataPathname);
try {
Files.deleteIfExists(dataPath);
// Load hashes
byte[] hash = arbitraryTransactionData.getData();
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
// Also attempt to delete parent <service> directory if empty
Path servicePath = dataPath.getParent();
Files.deleteIfExists(servicePath);
// Load data file(s)
byte[] signature = arbitraryTransactionData.getSignature();
ArbitraryDataFile arbitraryDataFile = ArbitraryDataFile.fromHash(hash, signature);
arbitraryDataFile.setMetadataHash(metadataHash);
// Also attempt to delete parent <sender's address> directory if empty
Path senderpath = servicePath.getParent();
Files.deleteIfExists(senderpath);
} catch (DirectoryNotEmptyException e) {
// One of the parent service/sender directories still has data from other transactions - this is OK
} catch (IOException e) {
throw new DataException("Unable to delete arbitrary transaction data", e);
// Delete file and chunks
arbitraryDataFile.deleteAll();
}
@Override
public List<ArbitraryTransactionData> getArbitraryTransactions(String name, Service service, String identifier, long since) throws DataException {
String sql = "SELECT type, reference, signature, creator, created_when, fee, " +
"tx_group_id, block_height, approval_status, approval_height, " +
"version, nonce, service, size, is_data_raw, data, metadata_hash, " +
"name, identifier, update_method, secret, compression FROM ArbitraryTransactions " +
"JOIN Transactions USING (signature) " +
"WHERE lower(name) = ? AND service = ?" +
"AND (identifier = ? OR (identifier IS NULL AND ? IS NULL))" +
"AND created_when >= ? ORDER BY created_when ASC";
List<ArbitraryTransactionData> arbitraryTransactionData = new ArrayList<>();
try (ResultSet resultSet = this.repository.checkedExecute(sql, name.toLowerCase(), service.value, identifier, identifier, since)) {
if (resultSet == null)
return null;
do {
//TransactionType type = TransactionType.valueOf(resultSet.getInt(1));
byte[] reference = resultSet.getBytes(2);
byte[] signature = resultSet.getBytes(3);
byte[] creatorPublicKey = resultSet.getBytes(4);
long timestamp = resultSet.getLong(5);
Long fee = resultSet.getLong(6);
if (fee == 0 && resultSet.wasNull())
fee = null;
int txGroupId = resultSet.getInt(7);
Integer blockHeight = resultSet.getInt(8);
if (blockHeight == 0 && resultSet.wasNull())
blockHeight = null;
ApprovalStatus approvalStatus = ApprovalStatus.valueOf(resultSet.getInt(9));
Integer approvalHeight = resultSet.getInt(10);
if (approvalHeight == 0 && resultSet.wasNull())
approvalHeight = null;
BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, creatorPublicKey, fee, approvalStatus, blockHeight, approvalHeight, signature);
int version = resultSet.getInt(11);
int nonce = resultSet.getInt(12);
Service serviceResult = Service.valueOf(resultSet.getInt(13));
int size = resultSet.getInt(14);
boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null to false
DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
byte[] data = resultSet.getBytes(16);
byte[] metadataHash = resultSet.getBytes(17);
String nameResult = resultSet.getString(18);
String identifierResult = resultSet.getString(19);
Method method = Method.valueOf(resultSet.getInt(20));
byte[] secret = resultSet.getBytes(21);
Compression compression = Compression.valueOf(resultSet.getInt(22));
// FUTURE: get payments from signature if needed. Avoiding for now to reduce database calls.
ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
version, serviceResult, nonce, size, nameResult, identifierResult, method, secret,
compression, data, dataType, metadataHash, null);
arbitraryTransactionData.add(transactionData);
} while (resultSet.next());
return arbitraryTransactionData;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary transactions from repository", e);
}
}
@Override
public ArbitraryTransactionData getLatestTransaction(String name, Service service, Method method, String identifier) throws DataException {
StringBuilder sql = new StringBuilder(1024);
sql.append("SELECT type, reference, signature, creator, created_when, fee, " +
"tx_group_id, block_height, approval_status, approval_height, " +
"version, nonce, service, size, is_data_raw, data, metadata_hash, " +
"name, identifier, update_method, secret, compression FROM ArbitraryTransactions " +
"JOIN Transactions USING (signature) " +
"WHERE lower(name) = ? AND service = ? " +
"AND (identifier = ? OR (identifier IS NULL AND ? IS NULL))");
if (method != null) {
sql.append(" AND update_method = ");
sql.append(method.value);
}
sql.append("ORDER BY created_when DESC LIMIT 1");
try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), name.toLowerCase(), service.value, identifier, identifier)) {
if (resultSet == null)
return null;
//TransactionType type = TransactionType.valueOf(resultSet.getInt(1));
byte[] reference = resultSet.getBytes(2);
byte[] signature = resultSet.getBytes(3);
byte[] creatorPublicKey = resultSet.getBytes(4);
long timestamp = resultSet.getLong(5);
Long fee = resultSet.getLong(6);
if (fee == 0 && resultSet.wasNull())
fee = null;
int txGroupId = resultSet.getInt(7);
Integer blockHeight = resultSet.getInt(8);
if (blockHeight == 0 && resultSet.wasNull())
blockHeight = null;
ApprovalStatus approvalStatus = ApprovalStatus.valueOf(resultSet.getInt(9));
Integer approvalHeight = resultSet.getInt(10);
if (approvalHeight == 0 && resultSet.wasNull())
approvalHeight = null;
BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, creatorPublicKey, fee, approvalStatus, blockHeight, approvalHeight, signature);
int version = resultSet.getInt(11);
int nonce = resultSet.getInt(12);
Service serviceResult = Service.valueOf(resultSet.getInt(13));
int size = resultSet.getInt(14);
boolean isDataRaw = resultSet.getBoolean(15); // NOT NULL, so no null to false
DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
byte[] data = resultSet.getBytes(16);
byte[] metadataHash = resultSet.getBytes(17);
String nameResult = resultSet.getString(18);
String identifierResult = resultSet.getString(19);
Method methodResult = Method.valueOf(resultSet.getInt(20));
byte[] secret = resultSet.getBytes(21);
Compression compression = Compression.valueOf(resultSet.getInt(22));
// FUTURE: get payments from signature if needed. Avoiding for now to reduce database calls.
ArbitraryTransactionData transactionData = new ArbitraryTransactionData(baseTransactionData,
version, serviceResult, nonce, size, nameResult, identifierResult, methodResult, secret,
compression, data, dataType, metadataHash, null);
return transactionData;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary transactions from repository", e);
}
}
@Override
public List<ArbitraryResourceInfo> getArbitraryResources(Service service, String identifier, String name,
boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException {
StringBuilder sql = new StringBuilder(512);
List<Object> bindParams = new ArrayList<>();
sql.append("SELECT name, service, identifier FROM ArbitraryTransactions WHERE 1=1");
if (service != null) {
sql.append(" AND service = ");
sql.append(service.value);
}
if (defaultResource) {
// Default resource requested - use NULL identifier
sql.append(" AND identifier IS NULL");
}
else {
// Non-default resource requested
// Use an exact match identifier, or list all if supplied identifier is null
sql.append(" AND (identifier = ? OR (? IS NULL))");
bindParams.add(identifier);
bindParams.add(identifier);
}
if (name != null) {
sql.append(" AND name = ?");
bindParams.add(name);
}
sql.append(" GROUP BY name, service, identifier ORDER BY name");
if (reverse != null && reverse) {
sql.append(" DESC");
}
HSQLDBRepository.limitOffsetSql(sql, limit, offset);
List<ArbitraryResourceInfo> arbitraryResources = new ArrayList<>();
try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), bindParams.toArray())) {
if (resultSet == null)
return null;
do {
String nameResult = resultSet.getString(1);
Service serviceResult = Service.valueOf(resultSet.getInt(2));
String identifierResult = resultSet.getString(3);
// We should filter out resources without names
if (nameResult == null) {
continue;
}
ArbitraryResourceInfo arbitraryResourceInfo = new ArbitraryResourceInfo();
arbitraryResourceInfo.name = nameResult;
arbitraryResourceInfo.service = serviceResult;
arbitraryResourceInfo.identifier = identifierResult;
arbitraryResources.add(arbitraryResourceInfo);
} while (resultSet.next());
return arbitraryResources;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary transactions from repository", e);
}
}
@Override
public List<ArbitraryResourceNameInfo> getArbitraryResourceCreatorNames(Service service, String identifier,
boolean defaultResource, Integer limit, Integer offset, Boolean reverse) throws DataException {
StringBuilder sql = new StringBuilder(512);
sql.append("SELECT name FROM ArbitraryTransactions WHERE 1=1");
if (service != null) {
sql.append(" AND service = ");
sql.append(service.value);
}
if (defaultResource) {
// Default resource requested - use NULL identifier
// The AND ? IS NULL AND ? IS NULL is a hack to make use of the identifier params in checkedExecute()
identifier = null;
sql.append(" AND (identifier IS NULL AND ? IS NULL AND ? IS NULL)");
}
else {
// Non-default resource requested
// Use an exact match identifier, or list all if supplied identifier is null
sql.append(" AND (identifier = ? OR (? IS NULL))");
}
sql.append(" GROUP BY name ORDER BY name");
if (reverse != null && reverse) {
sql.append(" DESC");
}
HSQLDBRepository.limitOffsetSql(sql, limit, offset);
List<ArbitraryResourceNameInfo> arbitraryResources = new ArrayList<>();
try (ResultSet resultSet = this.repository.checkedExecute(sql.toString(), identifier, identifier)) {
if (resultSet == null)
return null;
do {
String name = resultSet.getString(1);
// We should filter out resources without names
if (name == null) {
continue;
}
ArbitraryResourceNameInfo arbitraryResourceNameInfo = new ArbitraryResourceNameInfo();
arbitraryResourceNameInfo.name = name;
arbitraryResources.add(arbitraryResourceNameInfo);
} while (resultSet.next());
return arbitraryResources;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary transactions from repository", e);
}
}
// Peer file tracking
/**
* Fetch a list of peers that have reported to be holding chunks related to
* supplied transaction signature.
* @param signature
* @return a list of ArbitraryPeerData objects, or null if none found
* @throws DataException
*/
@Override
public List<ArbitraryPeerData> getArbitraryPeerDataForSignature(byte[] signature) throws DataException {
// Hash the signature so it fits within 32 bytes
byte[] hashedSignature = Crypto.digest(signature);
String sql = "SELECT hash, peer_address, successes, failures, last_attempted, last_retrieved " +
"FROM ArbitraryPeers " +
"WHERE hash = ?";
List<ArbitraryPeerData> arbitraryPeerData = new ArrayList<>();
try (ResultSet resultSet = this.repository.checkedExecute(sql, hashedSignature)) {
if (resultSet == null)
return null;
do {
byte[] hash = resultSet.getBytes(1);
String peerAddr = resultSet.getString(2);
Integer successes = resultSet.getInt(3);
Integer failures = resultSet.getInt(4);
Long lastAttempted = resultSet.getLong(5);
Long lastRetrieved = resultSet.getLong(6);
ArbitraryPeerData peerData = new ArbitraryPeerData(hash, peerAddr, successes, failures,
lastAttempted, lastRetrieved);
arbitraryPeerData.add(peerData);
} while (resultSet.next());
return arbitraryPeerData;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary peer data from repository", e);
}
}
public ArbitraryPeerData getArbitraryPeerDataForSignatureAndPeer(byte[] signature, String peerAddress) throws DataException {
// Hash the signature so it fits within 32 bytes
byte[] hashedSignature = Crypto.digest(signature);
String sql = "SELECT hash, peer_address, successes, failures, last_attempted, last_retrieved " +
"FROM ArbitraryPeers " +
"WHERE hash = ? AND peer_address = ?";
try (ResultSet resultSet = this.repository.checkedExecute(sql, hashedSignature, peerAddress)) {
if (resultSet == null)
return null;
byte[] hash = resultSet.getBytes(1);
String peerAddr = resultSet.getString(2);
Integer successes = resultSet.getInt(3);
Integer failures = resultSet.getInt(4);
Long lastAttempted = resultSet.getLong(5);
Long lastRetrieved = resultSet.getLong(6);
ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(hash, peerAddr, successes, failures,
lastAttempted, lastRetrieved);
return arbitraryPeerData;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary peer data from repository", e);
}
}
public ArbitraryPeerData getArbitraryPeerDataForSignatureAndHost(byte[] signature, String host) throws DataException {
// Hash the signature so it fits within 32 bytes
byte[] hashedSignature = Crypto.digest(signature);
// Create a host wildcard string which allows any port
String hostWildcard = String.format("%s:%%", host);
String sql = "SELECT hash, peer_address, successes, failures, last_attempted, last_retrieved " +
"FROM ArbitraryPeers " +
"WHERE hash = ? AND peer_address LIKE ?";
try (ResultSet resultSet = this.repository.checkedExecute(sql, hashedSignature, hostWildcard)) {
if (resultSet == null)
return null;
byte[] hash = resultSet.getBytes(1);
String peerAddr = resultSet.getString(2);
Integer successes = resultSet.getInt(3);
Integer failures = resultSet.getInt(4);
Long lastAttempted = resultSet.getLong(5);
Long lastRetrieved = resultSet.getLong(6);
ArbitraryPeerData arbitraryPeerData = new ArbitraryPeerData(hash, peerAddr, successes, failures,
lastAttempted, lastRetrieved);
return arbitraryPeerData;
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary peer data from repository", e);
}
}
@Override
public void save(ArbitraryPeerData arbitraryPeerData) throws DataException {
HSQLDBSaver saveHelper = new HSQLDBSaver("ArbitraryPeers");
saveHelper.bind("hash", arbitraryPeerData.getHash())
.bind("peer_address", arbitraryPeerData.getPeerAddress())
.bind("successes", arbitraryPeerData.getSuccesses())
.bind("failures", arbitraryPeerData.getFailures())
.bind("last_attempted", arbitraryPeerData.getLastAttempted())
.bind("last_retrieved", arbitraryPeerData.getLastRetrieved());
try {
saveHelper.execute(this.repository);
} catch (SQLException e) {
throw new DataException("Unable to save ArbitraryPeerData into repository", e);
}
}
@Override
public void delete(ArbitraryPeerData arbitraryPeerData) throws DataException {
try {
// Remove peer/hash combination
this.repository.delete("ArbitraryPeers", "hash = ? AND peer_address = ?",
arbitraryPeerData.getHash(), arbitraryPeerData.getPeerAddress());
} catch (SQLException e) {
throw new DataException("Unable to delete arbitrary peer data from repository", e);
}
}
@Override
public void deleteArbitraryPeersWithSignature(byte[] signature) throws DataException {
byte[] hash = Crypto.digest(signature);
try {
// Remove all records of peers hosting supplied signature
this.repository.delete("ArbitraryPeers", "hash = ?", hash);
} catch (SQLException e) {
throw new DataException("Unable to delete arbitrary peer data from repository", e);
}
}
}
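For reference, a minimal JDK-only sketch of the two lookup keys used by the queries above: the 32-byte SHA-256 digest that Crypto.digest() produces from a transaction signature, and the "host:%" LIKE pattern that matches a peer address on any port. The class and method names below are illustrative only, not part of the repository.

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class ArbitraryPeersKeyExample {

    /** Reduce a transaction signature to the 32-byte key stored in the ArbitraryPeers hash column */
    static byte[] hashedSignature(byte[] signature) throws NoSuchAlgorithmException {
        // SHA-256 always yields 32 bytes, matching the VARBINARY(32) column
        return MessageDigest.getInstance("SHA-256").digest(signature);
    }

    /** Build the LIKE pattern used to match a host regardless of port, e.g. "203.0.113.5:%" */
    static String hostWildcard(String host) {
        return String.format("%s:%%", host);
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] fakeSignature = new byte[64]; // placeholder signature for illustration
        System.out.println(hashedSignature(fakeSignature).length); // 32
        System.out.println(hostWildcard("203.0.113.5"));           // 203.0.113.5:%
    }
}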

View File

@ -286,7 +286,6 @@ public class HSQLDBDatabaseUpdates {
+ "service SMALLINT NOT NULL, is_data_raw BOOLEAN NOT NULL, data ArbitraryData NOT NULL, "
+ TRANSACTION_KEYS + ")");
// NB: Actual data payload stored elsewhere
// For the future: data payload should be encrypted, at the very least with transaction's reference as the seed for the encryption key
break;
case 8:
@ -899,6 +898,53 @@ public class HSQLDBDatabaseUpdates {
stmt.execute("SET TABLE BlockArchive NEW SPACE");
break;
case 37:
// ARBITRARY transaction updates for off-chain data storage
// We may want to use a nonce rather than a transaction fee on the data chain
stmt.execute("ALTER TABLE ArbitraryTransactions ADD nonce INT NOT NULL DEFAULT 0");
// We need to know the total size of the data file(s) associated with each transaction
stmt.execute("ALTER TABLE ArbitraryTransactions ADD size INT NOT NULL DEFAULT 0");
// Larger data files need to be split into chunks, for easier transmission and greater decentralization
// We store their hashes (and possibly other things) in a metadata file
stmt.execute("ALTER TABLE ArbitraryTransactions ADD metadata_hash VARBINARY(32)");
// For finding transactions by file hash
stmt.execute("CREATE INDEX ArbitraryDataIndex ON ArbitraryTransactions (is_data_raw, data)");
break;
case 38:
// We need the ability for arbitrary transactions to be associated with a name
stmt.execute("ALTER TABLE ArbitraryTransactions ADD name RegisteredName");
// A "method" specifies how the data should be applied (e.g. PUT or PATCH)
stmt.execute("ALTER TABLE ArbitraryTransactions ADD update_method INTEGER NOT NULL DEFAULT 0");
// For public data, the AES shared secret needs to be available. This is more for data obfuscation as opposed to actual encryption.
stmt.execute("ALTER TABLE ArbitraryTransactions ADD secret VARBINARY(32)");
// We want to support compressed and uncompressed data, as well as different compression algorithms
stmt.execute("ALTER TABLE ArbitraryTransactions ADD compression INTEGER NOT NULL DEFAULT 0");
// An optional identifier string can be used to allow more than one resource per user/service combo
stmt.execute("ALTER TABLE ArbitraryTransactions ADD identifier VARCHAR(64)");
// For finding transactions by registered name
stmt.execute("CREATE INDEX ArbitraryNameIndex ON ArbitraryTransactions (name)");
break;
case 39:
// Add DHT-style lookup table to track file locations
// This maps ARBITRARY transactions to peer addresses, but also includes additional metadata to
// track the local success rate and reachability. It is keyed by a "hash" column, kept
// generic so that we aren't limited to transaction signatures only.
// Multiple rows with the same hash are allowed (one per peer), so each peer can carry its own
// metadata. Longer term it could be reshaped to one row per hash if this proves too verbose.
// Transaction signatures are hashed to 32 bytes using SHA256. In doing this we lose the ability
// to join against transaction tables, but on balance the space savings seem more important.
stmt.execute("CREATE TABLE ArbitraryPeers (hash VARBINARY(32) NOT NULL, "
+ "peer_address VARCHAR(255), successes INTEGER NOT NULL, failures INTEGER NOT NULL, "
+ "last_attempted EpochMillis NOT NULL, last_retrieved EpochMillis NOT NULL, "
+ "PRIMARY KEY (hash, peer_address))");
// For finding peers by data hash
stmt.execute("CREATE INDEX ArbitraryPeersHashIndex ON ArbitraryPeers (hash)");
break;
default:
// nothing to do
return false;

View File

@ -4,6 +4,7 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.PaymentData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.BaseTransactionData;
@ -20,21 +21,31 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos
}
TransactionData fromBase(BaseTransactionData baseTransactionData) throws DataException {
String sql = "SELECT version, service, is_data_raw, data from ArbitraryTransactions WHERE signature = ?";
String sql = "SELECT version, nonce, service, size, is_data_raw, data, metadata_hash, " +
"name, identifier, update_method, secret, compression from ArbitraryTransactions " +
"WHERE signature = ?";
try (ResultSet resultSet = this.repository.checkedExecute(sql, baseTransactionData.getSignature())) {
if (resultSet == null)
return null;
int version = resultSet.getInt(1);
int service = resultSet.getInt(2);
boolean isDataRaw = resultSet.getBoolean(3); // NOT NULL, so no null to false
int nonce = resultSet.getInt(2);
Service service = Service.valueOf(resultSet.getInt(3));
int size = resultSet.getInt(4);
boolean isDataRaw = resultSet.getBoolean(5); // NOT NULL, so no null to false
DataType dataType = isDataRaw ? DataType.RAW_DATA : DataType.DATA_HASH;
byte[] data = resultSet.getBytes(4);
byte[] data = resultSet.getBytes(6);
byte[] metadataHash = resultSet.getBytes(7);
String name = resultSet.getString(8);
String identifier = resultSet.getString(9);
ArbitraryTransactionData.Method method = ArbitraryTransactionData.Method.valueOf(resultSet.getInt(10));
byte[] secret = resultSet.getBytes(11);
ArbitraryTransactionData.Compression compression = ArbitraryTransactionData.Compression.valueOf(resultSet.getInt(12));
List<PaymentData> payments = this.getPaymentsFromSignature(baseTransactionData.getSignature());
return new ArbitraryTransactionData(baseTransactionData, version, service, data, dataType, payments);
return new ArbitraryTransactionData(baseTransactionData, version, service, nonce, size, name,
identifier, method, secret, compression, data, dataType, metadataHash, payments);
} catch (SQLException e) {
throw new DataException("Unable to fetch arbitrary transaction from repository", e);
}
@ -51,8 +62,12 @@ public class HSQLDBArbitraryTransactionRepository extends HSQLDBTransactionRepos
HSQLDBSaver saveHelper = new HSQLDBSaver("ArbitraryTransactions");
saveHelper.bind("signature", arbitraryTransactionData.getSignature()).bind("sender", arbitraryTransactionData.getSenderPublicKey())
.bind("version", arbitraryTransactionData.getVersion()).bind("service", arbitraryTransactionData.getService())
.bind("is_data_raw", arbitraryTransactionData.getDataType() == DataType.RAW_DATA).bind("data", arbitraryTransactionData.getData());
.bind("version", arbitraryTransactionData.getVersion()).bind("service", arbitraryTransactionData.getService().value)
.bind("nonce", arbitraryTransactionData.getNonce()).bind("size", arbitraryTransactionData.getSize())
.bind("is_data_raw", arbitraryTransactionData.getDataType() == DataType.RAW_DATA).bind("data", arbitraryTransactionData.getData())
.bind("metadata_hash", arbitraryTransactionData.getMetadataHash()).bind("name", arbitraryTransactionData.getName())
.bind("identifier", arbitraryTransactionData.getIdentifier()).bind("update_method", arbitraryTransactionData.getMethod().value)
.bind("secret", arbitraryTransactionData.getSecret()).bind("compression", arbitraryTransactionData.getCompression().value);
try {
saveHelper.execute(this.repository);

View File

@ -16,6 +16,7 @@ import java.util.Map;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.qortal.api.resource.TransactionsResource.ConfirmationStatus;
import org.qortal.arbitrary.misc.Service;
import org.qortal.data.PaymentData;
import org.qortal.data.group.GroupApprovalData;
import org.qortal.data.transaction.BaseTransactionData;
@ -386,8 +387,8 @@ public class HSQLDBTransactionRepository implements TransactionRepository {
@Override
public List<byte[]> getSignaturesMatchingCriteria(Integer startBlock, Integer blockLimit, Integer txGroupId,
List<TransactionType> txTypes, Integer service, String address,
ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException {
List<TransactionType> txTypes, Service service, String name, String address,
ConfirmationStatus confirmationStatus, Integer limit, Integer offset, Boolean reverse) throws DataException {
List<byte[]> signatures = new ArrayList<>();
boolean hasAddress = address != null && !address.isEmpty();
@ -412,8 +413,8 @@ public class HSQLDBTransactionRepository implements TransactionRepository {
signatureColumn = "TransactionParticipants.signature";
}
if (service != null) {
// This is for ARBITRARY transactions
if (service != null || name != null) {
// These are for ARBITRARY transactions
tables.append(" LEFT OUTER JOIN ArbitraryTransactions ON ArbitraryTransactions.signature = Transactions.signature");
}
@ -466,7 +467,12 @@ public class HSQLDBTransactionRepository implements TransactionRepository {
if (service != null) {
whereClauses.add("ArbitraryTransactions.service = ?");
bindParams.add(service);
bindParams.add(service.value);
}
if (name != null) {
whereClauses.add("lower(ArbitraryTransactions.name) = ?");
bindParams.add(name.toLowerCase());
}
if (hasAddress) {

View File

@ -5,8 +5,8 @@ import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.List;
import java.util.Locale;
import java.nio.file.Paths;
import java.util.*;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
@ -22,19 +22,27 @@ import org.eclipse.persistence.exceptions.XMLMarshalException;
import org.eclipse.persistence.jaxb.JAXBContextFactory;
import org.eclipse.persistence.jaxb.UnmarshallerProperties;
import org.qortal.block.BlockChain;
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager.*;
import org.qortal.crosschain.Bitcoin.BitcoinNet;
import org.qortal.crosschain.Litecoin.LitecoinNet;
import org.qortal.crosschain.Dogecoin.DogecoinNet;
import org.qortal.utils.EnumUtils;
// All properties to be converted to JSON via JAXB
@XmlAccessorType(XmlAccessType.FIELD)
public class Settings {
private static final int MAINNET_LISTEN_PORT = 12392;
private static final int TESTNET_LISTEN_PORT = 62392;
private static final int MAINNET_LISTEN_PORT = 12394;
private static final int TESTNET_LISTEN_PORT = 62394;
private static final int MAINNET_API_PORT = 12391;
private static final int TESTNET_API_PORT = 62391;
private static final int MAINNET_API_PORT = 12393;
private static final int TESTNET_API_PORT = 62393;
private static final int MAINNET_DOMAIN_MAP_PORT = 80;
private static final int TESTNET_DOMAIN_MAP_PORT = 8080;
private static final int MAINNET_GATEWAY_PORT = 80;
private static final int TESTNET_GATEWAY_PORT = 8080;
private static final Logger LOGGER = LogManager.getLogger(Settings.class);
private static final String SETTINGS_FILENAME = "settings.json";
@ -64,20 +72,36 @@ public class Settings {
// API-related
private boolean apiEnabled = true;
private Integer apiPort;
private boolean apiWhitelistEnabled = true;
private String[] apiWhitelist = new String[] {
"::1", "127.0.0.1"
};
private Boolean apiRestricted;
/** Legacy API key (deprecated Nov 2021). Use /admin/apikey/generate API endpoint instead */
private String apiKey = null;
/** Whether to disable API key or loopback address checking
* IMPORTANT: do not disable for shared nodes or low-security local networks */
private boolean apiKeyDisabled = false;
/** Storage location for API key generated by API (Nov 2021 onwards) */
private String apiKeyPath = "";
/** Whether to allow automatic authentication from localhost (loopback) addresses */
private boolean localAuthBypassEnabled = false;
private Boolean apiRestricted;
private boolean apiLoggingEnabled = false;
private boolean apiDocumentationEnabled = false;
// Both of these need to be set for API to use SSL
private String sslKeystorePathname = null;
private String sslKeystorePassword = null;
// Domain mapping
private Integer domainMapPort;
private boolean domainMapEnabled = false;
private boolean domainMapLoggingEnabled = false;
private List<DomainMap> domainMap = null;
// Gateway
private Integer gatewayPort;
private boolean gatewayEnabled = false;
private boolean gatewayLoggingEnabled = false;
// Specific to this node
private boolean wipeUnconfirmedOnStart = false;
/** Maximum number of unconfirmed transactions allowed per account */
@ -182,6 +206,17 @@ public class Settings {
/** Maximum time (in seconds) that we should attempt to remain connected to a peer for */
private int maxPeerConnectionTime = 20 * 60; // seconds
/** Whether to sync multiple blocks at once in normal operation */
private boolean fastSyncEnabled = true;
/** Whether to sync multiple blocks at once when the peer has a different chain */
private boolean fastSyncEnabledWhenResolvingFork = true;
/** Maximum number of blocks to request at once */
private int maxBlocksPerRequest = 100;
/** Maximum number of blocks this node will serve in a single response */
private int maxBlocksPerResponse = 200;
/** Maximum number of untrimmed blocks this node will serve in a single response */
private int maxUntrimmedBlocksPerResponse = 10;
// Which blockchains this node is running
private String blockchainConfig = null; // use default from resources
private BitcoinNet bitcoinNet = BitcoinNet.MAIN;
@ -238,6 +273,69 @@ public class Settings {
/** Additional offset added to values returned by NTP.getTime() */
private Long testNtpOffset = null;
// Data storage (QDN)
/** Data storage enabled/disabled */
private boolean qdnEnabled = true;
/** Data storage path. */
private String dataPath = "data";
/** Data storage path (for temporary data). Defaults to {dataPath}/_temp */
private String tempDataPath = null;
/** Storage policy to indicate which data should be hosted */
private String storagePolicy = "FOLLOWED_AND_VIEWED";
/** Whether to allow data outside of the storage policy to be relayed between other peers */
private boolean relayModeEnabled = true;
/** Whether to remember which data was originally uploaded using this node.
* This prevents auto deletion of own files when storage limits are reached. */
private boolean originalCopyIndicatorFileEnabled = true;
/** Whether to make connections directly with peers that have the required data */
private boolean directDataRetrievalEnabled = true;
/** Expiry time (ms) for (unencrypted) built/cached data */
private Long builtDataExpiryInterval = 30 * 24 * 60 * 60 * 1000L; // 30 days
/** Whether to validate every layer when building arbitrary data, or just the final layer */
private boolean validateAllDataLayers = false;
/** Whether to allow public (decryptable) data to be stored */
private boolean publicDataEnabled = true;
/** Whether to allow private (non-decryptable) data to be stored */
private boolean privateDataEnabled = false;
/** Maximum total size of hosted data, in bytes. Unlimited if null */
private Long maxStorageCapacity = null;
// Domain mapping
public static class DomainMap {
private String domain;
private String name;
private DomainMap() { // makes JAXB happy; will never be invoked
}
public String getDomain() {
return domain;
}
public void setDomain(String domain) {
this.domain = domain;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
// Constructors
private Settings() {
@ -362,6 +460,13 @@ public class Settings {
if (this.apiKey != null && this.apiKey.trim().length() < 8)
throwValidationError("apiKey must be at least 8 characters");
try {
StoragePolicy.valueOf(this.storagePolicy);
} catch (IllegalArgumentException ex) {
String possibleValues = EnumUtils.getNames(StoragePolicy.class, ", ");
throwValidationError(String.format("storagePolicy must be one of: %s", possibleValues));
}
}
// Getters / setters
@ -398,6 +503,10 @@ public class Settings {
}
public String[] getApiWhitelist() {
if (!this.apiWhitelistEnabled) {
// Allow all connections if the whitelist is disabled
return new String[] {"0.0.0.0/0", "::/0"};
}
return this.apiWhitelist;
}
@ -414,8 +523,12 @@ public class Settings {
return this.apiKey;
}
public boolean isApiKeyDisabled() {
return this.apiKeyDisabled;
public String getApiKeyPath() {
return this.apiKeyPath;
}
public boolean isLocalAuthBypassEnabled() {
return this.localAuthBypassEnabled;
}
public boolean isApiLoggingEnabled() {
@ -434,6 +547,51 @@ public class Settings {
return this.sslKeystorePassword;
}
public int getDomainMapPort() {
if (this.domainMapPort != null)
return this.domainMapPort;
return this.isTestNet ? TESTNET_DOMAIN_MAP_PORT : MAINNET_DOMAIN_MAP_PORT;
}
public boolean isDomainMapEnabled() {
return this.domainMapEnabled;
}
public boolean isDomainMapLoggingEnabled() {
return this.domainMapLoggingEnabled;
}
public Map<String, String> getSimpleDomainMap() {
HashMap<String, String> map = new HashMap<>();
for (DomainMap dMap : this.domainMap) {
map.put(dMap.getDomain(), dMap.getName());
// If the domain doesn't include a subdomain then add a www. alternative
if (dMap.getDomain().chars().filter(c -> c == '.').count() == 1) {
map.put("www.".concat(dMap.getDomain()), dMap.getName());
}
}
return map;
}
public int getGatewayPort() {
if (this.gatewayPort != null)
return this.gatewayPort;
return this.isTestNet ? TESTNET_GATEWAY_PORT : MAINNET_GATEWAY_PORT;
}
public boolean isGatewayEnabled() {
return this.gatewayEnabled;
}
public boolean isGatewayLoggingEnabled() {
return this.gatewayLoggingEnabled;
}
public boolean getWipeUnconfirmedOnStart() {
return this.wipeUnconfirmedOnStart;
}
@ -539,6 +697,20 @@ public class Settings {
return this.bootstrapFilenamePrefix;
}
public boolean isFastSyncEnabled() {
return this.fastSyncEnabled;
}
public boolean isFastSyncEnabledWhenResolvingFork() {
return this.fastSyncEnabledWhenResolvingFork;
}
public int getMaxBlocksPerRequest() { return this.maxBlocksPerRequest; }
public int getMaxBlocksPerResponse() { return this.maxBlocksPerResponse; }
public int getMaxUntrimmedBlocksPerResponse() { return this.maxUntrimmedBlocksPerResponse; }
public boolean isAutoUpdateEnabled() {
return this.autoUpdateEnabled;
}
@ -664,4 +836,56 @@ public class Settings {
return this.bootstrap;
}
public boolean isQdnEnabled() {
return this.qdnEnabled;
}
public String getDataPath() {
return this.dataPath;
}
public String getTempDataPath() {
if (this.tempDataPath != null) {
return this.tempDataPath;
}
// Default the temp path to a "_temp" folder inside the data directory
return Paths.get(this.getDataPath(), "_temp").toString();
}
public StoragePolicy getStoragePolicy() {
return StoragePolicy.valueOf(this.storagePolicy);
}
public boolean isRelayModeEnabled() {
return this.relayModeEnabled;
}
public boolean isDirectDataRetrievalEnabled() {
return this.directDataRetrievalEnabled;
}
public boolean isOriginalCopyIndicatorFileEnabled() {
return this.originalCopyIndicatorFileEnabled;
}
public Long getBuiltDataExpiryInterval() {
return this.builtDataExpiryInterval;
}
public boolean shouldValidateAllDataLayers() {
return this.validateAllDataLayers;
}
public boolean isPublicDataEnabled() {
return this.publicDataEnabled;
}
public boolean isPrivateDataEnabled() {
return this.privateDataEnabled;
}
public Long getMaxStorageCapacity() {
return this.maxStorageCapacity;
}
}
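One detail worth calling out from getSimpleDomainMap() above: a mapped domain containing exactly one dot (i.e. no subdomain) automatically gains a "www." alias. A small stand-alone illustration of that rule, using hypothetical domain names; the class below is not part of Settings.

import java.util.HashMap;
import java.util.Map;

public class DomainMapExample {

    /** Illustration of the aliasing rule in getSimpleDomainMap(): bare domains also map via "www." */
    static Map<String, String> withWwwAliases(Map<String, String> domainToName) {
        Map<String, String> map = new HashMap<>();
        for (Map.Entry<String, String> entry : domainToName.entrySet()) {
            map.put(entry.getKey(), entry.getValue());
            // Exactly one dot means no subdomain, so add a "www." alternative
            if (entry.getKey().chars().filter(c -> c == '.').count() == 1) {
                map.put("www." + entry.getKey(), entry.getValue());
            }
        }
        return map;
    }

    public static void main(String[] args) {
        Map<String, String> configured = Map.of(
                "example.com", "ExampleName",      // gains www.example.com
                "blog.example.com", "ExampleName"  // left as-is
        );
        System.out.println(withWwwAliases(configured));
    }
}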

View File

@ -1,15 +1,30 @@
package org.qortal.transaction;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import org.qortal.account.Account;
import org.qortal.controller.arbitrary.ArbitraryDataManager;
import org.qortal.controller.arbitrary.ArbitraryDataStorageManager;
import org.qortal.crypto.Crypto;
import org.qortal.crypto.MemoryPoW;
import org.qortal.data.PaymentData;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.network.Network;
import org.qortal.network.message.ArbitrarySignaturesMessage;
import org.qortal.network.message.Message;
import org.qortal.payment.Payment;
import org.qortal.repository.DataException;
import org.qortal.repository.Repository;
import org.qortal.arbitrary.ArbitraryDataFile;
import org.qortal.transform.TransformationException;
import org.qortal.transform.transaction.ArbitraryTransactionTransformer;
import org.qortal.transform.transaction.TransactionTransformer;
import org.qortal.utils.ArbitraryTransactionUtils;
public class ArbitraryTransaction extends Transaction {
@ -18,6 +33,10 @@ public class ArbitraryTransaction extends Transaction {
// Other useful constants
public static final int MAX_DATA_SIZE = 4000;
public static final int MAX_METADATA_LENGTH = 32;
public static final int HASH_LENGTH = TransactionTransformer.SHA256_LENGTH;
public static final int POW_BUFFER_SIZE = 8 * 1024 * 1024; // bytes
public static final int MAX_IDENTIFIER_LENGTH = 64;
// Constructors
@ -42,17 +61,148 @@ public class ArbitraryTransaction extends Transaction {
// Processing
public void computeNonce() throws DataException {
byte[] transactionBytes;
try {
transactionBytes = TransactionTransformer.toBytesForSigning(this.transactionData);
} catch (TransformationException e) {
throw new RuntimeException("Unable to transform transaction to byte array for verification", e);
}
// Clear nonce from transactionBytes
ArbitraryTransactionTransformer.clearNonce(transactionBytes);
// Calculate nonce
int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty();
this.arbitraryTransactionData.setNonce(MemoryPoW.compute2(transactionBytes, POW_BUFFER_SIZE, difficulty));
}
@Override
public ValidationResult isFeeValid() throws DataException {
if (this.transactionData.getFee() < 0)
return ValidationResult.NEGATIVE_FEE;
return ValidationResult.OK;
}
@Override
public boolean hasValidReference() throws DataException {
// We shouldn't really get this far, but just in case:
if (this.arbitraryTransactionData.getReference() == null) {
return false;
}
// If the account doesn't currently have a last reference, and the fee is 0, we will allow any value.
// This ensures that the first transaction for an account will be valid whilst still validating
// the last reference from the second transaction onwards. By checking for a zero fee, we ensure
// standard last reference validation when fee > 0.
Account creator = getCreator();
Long fee = this.arbitraryTransactionData.getFee();
if (creator.getLastReference() == null && fee == 0) {
return true;
}
return super.hasValidReference();
}
@Override
public ValidationResult isValid() throws DataException {
// Check data length
if (arbitraryTransactionData.getData().length < 1 || arbitraryTransactionData.getData().length > MAX_DATA_SIZE)
// Check that some data - or a data hash - has been supplied
if (arbitraryTransactionData.getData() == null) {
return ValidationResult.INVALID_DATA_LENGTH;
}
// Check data length
if (arbitraryTransactionData.getData().length < 1 || arbitraryTransactionData.getData().length > MAX_DATA_SIZE) {
return ValidationResult.INVALID_DATA_LENGTH;
}
// Check hashes and metadata
if (arbitraryTransactionData.getDataType() == ArbitraryTransactionData.DataType.DATA_HASH) {
// Check length of data hash
if (arbitraryTransactionData.getData().length != HASH_LENGTH) {
return ValidationResult.INVALID_DATA_LENGTH;
}
// Version 5+
if (arbitraryTransactionData.getVersion() >= 5) {
byte[] metadata = arbitraryTransactionData.getMetadataHash();
// Check maximum length of metadata hash
if (metadata != null && metadata.length > MAX_METADATA_LENGTH) {
return ValidationResult.INVALID_DATA_LENGTH;
}
}
}
// Check raw data
if (arbitraryTransactionData.getDataType() == ArbitraryTransactionData.DataType.RAW_DATA) {
// Version 5+
if (arbitraryTransactionData.getVersion() >= 5) {
// Check reported length of the raw data
// We should not download the raw data, so validation of that will be performed later
if (arbitraryTransactionData.getSize() > ArbitraryDataFile.MAX_FILE_SIZE) {
return ValidationResult.INVALID_DATA_LENGTH;
}
}
}
// Check name if one has been included
if (arbitraryTransactionData.getName() != null) {
NameData nameData = this.repository.getNameRepository().fromName(arbitraryTransactionData.getName());
// Check the name is registered
if (nameData == null) {
return ValidationResult.NAME_DOES_NOT_EXIST;
}
// Check that the transaction signer owns the name
if (!Objects.equals(this.getCreator().getAddress(), nameData.getOwner())) {
return ValidationResult.INVALID_NAME_OWNER;
}
}
// Wrap and delegate final payment validity checks to Payment class
return new Payment(this.repository).isValid(arbitraryTransactionData.getSenderPublicKey(), arbitraryTransactionData.getPayments(),
arbitraryTransactionData.getFee());
}
@Override
public boolean isSignatureValid() {
byte[] signature = this.transactionData.getSignature();
if (signature == null) {
return false;
}
byte[] transactionBytes;
try {
transactionBytes = ArbitraryTransactionTransformer.toBytesForSigning(this.transactionData);
} catch (TransformationException e) {
throw new RuntimeException("Unable to transform transaction to byte array for verification", e);
}
if (!Crypto.verify(this.transactionData.getCreatorPublicKey(), signature, transactionBytes)) {
return false;
}
// Nonce wasn't added until version 5+
if (arbitraryTransactionData.getVersion() >= 5) {
int nonce = arbitraryTransactionData.getNonce();
// Clear nonce from transactionBytes
ArbitraryTransactionTransformer.clearNonce(transactionBytes);
// Check nonce
int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty();
return MemoryPoW.verify2(transactionBytes, POW_BUFFER_SIZE, difficulty, nonce);
}
return true;
}
@Override
public ValidationResult isProcessable() throws DataException {
// Wrap and delegate final payment processable checks to Payment class
@ -60,6 +210,30 @@ public class ArbitraryTransaction extends Transaction {
arbitraryTransactionData.getFee());
}
@Override
protected void onImportAsUnconfirmed() throws DataException {
// We may need to move files from the misc_ folder
ArbitraryTransactionUtils.checkAndRelocateMiscFiles(arbitraryTransactionData);
// If the data is local, we need to perform a few actions
if (isDataLocal()) {
// We have the data for this transaction, so invalidate the cache
if (arbitraryTransactionData.getName() != null) {
ArbitraryDataManager.getInstance().invalidateCache(arbitraryTransactionData);
}
// We also need to broadcast to the network that we are now hosting files for this transaction,
// but only if these files are in accordance with our storage policy
if (ArbitraryDataStorageManager.getInstance().canStoreData(arbitraryTransactionData)) {
// Use a null peer address to indicate our own
byte[] signature = arbitraryTransactionData.getSignature();
Message arbitrarySignatureMessage = new ArbitrarySignaturesMessage(null, Arrays.asList(signature));
Network.getInstance().broadcast(broadcastPeer -> arbitrarySignatureMessage);
}
}
}
@Override
public void preProcess() throws DataException {
// Nothing to do
@ -100,10 +274,9 @@ public class ArbitraryTransaction extends Transaction {
/** Returns arbitrary data payload, fetching from network if needed. Can block for a while! */
public byte[] fetchData() throws DataException {
// If local, read from file
if (isDataLocal())
if (isDataLocal()) {
return this.repository.getArbitraryRepository().fetchData(this.transactionData.getSignature());
// TODO If not local, attempt to fetch via network?
}
return null;
}
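Summarising the proof-of-work flow introduced above: the four nonce bytes are zeroed out of the signing payload before both computing and verifying, so the work commits to everything in the transaction except the nonce itself. A condensed sketch of that round trip, using only calls that appear in this diff (imports and exception handling omitted):

// Compute (from computeNonce): zero the nonce field, then search at the current difficulty
byte[] transactionBytes = TransactionTransformer.toBytesForSigning(transactionData);
ArbitraryTransactionTransformer.clearNonce(transactionBytes);
int difficulty = ArbitraryDataManager.getInstance().getPowDifficulty();
int nonce = MemoryPoW.compute2(transactionBytes, ArbitraryTransaction.POW_BUFFER_SIZE, difficulty);

// Verify (from isSignatureValid): rebuild the same zero-nonce payload and check the claimed nonce
byte[] checkBytes = ArbitraryTransactionTransformer.toBytesForSigning(transactionData);
ArbitraryTransactionTransformer.clearNonce(checkBytes);
boolean nonceOk = MemoryPoW.verify2(checkBytes, ArbitraryTransaction.POW_BUFFER_SIZE, difficulty, nonce);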

View File

@ -8,6 +8,7 @@ import org.qortal.account.PublicKeyAccount;
import org.qortal.asset.Asset;
import org.qortal.crypto.Crypto;
import org.qortal.crypto.MemoryPoW;
import org.qortal.data.naming.NameData;
import org.qortal.data.transaction.ChatTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.group.Group;
@ -144,10 +145,22 @@ public class ChatTransaction extends Transaction {
public ValidationResult isValid() throws DataException {
// Nonce checking is done via isSignatureValid() as that method is only called once per import
// Check for blacklisted author by address
// Check for blocked author by address
ResourceListManager listManager = ResourceListManager.getInstance();
if (listManager.listContains("blacklist", "address", this.chatTransactionData.getSender())) {
return ValidationResult.ADDRESS_IN_BLACKLIST;
if (listManager.listContains("blockedAddresses", this.chatTransactionData.getSender(), true)) {
return ValidationResult.ADDRESS_BLOCKED;
}
// Check for blocked author by registered name
List<NameData> names = this.repository.getNameRepository().getNamesByOwner(this.chatTransactionData.getSender());
if (names != null && names.size() > 0) {
for (NameData nameData : names) {
if (nameData != null && nameData.getName() != null) {
if (listManager.listContains("blockedNames", nameData.getName(), false)) {
return ValidationResult.NAME_BLOCKED;
}
}
}
}
// If we exist in the repository then we've been imported as unconfirmed,

View File

@ -247,7 +247,8 @@ public abstract class Transaction {
INVALID_GROUP_BLOCK_DELAY(93),
INCORRECT_NONCE(94),
INVALID_TIMESTAMP_SIGNATURE(95),
ADDRESS_IN_BLACKLIST(96),
ADDRESS_BLOCKED(96),
NAME_BLOCKED(97),
INVALID_BUT_OK(999),
NOT_YET_RELEASED(1000);
@ -316,6 +317,10 @@ public abstract class Transaction {
return this.transactionData;
}
public void setRepository(Repository repository) {
this.repository = repository;
}
// More information
public static long getDeadline(TransactionData transactionData) {
@ -345,6 +350,10 @@ public abstract class Transaction {
long unitFee = BlockChain.getInstance().getUnitFee();
int maxBytePerUnitFee = BlockChain.getInstance().getMaxBytesPerUnitFee();
// If the unit fee is zero, any fee is enough to cover the byte-length of the transaction
if (unitFee == 0) {
return true;
}
return this.feePerByte() >= maxBytePerUnitFee / unitFee;
}
@ -373,7 +382,7 @@ public abstract class Transaction {
* @return transaction version number
*/
public static int getVersionByTimestamp(long timestamp) {
return 4;
return 5; // TODO: hard fork timestamp!!
}
/**

View File

@ -20,5 +20,6 @@ public abstract class Transformer {
public static final int MD5_LENGTH = 16;
public static final int SHA256_LENGTH = 32;
public static final int AES256_LENGTH = 32;
}

View File

@ -74,19 +74,30 @@ public class BlockTransformer extends Transformer {
}
/**
* Extract block data and transaction data from serialized bytes.
*
* Extract block data and transaction data from serialized bytes containing a single block.
*
 * @param byteBuffer
* @return BlockData and a List of transactions.
* @throws TransformationException
*/
public static Triple<BlockData, List<TransactionData>, List<ATStateData>> fromByteBuffer(ByteBuffer byteBuffer) throws TransformationException {
return BlockTransformer.fromByteBuffer(byteBuffer, true);
}
/**
* Extract block data and transaction data from serialized bytes containing one or more blocks.
*
 * @param byteBuffer
* @return the next block's BlockData and a List of transactions.
* @throws TransformationException
*/
public static Triple<BlockData, List<TransactionData>, List<ATStateData>> fromByteBuffer(ByteBuffer byteBuffer, boolean finalBlockInBuffer) throws TransformationException {
int version = byteBuffer.getInt();
if (byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH)
if (finalBlockInBuffer && byteBuffer.remaining() < BASE_LENGTH + AT_BYTES_LENGTH - VERSION_LENGTH)
throw new TransformationException("Byte data too short for Block");
if (byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize())
if (finalBlockInBuffer && byteBuffer.remaining() > BlockChain.getInstance().getMaxBlockSize())
throw new TransformationException("Byte data too long for Block");
long timestamp = byteBuffer.getLong();
@ -210,7 +221,8 @@ public class BlockTransformer extends Transformer {
byteBuffer.get(onlineAccountsSignatures);
}
if (byteBuffer.hasRemaining())
// We should only complain about excess byte data if we aren't expecting more blocks in this ByteBuffer
if (finalBlockInBuffer && byteBuffer.hasRemaining())
throw new TransformationException("Excess byte data found after parsing Block");
// We don't have a height!
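The new finalBlockInBuffer flag lets callers pull several consecutive blocks out of one ByteBuffer; the minimum-size, maximum-size and excess-data checks only make sense once the last block is being read. A minimal sketch of such a caller (blockCount is illustrative; exception handling omitted):

// Deserialize blockCount consecutive blocks from a single buffer;
// only the final call asserts that no unexpected bytes remain.
List<Triple<BlockData, List<TransactionData>, List<ATStateData>>> blocks = new ArrayList<>();
for (int i = 0; i < blockCount; i++) {
    boolean isFinalBlock = (i == blockCount - 1);
    blocks.add(BlockTransformer.fromByteBuffer(byteBuffer, isFinalBlock));
}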

View File

@ -6,12 +6,15 @@ import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import com.google.common.base.Utf8;
import org.qortal.arbitrary.misc.Service;
import org.qortal.crypto.Crypto;
import org.qortal.data.PaymentData;
import org.qortal.data.transaction.ArbitraryTransactionData;
import org.qortal.data.transaction.BaseTransactionData;
import org.qortal.data.transaction.TransactionData;
import org.qortal.data.transaction.ArbitraryTransactionData.DataType;
import org.qortal.naming.Name;
import org.qortal.transaction.ArbitraryTransaction;
import org.qortal.transaction.Transaction;
import org.qortal.transaction.Transaction.TransactionType;
@ -26,12 +29,23 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
// Property lengths
private static final int SERVICE_LENGTH = INT_LENGTH;
private static final int NONCE_LENGTH = INT_LENGTH;
private static final int DATA_TYPE_LENGTH = BYTE_LENGTH;
private static final int DATA_SIZE_LENGTH = INT_LENGTH;
private static final int RAW_DATA_SIZE_LENGTH = INT_LENGTH;
private static final int METADATA_HASH_SIZE_LENGTH = INT_LENGTH;
private static final int NUMBER_PAYMENTS_LENGTH = INT_LENGTH;
private static final int NAME_SIZE_LENGTH = INT_LENGTH;
private static final int IDENTIFIER_SIZE_LENGTH = INT_LENGTH;
private static final int COMPRESSION_LENGTH = INT_LENGTH;
private static final int METHOD_LENGTH = INT_LENGTH;
private static final int SECRET_LENGTH = INT_LENGTH; // TODO: wtf?
private static final int EXTRAS_LENGTH = SERVICE_LENGTH + DATA_TYPE_LENGTH + DATA_SIZE_LENGTH;
private static final int EXTRAS_V5_LENGTH = NONCE_LENGTH + NAME_SIZE_LENGTH + IDENTIFIER_SIZE_LENGTH +
METHOD_LENGTH + SECRET_LENGTH + COMPRESSION_LENGTH + RAW_DATA_SIZE_LENGTH + METADATA_HASH_SIZE_LENGTH;
protected static final TransactionLayout layout;
static {
@ -41,8 +55,18 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
layout.add("transaction's groupID", TransformationType.INT);
layout.add("reference", TransformationType.SIGNATURE);
layout.add("sender's public key", TransformationType.PUBLIC_KEY);
layout.add("number of payments", TransformationType.INT);
layout.add("nonce", TransformationType.INT); // Version 5+
layout.add("name length", TransformationType.INT); // Version 5+
layout.add("name", TransformationType.DATA); // Version 5+
layout.add("identifier length", TransformationType.INT); // Version 5+
layout.add("identifier", TransformationType.DATA); // Version 5+
layout.add("method", TransformationType.INT); // Version 5+
layout.add("secret length", TransformationType.INT); // Version 5+
layout.add("secret", TransformationType.DATA); // Version 5+
layout.add("compression", TransformationType.INT); // Version 5+
layout.add("number of payments", TransformationType.INT);
layout.add("* recipient", TransformationType.ADDRESS);
layout.add("* asset ID of payment", TransformationType.LONG);
layout.add("* payment amount", TransformationType.AMOUNT);
@ -51,6 +75,11 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
layout.add("is data raw?", TransformationType.BOOLEAN);
layout.add("data length", TransformationType.INT);
layout.add("data", TransformationType.DATA);
layout.add("raw data size", TransformationType.INT); // Version 5+
layout.add("metadata hash length", TransformationType.INT); // Version 5+
layout.add("metadata hash", TransformationType.DATA); // Version 5+
layout.add("fee", TransformationType.AMOUNT);
layout.add("signature", TransformationType.SIGNATURE);
}
@ -67,6 +96,32 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
byte[] senderPublicKey = Serialization.deserializePublicKey(byteBuffer);
int nonce = 0;
String name = null;
String identifier = null;
ArbitraryTransactionData.Method method = null;
byte[] secret = null;
ArbitraryTransactionData.Compression compression = null;
if (version >= 5) {
nonce = byteBuffer.getInt();
name = Serialization.deserializeSizedString(byteBuffer, Name.MAX_NAME_SIZE);
identifier = Serialization.deserializeSizedString(byteBuffer, ArbitraryTransaction.MAX_IDENTIFIER_LENGTH);
method = ArbitraryTransactionData.Method.valueOf(byteBuffer.getInt());
int secretLength = byteBuffer.getInt();
if (secretLength > 0) {
secret = new byte[secretLength];
byteBuffer.get(secret);
}
compression = ArbitraryTransactionData.Compression.valueOf(byteBuffer.getInt());
}
// Always return a list of payments, even if empty
List<PaymentData> payments = new ArrayList<>();
if (version != 1) {
@ -76,7 +131,7 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
payments.add(PaymentTransformer.fromByteBuffer(byteBuffer));
}
int service = byteBuffer.getInt();
Service service = Service.valueOf(byteBuffer.getInt());
// We might be receiving hash of data instead of actual raw data
boolean isRaw = byteBuffer.get() != 0;
@ -91,6 +146,20 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
byte[] data = new byte[dataSize];
byteBuffer.get(data);
int size = 0;
byte[] metadataHash = null;
if (version >= 5) {
size = byteBuffer.getInt();
int metadataHashLength = byteBuffer.getInt();
if (metadataHashLength > 0) {
metadataHash = new byte[metadataHashLength];
byteBuffer.get(metadataHash);
}
}
long fee = byteBuffer.getLong();
byte[] signature = new byte[SIGNATURE_LENGTH];
@ -98,13 +167,24 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
BaseTransactionData baseTransactionData = new BaseTransactionData(timestamp, txGroupId, reference, senderPublicKey, fee, signature);
return new ArbitraryTransactionData(baseTransactionData, version, service, data, dataType, payments);
return new ArbitraryTransactionData(baseTransactionData, version, service, nonce, size, name, identifier,
method, secret, compression, data, dataType, metadataHash, payments);
}
public static int getDataLength(TransactionData transactionData) throws TransformationException {
ArbitraryTransactionData arbitraryTransactionData = (ArbitraryTransactionData) transactionData;
int length = getBaseLength(transactionData) + EXTRAS_LENGTH + arbitraryTransactionData.getData().length;
int nameLength = (arbitraryTransactionData.getName() != null) ? Utf8.encodedLength(arbitraryTransactionData.getName()) : 0;
int identifierLength = (arbitraryTransactionData.getIdentifier() != null) ? Utf8.encodedLength(arbitraryTransactionData.getIdentifier()) : 0;
int secretLength = (arbitraryTransactionData.getSecret() != null) ? arbitraryTransactionData.getSecret().length : 0;
int dataLength = (arbitraryTransactionData.getData() != null) ? arbitraryTransactionData.getData().length : 0;
int metadataHashLength = (arbitraryTransactionData.getMetadataHash() != null) ? arbitraryTransactionData.getMetadataHash().length : 0;
int length = getBaseLength(transactionData) + EXTRAS_LENGTH + nameLength + identifierLength + secretLength + dataLength + metadataHashLength;
if (arbitraryTransactionData.getVersion() >= 5) {
length += EXTRAS_V5_LENGTH;
}
// Optional payments
length += NUMBER_PAYMENTS_LENGTH + arbitraryTransactionData.getPayments().size() * PaymentTransformer.getDataLength();
@ -120,19 +200,51 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
transformCommonBytes(transactionData, bytes);
if (arbitraryTransactionData.getVersion() >= 5) {
bytes.write(Ints.toByteArray(arbitraryTransactionData.getNonce()));
Serialization.serializeSizedString(bytes, arbitraryTransactionData.getName());
Serialization.serializeSizedString(bytes, arbitraryTransactionData.getIdentifier());
bytes.write(Ints.toByteArray(arbitraryTransactionData.getMethod().value));
byte[] secret = arbitraryTransactionData.getSecret();
int secretLength = (secret != null) ? secret.length : 0;
bytes.write(Ints.toByteArray(secretLength));
if (secretLength > 0) {
bytes.write(secret);
}
bytes.write(Ints.toByteArray(arbitraryTransactionData.getCompression().value));
}
List<PaymentData> payments = arbitraryTransactionData.getPayments();
bytes.write(Ints.toByteArray(payments.size()));
for (PaymentData paymentData : payments)
bytes.write(PaymentTransformer.toBytes(paymentData));
bytes.write(Ints.toByteArray(arbitraryTransactionData.getService()));
bytes.write(Ints.toByteArray(arbitraryTransactionData.getService().value));
bytes.write((byte) (arbitraryTransactionData.getDataType() == DataType.RAW_DATA ? 1 : 0));
bytes.write(Ints.toByteArray(arbitraryTransactionData.getData().length));
bytes.write(arbitraryTransactionData.getData());
if (arbitraryTransactionData.getVersion() >= 5) {
bytes.write(Ints.toByteArray(arbitraryTransactionData.getSize()));
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
int metadataHashLength = (metadataHash != null) ? metadataHash.length : 0;
bytes.write(Ints.toByteArray(metadataHashLength));
if (metadataHashLength > 0) {
bytes.write(metadataHash);
}
}
bytes.write(Longs.toByteArray(arbitraryTransactionData.getFee()));
if (arbitraryTransactionData.getSignature() != null)
@ -159,6 +271,26 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
transformCommonBytes(arbitraryTransactionData, bytes);
if (arbitraryTransactionData.getVersion() >= 5) {
bytes.write(Ints.toByteArray(arbitraryTransactionData.getNonce()));
Serialization.serializeSizedString(bytes, arbitraryTransactionData.getName());
Serialization.serializeSizedString(bytes, arbitraryTransactionData.getIdentifier());
bytes.write(Ints.toByteArray(arbitraryTransactionData.getMethod().value));
byte[] secret = arbitraryTransactionData.getSecret();
int secretLength = (secret != null) ? secret.length : 0;
bytes.write(Ints.toByteArray(secretLength));
if (secretLength > 0) {
bytes.write(secret);
}
bytes.write(Ints.toByteArray(arbitraryTransactionData.getCompression().value));
}
if (arbitraryTransactionData.getVersion() != 1) {
List<PaymentData> payments = arbitraryTransactionData.getPayments();
bytes.write(Ints.toByteArray(payments.size()));
@ -167,7 +299,7 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
bytes.write(PaymentTransformer.toBytes(paymentData));
}
bytes.write(Ints.toByteArray(arbitraryTransactionData.getService()));
bytes.write(Ints.toByteArray(arbitraryTransactionData.getService().value));
bytes.write(Ints.toByteArray(arbitraryTransactionData.getData().length));
@ -182,6 +314,18 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
break;
}
if (arbitraryTransactionData.getVersion() >= 5) {
bytes.write(Ints.toByteArray(arbitraryTransactionData.getSize()));
byte[] metadataHash = arbitraryTransactionData.getMetadataHash();
int metadataHashLength = (metadataHash != null) ? metadataHash.length : 0;
bytes.write(Ints.toByteArray(metadataHashLength));
if (metadataHashLength > 0) {
bytes.write(metadataHash);
}
}
bytes.write(Longs.toByteArray(arbitraryTransactionData.getFee()));
// Never append signature
@ -192,4 +336,13 @@ public class ArbitraryTransactionTransformer extends TransactionTransformer {
}
}
public static void clearNonce(byte[] transactionBytes) {
int nonceIndex = TYPE_LENGTH + TIMESTAMP_LENGTH + GROUPID_LENGTH + REFERENCE_LENGTH + PUBLIC_KEY_LENGTH;
transactionBytes[nonceIndex++] = (byte) 0;
transactionBytes[nonceIndex++] = (byte) 0;
transactionBytes[nonceIndex++] = (byte) 0;
transactionBytes[nonceIndex++] = (byte) 0;
}
}
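The version 5 fields above share one convention for optional byte arrays (secret, metadata hash): a 4-byte length prefix followed by the bytes, with a zero length standing in for null. A self-contained JDK-only sketch of that encoding; the class and method names are illustrative and do not use the project's Serialization helpers.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class OptionalBytesExample {

    /** Write an optional byte[] as <int length><bytes>, using length 0 for null */
    static void writeOptional(ByteArrayOutputStream out, byte[] value) throws IOException {
        int length = (value != null) ? value.length : 0;
        out.write(ByteBuffer.allocate(4).putInt(length).array());
        if (length > 0) {
            out.write(value);
        }
    }

    /** Read the same encoding back, returning null for a zero length */
    static byte[] readOptional(ByteBuffer in) {
        int length = in.getInt();
        if (length == 0) {
            return null;
        }
        byte[] value = new byte[length];
        in.get(value);
        return value;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeOptional(out, new byte[] { 1, 2, 3 }); // present field
        writeOptional(out, null);                   // absent field
        ByteBuffer in = ByteBuffer.wrap(out.toByteArray());
        System.out.println(readOptional(in).length); // 3
        System.out.println(readOptional(in));        // null
    }
}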

Some files were not shown because too many files have changed in this diff.